// NOTE(review): removed non-source extraction artifact (dataset/markdown table residue) that preceded this file.
/*
*/
/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */
#include <boost/range/algorithm/heap_algorithm.hpp>
#include <boost/range/algorithm/remove.hpp>
#include <boost/range/algorithm.hpp>
#include <boost/heap/binomial_heap.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/slist.hpp>
#include <boost/range/adaptors.hpp>
#include <stack>
#include <seastar/core/memory.hh>
#include <seastar/core/align.hh>
#include <seastar/core/print.hh>
#include <seastar/core/metrics.hh>
#include <seastar/util/alloc_failure_injector.hh>
#include <seastar/util/backtrace.hh>
#include "utils/logalloc.hh"
#include "log.hh"
#include "utils/dynamic_bitset.hh"
#include "utils/log_heap.hh"
#include <random>
namespace bi = boost::intrusive;

// Definition of the global standard (non-LSA) allocation strategy object
// declared in the allocation_strategy header.
standard_allocation_strategy standard_allocation_strategy_instance;
namespace {
#ifdef DEBUG_LSA_SANITIZER
// Debug-build (DEBUG_LSA_SANITIZER) registry of object migrators.
// Ids are drawn at random so stale/forged ids are detected with high
// probability, and registration/deregistration backtraces are retained so
// that double-deregistration and use-after-deregistration can be reported.
// Any detected misuse aborts the process.
class migrators : public enable_lw_shared_from_this<migrators> {
public:
    // Only half the uint32_t range is used so that an id can be encoded as
    // id*2+1 elsewhere (see the static_assert in object_descriptor).
    static constexpr uint32_t maximum_id_value = std::numeric_limits<uint32_t>::max() / 2;
private:
    struct migrator_entry {
        const migrate_fn_type* _migrator; // nullptr once deregistered
        saved_backtrace _registration;
        saved_backtrace _deregistration;
    };
    std::unordered_map<uint32_t, migrator_entry> _migrators;
    std::default_random_engine _random_engine { std::random_device()() };
    std::uniform_int_distribution<uint32_t> _id_distribution { 0, maximum_id_value };
    static logging::logger _logger;
private:
    void on_error() { abort(); }
public:
    // Registers m under a fresh random id and returns that id.
    uint32_t add(const migrate_fn_type* m) {
        while (true) {
            auto id = _id_distribution(_random_engine);
            if (_migrators.count(id)) {
                continue; // id already taken; draw another
            }
            _migrators.emplace(id, migrator_entry { m, current_backtrace(), {} });
            return id;
        }
    }
    // Marks idx as deregistered; aborts on unknown or already-removed ids.
    void remove(uint32_t idx) {
        auto it = _migrators.find(idx);
        if (it == _migrators.end()) {
            _logger.error("Attempting to deregister migrator id {} which was never registered:\n{}",
                          idx, current_backtrace());
            on_error();
        }
        if (!it->second._migrator) {
            _logger.error("Attempting to double deregister migrator id {}:\n{}\n"
                          "Previously deregistered at:\n{}\nRegistered at:\n{}",
                          idx, current_backtrace(), it->second._deregistration,
                          it->second._registration);
            on_error();
        }
        it->second._migrator = nullptr;
        it->second._deregistration = current_backtrace();
    }
    // Looks up a live migrator; aborts on unknown or deregistered ids.
    const migrate_fn_type*& operator[](uint32_t idx) {
        auto it = _migrators.find(idx);
        if (it == _migrators.end()) {
            _logger.error("Attempting to use migrator id {} that was never registered:\n{}",
                          idx, current_backtrace());
            on_error();
        }
        if (!it->second._migrator) {
            _logger.error("Attempting to use deregistered migrator id {}:\n{}\n"
                          "Deregistered at:\n{}\nRegistered at:\n{}",
                          idx, current_backtrace(), it->second._deregistration,
                          it->second._registration);
            on_error();
        }
        return it->second._migrator;
    }
};

logging::logger migrators::_logger("lsa-migrator-sanitizer");
#else
// Release-build registry of object migrators. Ids are dense indices into a
// vector; ids released by remove() are recycled by add() in FIFO order.
class migrators : public enable_lw_shared_from_this<migrators> {
    std::vector<const migrate_fn_type*> _migrators;
    std::deque<uint32_t> _unused_ids;
public:
    static constexpr uint32_t maximum_id_value = std::numeric_limits<uint32_t>::max() / 2;

    // Registers m and returns its id, preferring a recycled slot.
    uint32_t add(const migrate_fn_type* m) {
        if (_unused_ids.empty()) {
            _migrators.push_back(m);
            return _migrators.size() - 1;
        }
        auto recycled = _unused_ids.front();
        _unused_ids.pop_front();
        _migrators[recycled] = m;
        return recycled;
    }

    // Releases an id for reuse; the slot itself is not cleared.
    void remove(uint32_t idx) {
        _unused_ids.push_back(idx);
    }

    const migrate_fn_type*& operator[](uint32_t idx) {
        return _migrators[idx];
    }
};
#endif
// Returns the per-shard migrators registry, created lazily on first use.
static migrators& static_migrators() {
    static thread_local auto registry = make_lw_shared<migrators>();
    return *registry;
}
}
namespace debug {

// Exposed pointer to the per-shard migrators registry, for use by
// debuggers and tests.
thread_local migrators* static_migrators = &::static_migrators();

}
// Registers a migrator in the per-shard registry, returning its id. The
// migrator keeps a shared reference to the registry so the registry
// outlives all registered migrators.
uint32_t
migrate_fn_type::register_migrator(migrate_fn_type* m) {
    auto& registry = *debug::static_migrators;
    const auto id = registry.add(m);
    m->_migrators = registry.shared_from_this();
    return id;
}
// Removes a previously registered migrator; its id may be recycled by a
// later registration.
void
migrate_fn_type::unregister_migrator(uint32_t index) {
    static_migrators().remove(index);
}
namespace logalloc {
#ifdef DEBUG_LSA_SANITIZER
// Debug-build (DEBUG_LSA_SANITIZER) allocation sanitizer. Tracks every
// live LSA allocation with the backtrace of its allocation site and
// verifies that frees, migrations, and region merges are consistent.
// Any detected violation is logged and aborts the process.
class region_sanitizer {
    // A single live allocation: its size and allocation-site backtrace.
    struct allocation {
        size_t size;
        saved_backtrace backtrace;
    };
private:
    static logging::logger logger;
    // Set when the sanitizer itself failed (e.g. an internal exception);
    // all further checking is then disabled.
    bool _broken = false;
    std::unordered_map<const void*, allocation> _allocations;
private:
    // Runs fn with allocation-failure injection disabled. An exception
    // escaping fn disables the sanitizer instead of propagating into the
    // allocator.
    template<typename Function>
    void run_and_handle_errors(Function&& fn) noexcept {
        memory::disable_failure_guard dfg;
        if (_broken) {
            return;
        }
        try {
            fn();
        } catch (...) {
            logger.error("Internal error, disabling the sanitizer: {}", std::current_exception());
            _broken = true;
            _allocations.clear();
        }
    }
private:
    void on_error() { abort(); }
public:
    // Called when a region is destroyed; reports any still-live allocations.
    void on_region_destruction() noexcept {
        run_and_handle_errors([&] {
            if (_allocations.empty()) {
                return;
            }
            for (auto [ptr, alloc] : _allocations) {
                logger.error("Leaked {} byte object at {} allocated from:\n{}",
                             alloc.size, ptr, alloc.backtrace);
            }
            on_error();
        });
    }
    void on_allocation(const void* ptr, size_t size) noexcept {
        run_and_handle_errors([&] {
            auto [ it, success ] = _allocations.emplace(ptr, allocation { size, current_backtrace() });
            if (!success) {
                // Fixed argument order: the format string expects the size
                // first ("{} byte object") and the address second, but
                // (ptr, size) was previously passed.
                logger.error("Attempting to allocate an {} byte object at an already occupied address {}:\n{}\n"
                             "Previous allocation of {} bytes:\n{}",
                             size, ptr, current_backtrace(), it->second.size, it->second.backtrace);
                on_error();
            }
        });
    }
    void on_free(const void* ptr, size_t size) noexcept {
        run_and_handle_errors([&] {
            auto it = _allocations.find(ptr);
            if (it == _allocations.end()) {
                logger.error("Attempting to free an object at {} (size: {}) that does not exist\n{}",
                             ptr, size, current_backtrace());
                on_error();
            }
            if (it->second.size != size) {
                logger.error("Mismatch between allocation and deallocation size of object at {}: {} vs. {}:\n{}\n"
                             "Allocated at:\n{}",
                             ptr, it->second.size, size, current_backtrace(), it->second.backtrace);
                on_error();
            }
            _allocations.erase(it);
        });
    }
    void on_migrate(const void* src, size_t size, const void* dst) noexcept {
        run_and_handle_errors([&] {
            auto it_src = _allocations.find(src);
            if (it_src == _allocations.end()) {
                logger.error("Attempting to migrate an object at {} (size: {}) that does not exist",
                             src, size);
                on_error();
            }
            if (it_src->second.size != size) {
                logger.error("Mismatch between allocation and migration size of object at {}: {} vs. {}\n"
                             "Allocated at:\n{}",
                             src, it_src->second.size, size, it_src->second.backtrace);
                on_error();
            }
            // Check the destination before moving the source entry, so the
            // error path does not read a moved-from allocation record (the
            // previous code moved into emplace() and then logged
            // it_src->second.backtrace when the emplace failed).
            auto it_dst = _allocations.find(dst);
            if (it_dst != _allocations.end()) {
                logger.error("Attempting to migrate an {} byte object to an already occupied address {}:\n"
                             "Migrated object allocated from:\n{}\n"
                             "Previous allocation of {} bytes at the destination:\n{}",
                             size, dst, it_src->second.backtrace, it_dst->second.size, it_dst->second.backtrace);
                on_error();
            }
            _allocations.emplace(dst, std::move(it_src->second));
            // Erase by key: the emplace above may have triggered a rehash,
            // which would invalidate it_src.
            _allocations.erase(src);
        });
    }
    // Absorbs the allocation records of another region's sanitizer
    // (invoked when regions are merged).
    void merge(region_sanitizer& other) noexcept {
        run_and_handle_errors([&] {
            _broken = other._broken;
            if (_broken) {
                _allocations.clear();
            } else {
                // merge() moves all non-conflicting entries out of other;
                // anything left behind conflicts with an existing address.
                _allocations.merge(other._allocations);
                if (!other._allocations.empty()) {
                    for (auto [ptr, o_alloc] : other._allocations) {
                        auto& alloc = _allocations.at(ptr);
                        logger.error("Conflicting allocations at address {} in merged regions\n"
                                     "{} bytes allocated from:\n{}\n"
                                     "{} bytes allocated from:\n{}",
                                     ptr, alloc.size, alloc.backtrace, o_alloc.size, o_alloc.backtrace);
                    }
                    on_error();
                }
            }
        });
    }
};

logging::logger region_sanitizer::logger("lsa-sanitizer");
#else
// Release-build sanitizer: every hook is an inlineable no-op.
struct region_sanitizer {
    void on_region_destruction() noexcept {}
    void on_allocation(const void*, size_t) noexcept {}
    void on_free(const void*, size_t) noexcept {}
    void on_migrate(const void*, size_t, const void*) noexcept {}
    void merge(region_sanitizer&) noexcept {}
};
#endif
struct segment;

// Main LSA logger plus a dedicated logger for timing-related messages.
static logging::logger llogger("lsa");
static logging::logger timing_logger("lsa-timing");
// One tracker instance per shard, accessed through shard_tracker().
static thread_local tracker tracker_instance;

using clock = std::chrono::steady_clock;
// Per-shard bookkeeping behind the public tracker API: holds all registered
// regions and drives reclamation, compaction and eviction across them.
class tracker::impl {
    std::vector<region::impl*> _regions;
    seastar::metrics::metric_groups _metrics;
    bool _reclaiming_enabled = true;
    size_t _reclamation_step = 1; // in segments
    bool _abort_on_bad_alloc = false;
private:
    // Prevents tracker's reclaimer from running while live. Reclaimer may be
    // invoked synchronously with allocator. This guard ensures that this
    // object is not re-entered while inside one of the tracker's methods.
    struct reclaiming_lock {
        impl& _ref;
        bool _prev; // previous enabled state, restored on destruction
        reclaiming_lock(impl& ref)
            : _ref(ref)
            , _prev(ref._reclaiming_enabled)
        {
            _ref._reclaiming_enabled = false;
        }
        ~reclaiming_lock() {
            _ref._reclaiming_enabled = _prev;
        }
    };
    friend class tracker_reclaimer_lock;
public:
    impl();
    ~impl();
    void register_region(region::impl*);
    void unregister_region(region::impl*) noexcept;
    size_t reclaim(size_t bytes);
    reactor::idle_cpu_handler_result compact_on_idle(reactor::work_waiting_on_reactor check_for_work);
    size_t compact_and_evict(size_t bytes);
    size_t compact_and_evict_locked(size_t bytes);
    void full_compaction();
    void reclaim_all_free_segments();
    occupancy_stats region_occupancy();
    occupancy_stats occupancy();
    size_t non_lsa_used_space();
    void set_reclamation_step(size_t step_in_segments) { _reclamation_step = step_in_segments; }
    size_t reclamation_step() const { return _reclamation_step; }
    // When set, a failed segment allocation aborts instead of returning null.
    void enable_abort_on_bad_alloc() { _abort_on_bad_alloc = true; }
    bool should_abort_on_bad_alloc() const { return _abort_on_bad_alloc; }
};
// RAII guard that disables the shard tracker's reclaimer for its lifetime;
// used to prevent reclaim re-entering allocator code paths.
class tracker_reclaimer_lock {
    tracker::impl::reclaiming_lock _lock;
public:
    tracker_reclaimer_lock() : _lock(shard_tracker().get_impl()) { }
};
tracker::tracker()
    : _impl(std::make_unique<impl>())
    // Register with seastar's memory subsystem as a synchronous reclaimer.
    , _reclaimer([this] { return reclaim(); }, memory::reclaimer_scope::sync)
{ }

tracker::~tracker() {
}

// The public tracker API simply forwards to the per-shard impl.

size_t tracker::reclaim(size_t bytes) {
    return _impl->reclaim(bytes);
}

reactor::idle_cpu_handler_result tracker::compact_on_idle(reactor::work_waiting_on_reactor check_for_work) {
    return _impl->compact_on_idle(check_for_work);
}

occupancy_stats tracker::region_occupancy() {
    return _impl->region_occupancy();
}

occupancy_stats tracker::occupancy() {
    return _impl->occupancy();
}

size_t tracker::non_lsa_used_space() const {
    return _impl->non_lsa_used_space();
}

void tracker::full_compaction() {
    return _impl->full_compaction();
}

void tracker::reclaim_all_free_segments() {
    return _impl->reclaim_all_free_segments();
}

// Accessor for this shard's tracker singleton.
tracker& shard_tracker() {
    return tracker_instance;
}
// A fixed-size chunk of memory from which LSA objects are allocated.
// Per-segment metadata (free space, owning region) is kept separately in a
// segment_descriptor managed by segment_pool.
struct segment {
    static constexpr int size_shift = segment_size_shift;
    // Offsets within a segment fit in 16 bits when segments are small.
    using size_type = std::conditional_t<(size_shift < 16), uint16_t, uint32_t>;
    static constexpr size_t size = segment_size;

    uint8_t data[size];

    segment() noexcept { }

    // Typed access at a byte offset into the segment.
    template<typename T = void>
    const T* at(size_t offset) const {
        return reinterpret_cast<const T*>(data + offset);
    }

    template<typename T = void>
    T* at(size_t offset) {
        return reinterpret_cast<T*>(data + offset);
    }

    bool is_empty() const;
    void record_alloc(size_type size);
    void record_free(size_type size);
    occupancy_stats occupancy() const;

#ifndef SEASTAR_DEFAULT_ALLOCATOR
    // Segments may only be placement-new'ed into pool-provided memory.
    static void* operator new(size_t size) = delete;
    static void* operator new(size_t, void* ptr) noexcept { return ptr; }
    static void operator delete(void* ptr) = delete;
#endif
};
// Objects larger than this are handed to the standard allocator instead of
// being stored inside segments.
static constexpr size_t max_managed_object_size = segment_size * 0.1;
// A closed segment is compactible only when its used space is below this
// fraction of the segment size.
static constexpr auto max_used_space_ratio_for_compaction = 0.85;
static constexpr size_t max_used_space_for_compaction = segment_size * max_used_space_ratio_for_compaction;
static constexpr size_t min_free_space_for_compaction = segment_size - max_used_space_for_compaction;

static_assert(min_free_space_for_compaction >= max_managed_object_size,
    "Segments which cannot fit max_managed_object_size must not be considered compactible for the sake of forward progress of compaction");

// Since we only compact if there's >= min_free_space_for_compaction of free space,
// we use min_free_space_for_compaction as the histogram's minimum size and put
// everything below that value in the same bucket.
extern constexpr log_heap_options segment_descriptor_hist_options(min_free_space_for_compaction, 3, segment_size);
// Per-segment metadata: remaining free space and the owning region
// (nullptr for segments not in use by any region). Hooks into a log_heap
// histogram that orders closed segments by occupancy.
struct segment_descriptor : public log_heap_hook<segment_descriptor_hist_options> {
    segment::size_type _free_space;
    region::impl* _region;

    segment_descriptor()
        : _region(nullptr)
    { }

    bool is_empty() const {
        return _free_space == segment::size;
    }

    occupancy_stats occupancy() const {
        return { _free_space, segment::size };
    }

    void record_alloc(segment::size_type size) {
        _free_space -= size;
    }

    void record_free(segment::size_type size) {
        _free_space += size;
    }
};

using segment_descriptor_hist = log_heap<segment_descriptor, segment_descriptor_hist_options>;
#ifndef SEASTAR_DEFAULT_ALLOCATOR
// Segment pool implementation for the seastar allocator.
// Stores segment descriptors in a vector which is indexed using most significant
// bits of segment address.
//
// We prefer using high-address segments, and returning low-address segments to the seastar
// allocator in order to segregate lsa and non-lsa memory, to reduce fragmentation.
// Segment pool implementation for the seastar allocator.
// Stores segment descriptors in a vector which is indexed using most significant
// bits of segment address.
//
// We prefer using high-address segments, and returning low-address segments to the seastar
// allocator in order to segregate lsa and non-lsa memory, to reduce fragmentation.
class segment_pool {
    memory::memory_layout _layout;
    uintptr_t _segments_base; // The address of the first segment
    std::vector<segment_descriptor> _segments;
    size_t _segments_in_use{};
    utils::dynamic_bitset _lsa_owned_segments_bitmap; // owned by this
    utils::dynamic_bitset _lsa_free_segments_bitmap;  // owned by this, but not in use
    size_t _free_segments = 0;
    size_t _current_emergency_reserve_goal = 1;
    size_t _emergency_reserve_max = 30;
    bool _allocation_failure_flag = false;
    size_t _non_lsa_memory_in_use = 0;
    size_t _non_lsa_reserve = 0;
    // Invariants - a segment is in one of the following states:
    //   In use by some region
    //     - set in _lsa_owned_segments_bitmap
    //     - clear in _lsa_free_segments_bitmap
    //     - counted in _segments_in_use
    //   Free:
    //     - set in _lsa_owned_segments_bitmap
    //     - set in _lsa_free_segments_bitmap
    //     - counted in _unreserved_free_segments
    //   Non-lsa:
    //     - clear everywhere
private:
    segment* allocate_segment(size_t reserve);
    // reclamation_step is in segment units
    segment* allocate_segment(size_t reserve, size_t reclamation_step);
    void deallocate_segment(segment* seg);
    friend void* segment::operator new(size_t);
    friend void segment::operator delete(void*);
    segment* allocate_or_fallback_to_reserve();
    void free_or_restore_to_reserve(segment* seg) noexcept;
    // Translation between a segment's index and its address; valid because
    // all segments live in one contiguous, segment-aligned range.
    segment* segment_from_idx(size_t idx) const {
        return reinterpret_cast<segment*>(_segments_base) + idx;
    }
    size_t idx_from_segment(segment* seg) const {
        return seg - reinterpret_cast<segment*>(_segments_base);
    }
    size_t max_segments() const {
        return (_layout.end - _segments_base) / segment::size;
    }
    // True if taking size bytes from the general allocator would still leave
    // at least _non_lsa_reserve bytes free.
    bool can_allocate_more_memory(size_t size) {
        return memory::stats().free_memory() >= _non_lsa_reserve + size;
    }
public:
    segment_pool();
    void prime(size_t available_memory, size_t min_free_memory);
    segment* new_segment(region::impl* r);
    segment_descriptor& descriptor(const segment*);
    // Returns segment containing given object or nullptr.
    segment* containing_segment(const void* obj) const;
    segment* segment_from(const segment_descriptor& desc);
    void free_segment(segment*) noexcept;
    void free_segment(segment*, segment_descriptor&) noexcept;
    size_t segments_in_use() const;
    size_t current_emergency_reserve_goal() const { return _current_emergency_reserve_goal; }
    void set_emergency_reserve_max(size_t new_size) { _emergency_reserve_max = new_size; }
    size_t emergency_reserve_max() { return _emergency_reserve_max; }
    void set_current_emergency_reserve_goal(size_t goal) { _current_emergency_reserve_goal = goal; }
    void clear_allocation_failure_flag() { _allocation_failure_flag = false; }
    bool allocation_failure_flag() { return _allocation_failure_flag; }
    void refill_emergency_reserve();
    void update_non_lsa_memory_in_use(ssize_t n) {
        _non_lsa_memory_in_use += n;
    }
    size_t non_lsa_memory_in_use() const {
        return _non_lsa_memory_in_use;
    }
    size_t total_memory_in_use() const {
        return _non_lsa_memory_in_use + _segments_in_use * segment::size;
    }
    struct reservation_goal;
    void set_region(const segment* seg, region::impl* r) {
        set_region(descriptor(seg), r);
    }
    void set_region(segment_descriptor& desc, region::impl* r) {
        desc._region = r;
    }
    bool migrate_segment(segment* src, segment* dst);
    size_t reclaim_segments(size_t target);
    void reclaim_all_free_segments() {
        reclaim_segments(std::numeric_limits<size_t>::max());
    }

    struct stats {
        size_t segments_migrated;
        size_t segments_compacted;
        uint64_t memory_allocated;
        uint64_t memory_compacted;
    };
private:
    stats _stats{};
public:
    const stats& statistics() const { return _stats; }
    void on_segment_migration() { _stats.segments_migrated++; }
    void on_segment_compaction(size_t used_size);
    void on_memory_allocation(size_t size);
    // Free segments available beyond the emergency reserve.
    size_t unreserved_free_segments() const { return _free_segments - std::min(_free_segments, _emergency_reserve_max); }
    size_t free_segments() const { return _free_segments; }
};
// Releases up to target segments back to the general-purpose allocator,
// preferring low addresses; in-use low segments are first migrated into
// high-address free segments. Returns the number of segments released.
size_t segment_pool::reclaim_segments(size_t target) {
    // Reclaimer tries to release segments occupying lower parts of the address
    // space.
    llogger.debug("Trying to reclaim {} segments", target);

    // Reclamation. Migrate segments to higher addresses and shrink segment pool.
    size_t reclaimed_segments = 0;

    // We may fail to reclaim because a region has reclaim disabled (usually because
    // it is in an allocating_section. Failed reclaims can cause high CPU usage
    // if all of the lower addresses happen to be in a reclaim-disabled region (this
    // is somewhat mitigated by the fact that checking for reclaim disabled is very
    // cheap), but worse, failing a segment reclaim can lead to reclaimed memory
    // being fragmented. This results in the original allocation continuing to fail.
    //
    // To combat that, we limit the number of failed reclaims. If we reach the limit,
    // we fail the reclaim. The surrounding allocating_section will release the
    // reclaim_lock, and increase reserves, which will result in reclaim being
    // retried with all regions being reclaimable, and succeed in allocating
    // contiguous memory.
    size_t failed_reclaims_allowance = 10;

    // Walk LSA-owned segments from the lowest address upwards, stopping when
    // the target is met or the free pool would dip below the reserve goal.
    for (size_t src_idx = _lsa_owned_segments_bitmap.find_first_set();
            reclaimed_segments != target && src_idx != utils::dynamic_bitset::npos
            && _free_segments > _current_emergency_reserve_goal;
            src_idx = _lsa_owned_segments_bitmap.find_next_set(src_idx)) {
        auto src = segment_from_idx(src_idx);
        if (!_lsa_free_segments_bitmap.test(src_idx)) {
            // In-use segment: evacuate it into the highest-address free one.
            auto dst_idx = _lsa_free_segments_bitmap.find_last_set();
            if (dst_idx == utils::dynamic_bitset::npos || dst_idx <= src_idx) {
                break;
            }
            assert(_lsa_owned_segments_bitmap.test(dst_idx));
            auto could_migrate = migrate_segment(src, segment_from_idx(dst_idx));
            if (!could_migrate) {
                if (--failed_reclaims_allowance == 0) {
                    break;
                }
                continue;
            }
        }
        // src is free now; give it back to the general-purpose allocator.
        _lsa_free_segments_bitmap.clear(src_idx);
        _lsa_owned_segments_bitmap.clear(src_idx);
        src->~segment();
        ::free(src);
        ++reclaimed_segments;
        --_free_segments;
    }

    llogger.debug("Reclaimed {} segments (requested {})", reclaimed_segments, target);
    return reclaimed_segments;
}
// Allocates a segment using the tracker's currently configured
// reclamation step.
segment* segment_pool::allocate_segment(size_t reserve) {
    const auto step = shard_tracker().reclamation_step();
    return allocate_segment(reserve, step);
}
// Allocates one segment, keeping at least `reserve` segments free.
// Returns nullptr on failure (or aborts, if configured to).
segment* segment_pool::allocate_segment(size_t reserve, size_t reclamation_step)
{
    //
    // When allocating a segment we want to avoid:
    //  - LSA and general-purpose allocator shouldn't constantly fight each
    //    other for every last bit of memory
    //
    // allocate_segment() always works with LSA reclaimer disabled.
    // 1. Firstly, the algorithm tries to allocate an lsa-owned but free segment
    // 2. If no free segmented is available, a new segment is allocated from the
    //    system allocator. However, if the free memory is below set threshold
    //    this step is skipped.
    // 3. Finally, the algorithm ties to compact and evict data stored in LSA
    //    memory in order to reclaim enough segments.
    //
    do {
        tracker_reclaimer_lock rl;
        if (_free_segments > reserve) {
            // Take the highest-address free segment so LSA memory stays
            // packed at the top of the address space.
            auto free_idx = _lsa_free_segments_bitmap.find_last_set();
            _lsa_free_segments_bitmap.clear(free_idx);
            auto seg = segment_from_idx(free_idx);
            --_free_segments;
            return seg;
        }
        if (can_allocate_more_memory(segment::size)) {
            auto p = aligned_alloc(segment::size, segment::size);
            if (!p) {
                continue;
            }
            auto seg = new (p) segment;
            auto idx = idx_from_segment(seg);
            _lsa_owned_segments_bitmap.set(idx);
            return seg;
        }
    } while (shard_tracker().get_impl().compact_and_evict(reclamation_step * segment::size));
    if (shard_tracker().should_abort_on_bad_alloc()) {
        llogger.error("Aborting due to segment allocation failure");
        abort();
    }
    return nullptr;
}
// Moves an LSA-owned segment into the free state. The segment remains
// owned by the pool (its bit stays set in _lsa_owned_segments_bitmap).
void segment_pool::deallocate_segment(segment* seg)
{
    const auto idx = idx_from_segment(seg);
    assert(_lsa_owned_segments_bitmap.test(idx));
    _lsa_free_segments_bitmap.set(idx);
    ++_free_segments;
}
// Grows the free-segment reserve up to _emergency_reserve_max. Throws
// std::bad_alloc if a segment cannot be obtained.
void segment_pool::refill_emergency_reserve() {
    while (_free_segments < _emergency_reserve_max) {
        auto seg = allocate_segment(_emergency_reserve_max, _emergency_reserve_max - _free_segments);
        if (!seg) {
            throw std::bad_alloc();
        }
        // allocate_segment() hands out an in-use segment; bump the counter
        // so free_segment()'s decrement keeps the accounting balanced.
        ++_segments_in_use;
        free_segment(seg);
    }
}
// Returns the descriptor slot for seg, computed from the segment's offset
// within the pool's contiguous address range.
segment_descriptor&
segment_pool::descriptor(const segment* seg) {
    uintptr_t seg_addr = reinterpret_cast<uintptr_t>(seg);
    uintptr_t index = (seg_addr - _segments_base) >> segment::size_shift;
    return _segments[index];
}
// Returns the segment containing obj, or nullptr when obj is not
// LSA-managed (the corresponding descriptor has no owning region).
segment*
segment_pool::containing_segment(const void* obj) const {
    auto addr = reinterpret_cast<uintptr_t>(obj);
    auto offset = addr & (segment::size - 1);
    auto index = (addr - _segments_base) >> segment::size_shift;
    auto& desc = _segments[index];
    if (desc._region) {
        // Round down to the segment boundary.
        return reinterpret_cast<segment*>(addr - offset);
    } else {
        return nullptr;
    }
}
// Maps a descriptor back to its segment via the descriptor's index in the
// _segments array. Only valid for descriptors of in-use segments.
segment*
segment_pool::segment_from(const segment_descriptor& desc) {
    assert(desc._region);
    auto index = &desc - &_segments[0];
    return reinterpret_cast<segment*>(_segments_base + (index << segment::size_shift));
}
// Allocates a segment, dipping into the emergency reserve down to the
// current goal. On failure sets the allocation-failure flag and throws.
segment*
segment_pool::allocate_or_fallback_to_reserve() {
    auto seg = allocate_segment(_current_emergency_reserve_goal);
    if (!seg) {
        _allocation_failure_flag = true;
        throw std::bad_alloc();
    }
    return seg;
}
// Allocates a fresh, fully-free segment and assigns it to region r.
segment*
segment_pool::new_segment(region::impl* r) {
    auto seg = allocate_or_fallback_to_reserve();
    ++_segments_in_use;
    segment_descriptor& desc = descriptor(seg);
    desc._free_space = segment::size;
    desc._region = r;
    return seg;
}
// Convenience overload: looks up the descriptor and frees the segment.
void segment_pool::free_segment(segment* seg) noexcept {
    free_segment(seg, descriptor(seg));
}
// Detaches the segment from its region and returns it to the free pool.
void segment_pool::free_segment(segment* seg, segment_descriptor& desc) noexcept {
    llogger.trace("Releasing segment {}", seg);
    desc._region = nullptr;
    deallocate_segment(seg);
    --_segments_in_use;
}
// Sizes all per-segment structures to cover the shard's entire memory
// layout; segments are indexed from the first segment-aligned address.
segment_pool::segment_pool()
    : _layout(memory::get_memory_layout())
    , _segments_base(align_down(_layout.start, (uintptr_t)segment::size))
    , _segments(max_segments())
    , _lsa_owned_segments_bitmap(max_segments())
    , _lsa_free_segments_bitmap(max_segments())
{
}
// Startup priming: temporarily claims as much memory as possible for LSA so
// LSA ends up occupying the top of the address space, then releases the
// low-address part for general (non-LSA) use.
void segment_pool::prime(size_t available_memory, size_t min_free_memory) {
    auto old_emergency_reserve = std::exchange(_emergency_reserve_max, std::numeric_limits<size_t>::max());
    try {
        // Allocate all of memory so that we occupy the top part. Afterwards, we'll start
        // freeing from the bottom.
        _non_lsa_reserve = 0;
        refill_emergency_reserve();
    } catch (std::bad_alloc&) {
        _emergency_reserve_max = old_emergency_reserve;
    }
    // We want to leave more free memory than just min_free_memory() in order to reduce
    // the frequency of expensive segment-migrating reclaim() called by the seastar allocator.
    size_t min_gap = 1 * 1024 * 1024;
    size_t max_gap = 64 * 1024 * 1024;
    size_t gap = std::min(max_gap, std::max(available_memory / 16, min_gap));
    _non_lsa_reserve = min_free_memory + gap;
    // Since the reclaimer is not yet in place, free some low memory for general use
    reclaim_segments(_non_lsa_reserve / segment::size);
}
#else
// Segment pool version for the standard allocator. Slightly less efficient
// than the version for seastar's allocator.
// Segment pool version for the standard allocator. Slightly less efficient
// than the version for seastar's allocator.
class segment_pool {
    // Deleter that returns a segment's memory to the system and credits the
    // emulated per-shard memory budget.
    class segment_deleter {
        segment_pool* _pool;
    public:
        explicit segment_deleter(segment_pool* pool) : _pool(pool) {}
        void operator()(segment* seg) const noexcept {
            if (seg) {
                ::free(seg);
                _pool->_std_memory_available += segment::size;
            }
        }
    };
    std::unordered_map<const segment*, segment_descriptor> _segments;
    std::unordered_map<const segment_descriptor*, segment*> _segment_descs;
    std::stack<std::unique_ptr<segment, segment_deleter>> _free_segments;
    size_t _segments_in_use{};
    size_t _non_lsa_memory_in_use = 0;
    size_t _std_memory_available = size_t(1) << 30; // emulate 1GB per shard
    friend segment_deleter;
public:
    void prime(size_t available_memory, size_t min_free_memory) {}
    // Allocates (or recycles) a segment and assigns it to region r.
    segment* new_segment(region::impl* r) {
        if (_free_segments.empty()) {
            if (_std_memory_available < segment::size) {
                throw std::bad_alloc();
            }
            std::unique_ptr<segment, segment_deleter> seg{new (with_alignment(segment::size)) segment, segment_deleter(this)};
            _std_memory_available -= segment::size;
            _free_segments.push(std::move(seg));
        }
        ++_segments_in_use;
        auto seg = _free_segments.top().release();
        _free_segments.pop();
        assert((reinterpret_cast<uintptr_t>(seg) & (sizeof(segment) - 1)) == 0);
        segment_descriptor& desc = _segments[seg];
        desc._free_space = segment::size;
        desc._region = r;
        _segment_descs[&desc] = seg;
        return seg;
    }
    segment_descriptor& descriptor(const segment* seg) {
        auto i = _segments.find(seg);
        if (i != _segments.end()) {
            return i->second;
        } else {
            // Unknown segment: materialize a region-less descriptor for it.
            segment_descriptor& desc = _segments[seg];
            desc._region = nullptr;
            return desc;
        }
    }
    segment* segment_from(segment_descriptor& desc) {
        auto i = _segment_descs.find(&desc);
        assert(i != _segment_descs.end());
        return i->second;
    }
    void free_segment(segment* seg, segment_descriptor& desc) {
        free_segment(seg);
    }
    // Unregisters the segment and caches it in the free stack; memory is
    // only returned to the system by reclaim_segments().
    void free_segment(segment* seg) {
        --_segments_in_use;
        auto i = _segments.find(seg);
        assert(i != _segments.end());
        _segment_descs.erase(&i->second);
        _segments.erase(i);
        std::unique_ptr<segment, segment_deleter> useg{seg, segment_deleter(this)};
        _free_segments.push(std::move(useg));
    }
    segment* containing_segment(const void* obj) const {
        uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
        auto seg = reinterpret_cast<segment*>(align_down(addr, static_cast<uintptr_t>(segment::size)));
        auto i = _segments.find(seg);
        if (i == _segments.end()) {
            return nullptr;
        }
        return seg;
    }
    size_t segments_in_use() const;
    // The emergency-reserve / failure-flag machinery is a no-op here.
    size_t current_emergency_reserve_goal() const { return 0; }
    void set_current_emergency_reserve_goal(size_t goal) { }
    void set_emergency_reserve_max(size_t new_size) { }
    size_t emergency_reserve_max() { return 0; }
    void clear_allocation_failure_flag() { }
    bool allocation_failure_flag() { return false; }
    void refill_emergency_reserve() {}
    void update_non_lsa_memory_in_use(ssize_t n) {
        _non_lsa_memory_in_use += n;
    }
    size_t non_lsa_memory_in_use() const {
        return _non_lsa_memory_in_use;
    }
    size_t total_memory_in_use() const {
        return _non_lsa_memory_in_use + _segments_in_use * segment::size;
    }
    size_t unreserved_free_segments() const {
        return 0;
    }
    void set_region(const segment* seg, region::impl* r) {
        set_region(descriptor(seg), r);
    }
    void set_region(segment_descriptor& desc, region::impl* r) {
        desc._region = r;
    }
    // Drops up to target cached free segments; the deleter frees the memory.
    size_t reclaim_segments(size_t target) {
        size_t reclaimed = 0;
        while (reclaimed < target && !_free_segments.empty()) {
            _free_segments.pop();
            ++reclaimed;
        }
        return reclaimed;
    }
    void reclaim_all_free_segments() {
        reclaim_segments(std::numeric_limits<size_t>::max());
    }

    struct stats {
        size_t segments_migrated;
        size_t segments_compacted;
        uint64_t memory_allocated;
        uint64_t memory_compacted;
    };
private:
    stats _stats{};
public:
    const stats& statistics() const { return _stats; }
    void on_segment_migration() { _stats.segments_migrated++; }
    void on_segment_compaction(size_t used_space);
    void on_memory_allocation(size_t size);
    size_t free_segments() const { return 0; }
public:
    class reservation_goal;
};
#endif
// Statistics hooks shared by both segment_pool implementations.

void segment_pool::on_segment_compaction(size_t used_size) {
    _stats.segments_compacted++;
    _stats.memory_compacted += used_size;
}

void segment_pool::on_memory_allocation(size_t size) {
    _stats.memory_allocated += size;
}
// RAII wrapper to maintain segment_pool::current_emergency_reserve_goal():
// installs a new goal on construction and restores the previous one on
// destruction.
class segment_pool::reservation_goal {
    segment_pool& _sp;
    size_t _old_goal;
public:
    reservation_goal(segment_pool& sp, size_t goal)
            : _sp(sp), _old_goal(_sp.current_emergency_reserve_goal()) {
        _sp.set_current_emergency_reserve_goal(goal);
    }
    ~reservation_goal() {
        _sp.set_current_emergency_reserve_goal(_old_goal);
    }
};

size_t segment_pool::segments_in_use() const {
    return _segments_in_use;
}
// One segment pool per shard.
static thread_local segment_pool shard_segment_pool;

// segment's bookkeeping methods delegate to the descriptor held in the
// shard's segment pool.

void segment::record_alloc(segment::size_type size) {
    shard_segment_pool.descriptor(this).record_alloc(size);
}

void segment::record_free(segment::size_type size) {
    shard_segment_pool.descriptor(this).record_free(size);
}

bool segment::is_empty() const {
    return shard_segment_pool.descriptor(this).is_empty();
}

occupancy_stats
segment::occupancy() const {
    return { shard_segment_pool.descriptor(this)._free_space, segment::size };
}
//
// For interface documentation see logalloc::region and allocation_strategy.
//
// Allocation dynamics.
//
// Objects are allocated inside fixed-size segments. Objects don't cross
// segment boundary. Active allocations are served from a single segment using
// bump-the-pointer method. That segment is called the active segment. When
// active segment fills up, it is closed. Closed segments are kept in a heap
// which orders them by occupancy. As objects are freed, the segment become
// sparser and are eventually released. Objects which are too large are
// allocated using standard allocator.
//
// Segment layout.
//
// Objects in a segment are laid out sequentially. Each object is preceded by
// a descriptor (see object_descriptor). Object alignment is respected, so if
// there is a gap between the end of current object and the next object's
// descriptor, a trunk of the object descriptor is left right after the
// current object with the flags byte indicating the amount of padding.
//
// Per-segment metadata is kept in a separate array, managed by segment_pool
// object.
//
class region_impl final : public basic_region_impl {
// Serialized object descriptor format:
// byte0 byte1 ... byte[n-1]
// bit0-bit5: ULEB64 significand
// bit6: 1 iff first byte
// bit7: 1 iff last byte
// This format allows decoding both forwards and backwards (by scanning for bit7/bit6 respectively);
// backward decoding is needed to recover the descriptor from the object pointer when freeing.
//
// Significand interpretation (value = n):
// even: dead object, size n/2 (including descriptor)
// odd: migrate_fn_type at index n/2, from static_migrators()
class object_descriptor {
private:
    uint32_t _n; // the significand; see the encoding notes above
private:
    explicit object_descriptor(uint32_t n) : _n(n) {}
public:
    // Migrator ids must leave headroom for the index*2+1 encoding below.
    static_assert(migrators::maximum_id_value <= std::numeric_limits<uint32_t>::max() &&
                  uint64_t(migrators::maximum_id_value) * 2 + 1 <= std::numeric_limits<uint32_t>::max());

    // Descriptor for a live object: odd significand encodes the migrator index.
    object_descriptor(allocation_strategy::migrate_fn migrator)
        : _n(migrator->index() * 2 + 1)
    { }

    // Descriptor for a dead (freed) span: even significand encodes the size
    // (including the descriptor itself).
    static object_descriptor make_dead(size_t size) {
        return object_descriptor(size * 2);
    }

    allocation_strategy::migrate_fn migrator() const {
        return static_migrators()[_n / 2];
    }

    uint8_t alignment() const {
        return migrator()->align();
    }

    // excluding descriptor
    segment::size_type live_size(const void* obj) const {
        return migrator()->size(obj);
    }

    // including descriptor
    segment::size_type dead_size() const {
        return _n / 2;
    }

    bool is_live() const {
        return (_n & 1) == 1;
    }

    // Number of bytes the ULEB64 encoding of _n occupies (6 bits per byte).
    segment::size_type encoded_size() const {
        return log2floor(_n) / 6 + 1; // 0 is illegal
    }

    // Canonical forward encoding: 6 payload bits per byte, bit6 marks the
    // first byte, bit7 marks the last.
    void encode(char*& pos) const {
        uint64_t b = 64;
        auto n = _n;
        do {
            b |= n & 63;
            n >>= 6;
            if (!n) {
                b |= 128;
            }
            *pos++ = b;
            b = 0;
        } while (n);
    }

    // non-canonical encoding to allow padding (for alignment); encoded_size must be
    // sufficient (greater than this->encoded_size())
    void encode(char*& pos, size_t encoded_size) const {
        uint64_t b = 64;
        auto n = _n;
        do {
            b |= n & 63;
            n >>= 6;
            if (!--encoded_size) {
                b |= 128;
            }
            *pos++ = b;
            b = 0;
        } while (encoded_size);
    }

    // Decodes starting at pos, advancing pos past the descriptor.
    static object_descriptor decode_forwards(const char*& pos) {
        unsigned n = 0;
        unsigned shift = 0;
        auto p = pos; // avoid aliasing; p++ doesn't touch memory
        uint8_t b;
        do {
            b = *p++;
            if (shift < 32) {
                // non-canonical encoding can cause large shift; undefined in C++
                n |= uint32_t(b & 63) << shift;
            }
            shift += 6;
        } while ((b & 128) == 0);
        pos = p;
        return object_descriptor(n);
    }

    // Decodes the descriptor that ends just before pos (scanning for the
    // first-byte marker, bit6), leaving pos at its first byte.
    static object_descriptor decode_backwards(const char*& pos) {
        unsigned n = 0;
        uint8_t b;
        auto p = pos; // avoid aliasing; --p doesn't touch memory
        do {
            b = *--p;
            n = (n << 6) | (b & 63);
        } while ((b & 64) == 0);
        pos = p;
        return object_descriptor(n);
    }

    friend std::ostream& operator<<(std::ostream& out, const object_descriptor& desc) {
        if (!desc.is_live()) {
            return out << sprint("{free %d}", desc.dead_size());
        } else {
            auto m = desc.migrator();
            // The object starts right after the descriptor, at the
            // migrator-mandated alignment.
            auto x = reinterpret_cast<uintptr_t>(&desc) + sizeof(desc);
            x = align_up(x, m->align());
            auto obj = reinterpret_cast<const void*>(x);
            return out << sprint("{migrator=%p, alignment=%d, size=%d}",
                                 (void*)m, m->align(), m->size(obj));
        }
    }
};
private:
region* _region = nullptr;
region_group* _group = nullptr;
segment* _active = nullptr;
size_t _active_offset;
segment_descriptor_hist _segment_descs; // Contains only closed segments
occupancy_stats _closed_occupancy;
occupancy_stats _non_lsa_occupancy;
// This helps us keeping track of the region_group* heap. That's because we call update before
// we have a chance to update the occupancy stats - mainly because at this point we don't know
// what will we do with the new segment. Also, because we are not ever interested in the
// fraction used, we'll keep it as a scalar and convert when we need to present it as an
// occupancy. We could actually just present this as a scalar as well and never use occupancies,
// but consistency is good.
size_t _evictable_space = 0;
// This is a mask applied to _evictable_space with bitwise-and before it's returned from evictable_space().
// Used for forcing the result to zero without using conditionals.
size_t _evictable_space_mask = std::numeric_limits<size_t>::max();
bool _evictable = false;
region_sanitizer _sanitizer;
uint64_t _id;
eviction_fn _eviction_fn;
region_group::region_heap::handle_type _heap_handle;
private:
struct compaction_lock {
region_impl& _region;
bool _prev;
compaction_lock(region_impl& r)
: _region(r)
, _prev(r._reclaiming_enabled)
{
_region._reclaiming_enabled = false;
}
~compaction_lock() {
_region._reclaiming_enabled = _prev;
}
};
void* alloc_small(allocation_strategy::migrate_fn migrator, segment::size_type size, size_t alignment) {
if (!_active) {
_active = new_segment();
_active_offset = 0;
}
auto desc = object_descriptor(migrator);
auto desc_encoded_size = desc.encoded_size();
size_t obj_offset = align_up(_active_offset + desc_encoded_size, alignment);
if (obj_offset + size > segment::size) {
close_and_open();
return alloc_small(migrator, size, alignment);
}
auto old_active_offset = _active_offset;
auto pos = _active->at<char>(_active_offset);
// Use non-canonical encoding to allow for alignment pad
desc.encode(pos, obj_offset - _active_offset);
_active_offset = obj_offset + size;
_active->record_alloc(_active_offset - old_active_offset);
return pos;
}
template<typename Func>
void for_each_live(segment* seg, Func&& func) {
// scylla-gdb.py:scylla_lsa_segment is coupled with this implementation.
static_assert(std::is_same<void, std::result_of_t<Func(const object_descriptor*, void*)>>::value, "bad Func signature");
auto pos = seg->at<const char>(0);
while (pos < seg->at<const char>(segment::size)) {
auto old_pos = pos;
const auto desc = object_descriptor::decode_forwards(pos);
if (desc.is_live()) {
auto size = desc.live_size(pos);
func(&desc, const_cast<char*>(pos));
pos += size;
} else {
pos = old_pos + desc.dead_size();
}
}
}
void close_active() {
if (!_active) {
return;
}
if (_active_offset < segment::size) {
auto desc = object_descriptor::make_dead(segment::size - _active_offset);
auto pos =_active->at<char>(_active_offset);
desc.encode(pos);
}
llogger.trace("Closing segment {}, used={}, waste={} [B]", _active, _active->occupancy(), segment::size - _active_offset);
_closed_occupancy += _active->occupancy();
_segment_descs.push(shard_segment_pool.descriptor(_active));
_active = nullptr;
}
void free_segment(segment_descriptor& desc) noexcept {
free_segment(shard_segment_pool.segment_from(desc), desc);
}
void free_segment(segment* seg) noexcept {
free_segment(seg, shard_segment_pool.descriptor(seg));
}
void free_segment(segment* seg, segment_descriptor& desc) noexcept {
shard_segment_pool.free_segment(seg, desc);
if (_group) {
_evictable_space -= segment_size;
_group->decrease_usage(_heap_handle, -segment::size);
}
}
segment* new_segment() {
segment* seg = shard_segment_pool.new_segment(this);
if (_group) {
_evictable_space += segment_size;
_group->increase_usage(_heap_handle, segment::size);
}
return seg;
}
void compact(segment* seg, segment_descriptor& desc) {
++_invalidate_counter;
for_each_live(seg, [this] (const object_descriptor* desc, void* obj) {
auto size = desc->live_size(obj);
auto dst = alloc_small(desc->migrator(), size, desc->alignment());
_sanitizer.on_migrate(obj, size, dst);
desc->migrator()->migrate(obj, dst, size);
});
free_segment(seg, desc);
}
void close_and_open() {
segment* new_active = new_segment();
close_active();
_active = new_active;
_active_offset = 0;
}
static uint64_t next_id() {
static std::atomic<uint64_t> id{0};
return id.fetch_add(1);
}
struct degroup_temporarily {
region_impl* impl;
region_group* group;
explicit degroup_temporarily(region_impl* impl)
: impl(impl), group(impl->_group) {
if (group) {
group->del(impl);
}
}
~degroup_temporarily() {
if (group) {
group->add(impl);
}
}
};
public:
explicit region_impl(region* region, region_group* group = nullptr)
: _region(region), _group(group), _id(next_id())
{
_preferred_max_contiguous_allocation = max_managed_object_size;
tracker_instance._impl->register_region(this);
try {
if (group) {
group->add(this);
}
} catch (...) {
tracker_instance._impl->unregister_region(this);
throw;
}
}
virtual ~region_impl() {
_sanitizer.on_region_destruction();
tracker_instance._impl->unregister_region(this);
while (!_segment_descs.empty()) {
auto& desc = _segment_descs.one_of_largest();
_segment_descs.pop_one_of_largest();
assert(desc.is_empty());
free_segment(desc);
}
_closed_occupancy = {};
if (_active) {
assert(_active->is_empty());
free_segment(_active);
_active = nullptr;
}
if (_group) {
_group->del(this);
}
}
region_impl(region_impl&&) = delete;
region_impl(const region_impl&) = delete;
bool empty() const {
return occupancy().used_space() == 0;
}
occupancy_stats occupancy() const {
occupancy_stats total = _non_lsa_occupancy;
total += _closed_occupancy;
if (_active) {
total += _active->occupancy();
}
return total;
}
region_group* group() {
return _group;
}
occupancy_stats compactible_occupancy() const {
return _closed_occupancy;
}
occupancy_stats evictable_occupancy() const {
return occupancy_stats(0, _evictable_space & _evictable_space_mask);
}
void ground_evictable_occupancy() {
_evictable_space_mask = 0;
if (_group) {
_group->decrease_evictable_usage(_heap_handle);
}
}
//
// Returns true if this region can be compacted and compact() will make forward progress,
// so that this will eventually stop:
//
// while (is_compactible()) { compact(); }
//
bool is_compactible() const {
return _reclaiming_enabled
&& (_closed_occupancy.free_space() >= 2 * segment::size)
&& _segment_descs.contains_above_min();
}
bool is_idle_compactible() {
return is_compactible();
}
virtual void* alloc(allocation_strategy::migrate_fn migrator, size_t size, size_t alignment) override {
compaction_lock _(*this);
memory::on_alloc_point();
shard_segment_pool.on_memory_allocation(size);
if (size > max_managed_object_size) {
auto ptr = standard_allocator().alloc(migrator, size, alignment);
// This isn't very acurrate, the correct free_space value would be
// malloc_usable_size(ptr) - size, but there is no way to get
// the exact object size at free.
auto allocated_size = malloc_usable_size(ptr);
_non_lsa_occupancy += occupancy_stats(0, allocated_size);
if (_group) {
_evictable_space += allocated_size;
_group->increase_usage(_heap_handle, allocated_size);
}
shard_segment_pool.update_non_lsa_memory_in_use(allocated_size);
return ptr;
} else {
auto ptr = alloc_small(migrator, (segment::size_type) size, alignment);
_sanitizer.on_allocation(ptr, size);
return ptr;
}
}
private:
void on_non_lsa_free(void* obj) noexcept {
auto allocated_size = malloc_usable_size(obj);
_non_lsa_occupancy -= occupancy_stats(0, allocated_size);
if (_group) {
_evictable_space -= allocated_size;
_group->decrease_usage(_heap_handle, allocated_size);
}
shard_segment_pool.update_non_lsa_memory_in_use(-allocated_size);
}
public:
virtual void free(void* obj) noexcept override {
compaction_lock _(*this);
segment* seg = shard_segment_pool.containing_segment(obj);
if (!seg) {
on_non_lsa_free(obj);
standard_allocator().free(obj);
return;
}
auto pos = reinterpret_cast<const char*>(obj);
auto desc = object_descriptor::decode_backwards(pos);
free(obj, desc.live_size(obj));
}
virtual void free(void* obj, size_t size) noexcept override {
compaction_lock _(*this);
segment* seg = shard_segment_pool.containing_segment(obj);
if (!seg) {
on_non_lsa_free(obj);
standard_allocator().free(obj, size);
return;
}
_sanitizer.on_free(obj, size);
segment_descriptor& seg_desc = shard_segment_pool.descriptor(seg);
auto pos = reinterpret_cast<const char*>(obj);
auto old_pos = pos;
auto desc = object_descriptor::decode_backwards(pos);
auto dead_size = size + (old_pos - pos);
desc = object_descriptor::make_dead(dead_size);
auto npos = const_cast<char*>(pos);
desc.encode(npos);
if (seg != _active) {
_closed_occupancy -= seg->occupancy();
}
seg_desc.record_free(dead_size);
if (seg != _active) {
if (seg_desc.is_empty()) {
_segment_descs.erase(seg_desc);
free_segment(seg, seg_desc);
} else {
_segment_descs.adjust_up(seg_desc);
_closed_occupancy += seg_desc.occupancy();
}
}
}
virtual size_t object_memory_size_in_allocator(const void* obj) const noexcept override {
segment* seg = shard_segment_pool.containing_segment(obj);
if (!seg) {
return standard_allocator().object_memory_size_in_allocator(obj);
} else {
auto pos = reinterpret_cast<const char*>(obj);
auto desc = object_descriptor::decode_backwards(pos);
return desc.encoded_size() + desc.live_size(obj);
}
}
// Merges another region into this region. The other region is made
// to refer to this region.
// Doesn't invalidate references to allocated objects.
void merge(region_impl& other) noexcept {
// degroup_temporarily allocates via binomial_heap::push(), which should not
// fail, because we have a matching deallocation before that and we don't
// allocate between them.
memory::disable_failure_guard dfg;
compaction_lock dct1(*this);
compaction_lock dct2(other);
degroup_temporarily dgt1(this);
degroup_temporarily dgt2(&other);
if (_active && _active->is_empty()) {
shard_segment_pool.free_segment(_active);
_active = nullptr;
}
if (!_active) {
_active = other._active;
other._active = nullptr;
_active_offset = other._active_offset;
if (_active) {
shard_segment_pool.set_region(_active, this);
}
} else {
other.close_active();
}
for (auto& desc : other._segment_descs) {
shard_segment_pool.set_region(desc, this);
}
_segment_descs.merge(other._segment_descs);
_closed_occupancy += other._closed_occupancy;
_non_lsa_occupancy += other._non_lsa_occupancy;
other._closed_occupancy = {};
other._non_lsa_occupancy = {};
// Make sure both regions will notice a future increment
// to the reclaim counter
_invalidate_counter = std::max(_invalidate_counter, other._invalidate_counter);
_sanitizer.merge(other._sanitizer);
other._sanitizer = { };
}
// Returns occupancy of the sparsest compactible segment.
occupancy_stats min_occupancy() const {
if (_segment_descs.empty()) {
return {};
}
return _segment_descs.one_of_largest().occupancy();
}
void compact_single_segment_locked() {
auto& desc = _segment_descs.one_of_largest();
_segment_descs.pop_one_of_largest();
_closed_occupancy -= desc.occupancy();
segment* seg = shard_segment_pool.segment_from(desc);
auto seg_occupancy = desc.occupancy();
llogger.debug("Compacting segment {} from region {}, {}", seg, id(), seg_occupancy);
compact(seg, desc);
shard_segment_pool.on_segment_compaction(seg_occupancy.used_space());
}
// Compacts a single segment
void compact() {
compaction_lock _(*this);
compact_single_segment_locked();
}
void migrate_segment(segment* src, segment_descriptor& src_desc, segment* dst, segment_descriptor& dst_desc) {
++_invalidate_counter;
size_t segment_size;
if (src != _active) {
_segment_descs.erase(src_desc);
_segment_descs.push(dst_desc);
segment_size = segment::size;
} else {
_active = dst;
segment_size = _active_offset;
}
size_t offset = 0;
while (offset < segment_size) {
auto pos = src->at<const char>(offset);
auto dpos = dst->at<char>(offset);
auto old_pos = pos;
auto desc = object_descriptor::decode_forwards(pos);
// Keep same size as before to maintain alignment
desc.encode(dpos, pos - old_pos);
if (desc.is_live()) {
offset += pos - old_pos;
auto size = desc.live_size(pos);
offset += size;
_sanitizer.on_migrate(pos, size, dpos);
desc.migrator()->migrate(const_cast<char*>(pos), dpos, size);
} else {
offset += desc.dead_size();
}
}
shard_segment_pool.on_segment_migration();
}
// Compacts everything. Mainly for testing.
// Invalidates references to allocated objects.
void full_compaction() {
compaction_lock _(*this);
llogger.debug("Full compaction, {}", occupancy());
close_and_open();
segment_descriptor_hist all;
std::swap(all, _segment_descs);
_closed_occupancy = {};
while (!all.empty()) {
auto& desc = all.one_of_largest();
all.pop_one_of_largest();
compact(shard_segment_pool.segment_from(desc), desc);
}
llogger.debug("Done, {}", occupancy());
}
allocation_strategy& allocator() {
return *this;
}
uint64_t id() const {
return _id;
}
// Returns true if this pool is evictable, so that evict_some() can be called.
bool is_evictable() const {
return _evictable && _reclaiming_enabled;
}
memory::reclaiming_result evict_some() {
++_invalidate_counter;
return _eviction_fn();
}
void make_not_evictable() {
_evictable = false;
_eviction_fn = {};
}
void make_evictable(eviction_fn fn) {
_evictable = true;
_eviction_fn = std::move(fn);
}
const eviction_fn& evictor() const {
return _eviction_fn;
}
friend class region;
friend class region_group;
friend class region_group::region_evictable_occupancy_ascending_less_comparator;
};
// Debug-only invariant check: traversing the heap in ordered fashion must yield
// regions with non-increasing evictable total_space. On violation, dumps the
// heap contents and aborts. No-op outside SEASTAR_DEBUG builds.
inline void
region_group_binomial_group_sanity_check(const region_group::region_heap& bh) {
#ifdef SEASTAR_DEBUG
    bool failed = false;
    size_t last = std::numeric_limits<size_t>::max();
    for (auto b = bh.ordered_begin(); b != bh.ordered_end(); b++) {
        auto t = (*b)->evictable_occupancy().total_space();
        if (!(t <= last)) {
            failed = true;
            break;
        }
        last = t;
    }
    if (!failed) {
        return;
    }
    // Fixed format specifiers: %zu for size_t and an explicit widening cast for
    // the uint64_t id — %ld is wrong on platforms where these are not `long`.
    // %p requires a void* argument.
    printf("Sanity checking FAILED, size %zu\n", bh.size());
    for (auto b = bh.ordered_begin(); b != bh.ordered_end(); b++) {
        auto r = (*b);
        auto t = r->evictable_occupancy().total_space();
        printf("  r = %p (id=%llu), occupancy = %zu\n", (void*)r, (unsigned long long)r->id(), t);
    }
    assert(0);
#endif
}
// Sets how many segments a single reclamation cycle attempts to release.
void tracker::set_reclamation_step(size_t step_in_segments) {
    _impl->set_reclamation_step(step_in_segments);
}
// Returns the current reclamation step, in segments.
size_t tracker::reclamation_step() const {
    return _impl->reclamation_step();
}
// Makes allocation failures abort the process instead of throwing.
void tracker::enable_abort_on_bad_alloc() {
    // Plain forward; the delegate returns void so there is nothing to propagate.
    _impl->enable_abort_on_bad_alloc();
}
// True if allocation failures are configured to abort the process.
bool tracker::should_abort_on_bad_alloc() {
    return _impl->should_abort_on_bad_alloc();
}
// Runs one reclamation cycle sized by the configured step, translating the
// released-byte count into a reclaiming_result for the seastar reclaimer hook.
memory::reclaiming_result tracker::reclaim() {
    const auto released = reclaim(_impl->reclamation_step() * segment::size);
    if (released) {
        return memory::reclaiming_result::reclaimed_something;
    }
    return memory::reclaiming_result::reclaimed_nothing;
}
// Orders regions by total evictable space, ascending.
bool
region_group::region_evictable_occupancy_ascending_less_comparator::operator()(region_impl* r1, region_impl* r2) const {
    const auto lhs = r1->evictable_occupancy().total_space();
    const auto rhs = r2->evictable_occupancy().total_space();
    return lhs < rhs;
}
// Creates an ungrouped region backed by a fresh implementation.
region::region()
    : _impl(make_shared<impl>(this))
{ }
// Creates a region registered with the given group.
region::region(region_group& group)
        : _impl(make_shared<impl>(this, &group)) {
}
// Downcasts the type-erased _impl handle to the concrete implementation.
region_impl& region::get_impl() {
    return *static_cast<region_impl*>(_impl.get());
}
// Const overload of the implementation accessor.
const region_impl& region::get_impl() const {
    return *static_cast<const region_impl*>(_impl.get());
}
// Move construction: take over the implementation and re-point it at this handle.
region::region(region&& other)
    : _impl(std::move(other._impl))
{
    get_impl()._region = this;
}
// Move assignment: take over the implementation and re-point it at this handle.
region& region::operator=(region&& other) {
    // Guard against self-move: without it, moving _impl onto itself resets the
    // handle and the subsequent get_impl() dereferences a null implementation.
    if (this != &other) {
        this->_impl = std::move(other._impl);
        get_impl()._region = this;
    }
    return *this;
}
// Destruction is fully handled by the _impl handle's own destructor.
region::~region() = default;
// Reports this region's total occupancy (LSA and non-LSA).
occupancy_stats region::occupancy() const {
    return get_impl().occupancy();
}
// Returns the owning region_group, or nullptr if ungrouped.
region_group* region::group() {
    return get_impl().group();
}
// Merges other into this region; afterwards both handles share this
// region's implementation, so references into `other` remain valid.
void region::merge(region& other) noexcept {
    if (_impl != other._impl) {
        get_impl().merge(other.get_impl());
        other._impl = _impl;
    }
}
// Compacts every closed segment; invalidates references to allocated objects.
void region::full_compaction() {
    get_impl().full_compaction();
}
// Asks the region's eviction function to release some memory, if one is installed
// and reclaiming is enabled; otherwise reports that nothing was reclaimed.
memory::reclaiming_result region::evict_some() {
    auto& impl = get_impl();
    if (!impl.is_evictable()) {
        return memory::reclaiming_result::reclaimed_nothing;
    }
    return impl.evict_some();
}
// Installs fn as the eviction callback and marks the region evictable.
void region::make_evictable(eviction_fn fn) {
    get_impl().make_evictable(std::move(fn));
}
// Forces reported evictable occupancy to zero from now on.
void region::ground_evictable_occupancy() {
    get_impl().ground_evictable_occupancy();
}
// Returns the currently installed eviction callback (may be empty).
const eviction_fn& region::evictor() const {
    return get_impl().evictor();
}
// Renders occupancy as "<percent used>, <used> / <total> [B]".
std::ostream& operator<<(std::ostream& out, const occupancy_stats& stats) {
    const auto percent_used = stats.used_fraction() * 100;
    return out << sprint("%.2f%%, %d / %d [B]",
        percent_used, stats.used_space(), stats.total_space());
}
// Sums the occupancy of every registered region, with reclaiming suspended
// so the set of regions stays stable while we iterate.
occupancy_stats tracker::impl::region_occupancy() {
    reclaiming_lock lock(*this);
    occupancy_stats total{};
    for (auto&& reg : _regions) {
        total += reg->occupancy();
    }
    return total;
}
// Total LSA occupancy: region occupancy plus free segments held by the pool,
// counted as allocated-but-unused space.
occupancy_stats tracker::impl::occupancy() {
    reclaiming_lock lock(*this);
    auto occ = region_occupancy();
    const auto pool_free_bytes = shard_segment_pool.free_segments() * segment::size;
    occ += occupancy_stats(pool_free_bytes, pool_free_bytes);
    return occ;
}
// Memory allocated by the process that is neither part of any region nor
// sitting free in the segment pool.
size_t tracker::impl::non_lsa_used_space() {
    auto free_space_in_lsa = shard_segment_pool.free_segments() * segment_size;
    return memory::stats().allocated_memory() - region_occupancy().total_space() - free_space_in_lsa;
}
// Returns every free segment held by the pool back to the standard allocator.
void tracker::impl::reclaim_all_free_segments()
{
    llogger.debug("Reclaiming all free segments");
    shard_segment_pool.reclaim_all_free_segments();
    llogger.debug("Reclamation done");
}
// Runs full compaction on every region that currently permits reclaiming.
void tracker::impl::full_compaction() {
    reclaiming_lock _(*this);
    llogger.debug("Full compaction on all regions, {}", region_occupancy());
    for (region_impl* r : _regions) {
        if (r->reclaiming_enabled()) {
            r->full_compaction();
        }
    }
    llogger.debug("Compaction done, {}", region_occupancy());
}
// Alternates eviction and segment compaction on an evictable region until the
// pool-wide memory-in-use drops to target_mem_in_use, or the region can yield
// nothing more (empty, or eviction stalls on a non-compactible region).
static void reclaim_from_evictable(region::impl& r, size_t target_mem_in_use) {
    while (true) {
        auto deficit = shard_segment_pool.total_memory_in_use() - target_mem_in_use;
        auto occupancy = r.occupancy();
        auto used = occupancy.used_space();
        if (used == 0) {
            break;
        }
        // Before attempting segment compaction, try to evict at least deficit and one segment more so that
        // for workloads in which eviction order matches allocation order we will reclaim full segments
        // without needing to perform expensive compaction.
        auto used_target = used - std::min(used, deficit + segment::size);
        llogger.debug("Evicting {} bytes from region {}, occupancy={}", used - used_target, r.id(), r.occupancy());
        while (r.occupancy().used_space() > used_target || !r.is_compactible()) {
            if (r.evict_some() == memory::reclaiming_result::reclaimed_nothing) {
                if (r.is_compactible()) { // Need to make forward progress in case there is nothing to evict.
                    break;
                }
                llogger.debug("Unable to evict more, evicted {} bytes", used - r.occupancy().used_space());
                return;
            }
            if (shard_segment_pool.total_memory_in_use() <= target_mem_in_use) {
                llogger.debug("Target met after evicting {} bytes", used - r.occupancy().used_space());
                return;
            }
            if (r.empty()) {
                return;
            }
        }
        llogger.debug("Compacting after evicting {} bytes", used - r.occupancy().used_space());
        r.compact();
    }
}
// Scope timer for reclamation cycles: logs the elapsed time at debug level,
// either on destruction or explicitly (with a rate) via stop(). Timing is only
// captured when the debug log level is enabled.
struct reclaim_timer {
    clock::time_point start;
    bool enabled;
    reclaim_timer()
        : enabled(timing_logger.is_enabled(logging::log_level::debug))
    {
        if (enabled) {
            start = clock::now();
        }
    }
    ~reclaim_timer() {
        if (!enabled) {
            return;
        }
        auto duration = clock::now() - start;
        timing_logger.debug("Reclamation cycle took {} us.",
                        std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(duration).count());
    }
    // Logs elapsed time together with the reclamation rate and disarms the
    // destructor so the cycle is only reported once.
    void stop(size_t released) {
        if (!enabled) {
            return;
        }
        enabled = false;
        auto duration = clock::now() - start;
        auto bytes_per_second = static_cast<float>(released) / std::chrono::duration_cast<std::chrono::duration<float>>(duration).count();
        timing_logger.debug("Reclamation cycle took {} us. Reclamation rate = {} MiB/s",
                        std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(duration).count(),
                        sprint("%.3f", bytes_per_second / (1024*1024)));
    }
};
// Idle-CPU hook: repeatedly compacts the region with the sparsest segment until
// the reactor has pending work or no region is idle-compactible.
reactor::idle_cpu_handler_result tracker::impl::compact_on_idle(reactor::work_waiting_on_reactor check_for_work) {
    if (!_reclaiming_enabled) {
        return reactor::idle_cpu_handler_result::no_more_work;
    }
    reclaiming_lock rl(*this);
    if (_regions.empty()) {
        return reactor::idle_cpu_handler_result::no_more_work;
    }
    // Allow dipping into the emergency reserve while compacting.
    segment_pool::reservation_goal open_emergency_pool(shard_segment_pool, 0);
    // Max-heap: compactible regions first, then by sparsest segment.
    auto cmp = [] (region::impl* c1, region::impl* c2) {
        if (c1->is_idle_compactible() != c2->is_idle_compactible()) {
            return !c1->is_idle_compactible();
        }
        return c2->min_occupancy() < c1->min_occupancy();
    };
    boost::range::make_heap(_regions, cmp);
    while (!check_for_work()) {
        // pop_heap moves the best candidate to the back; push_heap re-inserts it
        // after compaction changed its occupancy.
        boost::range::pop_heap(_regions, cmp);
        region::impl* r = _regions.back();
        if (!r->is_idle_compactible()) {
            return reactor::idle_cpu_handler_result::no_more_work;
        }
        r->compact();
        boost::range::push_heap(_regions, cmp);
    }
    return reactor::idle_cpu_handler_result::interrupted_by_higher_priority_task;
}
// Attempts to release memory_to_release bytes back to the standard allocator.
// Returns the number of bytes actually released.
size_t tracker::impl::reclaim(size_t memory_to_release) {
    // Reclamation steps:
    // 1. Try to release free segments from segment pool and emergency reserve.
    // 2. Compact used segments and/or evict data.
    if (!_reclaiming_enabled) {
        return 0;
    }
    reclaiming_lock rl(*this);
    reclaim_timer timing_guard;
    size_t mem_released;
    {
        // Note: a second, redundant reclaiming_lock used to be taken here;
        // the lock above already covers this entire function.
        constexpr auto max_bytes = std::numeric_limits<size_t>::max() - segment::size;
        auto segments_to_release = align_up(std::min(max_bytes, memory_to_release), segment::size) >> segment::size_shift;
        auto nr_released = shard_segment_pool.reclaim_segments(segments_to_release);
        mem_released = nr_released * segment::size;
        if (mem_released > memory_to_release) {
            return memory_to_release;
        }
    }
    auto compacted = compact_and_evict_locked(memory_to_release - mem_released);
    // compact_and_evict_locked() will not return segments to the standard allocator,
    // so do it here:
    auto nr_released = shard_segment_pool.reclaim_segments(compacted / segment::size);
    return mem_released + nr_released * segment::size;
}
// Public entry to compaction/eviction: takes the reclaiming lock, times the
// cycle, and delegates to the locked worker.
size_t tracker::impl::compact_and_evict(size_t memory_to_release) {
    if (!_reclaiming_enabled) {
        return 0;
    }
    reclaiming_lock rl(*this);
    reclaim_timer timing_guard;
    return compact_and_evict_locked(memory_to_release);
}
// Core reclaim worker; caller holds the reclaiming lock. Returns bytes released.
size_t tracker::impl::compact_and_evict_locked(size_t memory_to_release) {
    //
    // Algorithm outline.
    //
    // Regions are kept in a max-heap ordered so that regions with
    // sparser segments are picked first. Non-compactible regions will be
    // picked last. In each iteration we try to release one whole segment from
    // the region which has the sparsest segment. We do it until we released
    // enough segments or there are no more regions we can compact.
    //
    // When compaction is not sufficient to reclaim space, we evict data from
    // evictable regions.
    //
    // This may run synchronously with allocation, so we should not allocate
    // memory, otherwise we may get std::bad_alloc. Currently we only allocate
    // in the logger when debug level is enabled. It's disabled during normal
    // operation. Having it is still valuable during testing and in most cases
    // should work just fine even if allocates.
    size_t mem_released = 0;
    size_t mem_in_use = shard_segment_pool.total_memory_in_use();
    auto target_mem = mem_in_use - std::min(mem_in_use, memory_to_release - mem_released);
    llogger.debug("Compacting, requested {} bytes, {} bytes in use, target is {}",
        memory_to_release, mem_in_use, target_mem);
    // Allow dipping into reserves while compacting
    segment_pool::reservation_goal open_emergency_pool(shard_segment_pool, 0);
    auto cmp = [] (region::impl* c1, region::impl* c2) {
        if (c1->is_compactible() != c2->is_compactible()) {
            return !c1->is_compactible();
        }
        return c2->min_occupancy() < c1->min_occupancy();
    };
    boost::range::make_heap(_regions, cmp);
    if (llogger.is_enabled(logging::log_level::debug)) {
        llogger.debug("Occupancy of regions:");
        for (region::impl* r : _regions) {
            llogger.debug(" - {}: min={}, avg={}", r->id(), r->min_occupancy(), r->compactible_occupancy());
        }
    }
    while (shard_segment_pool.total_memory_in_use() > target_mem) {
        boost::range::pop_heap(_regions, cmp);
        region::impl* r = _regions.back();
        if (!r->is_compactible()) {
            llogger.trace("Unable to release segments, no compactible pools.");
            break;
        }
        // Prefer evicting if average occupancy ratio is above the compaction threshold to avoid
        // overhead of compaction in workloads where allocation order matches eviction order, where
        // we can reclaim memory by eviction only. In some cases the cost of compaction on allocation
        // would be higher than the cost of repopulating the region with evicted items.
        if (r->is_evictable() && r->occupancy().used_space() >= max_used_space_ratio_for_compaction * r->occupancy().total_space()) {
            reclaim_from_evictable(*r, target_mem);
        } else {
            r->compact();
        }
        boost::range::push_heap(_regions, cmp);
    }
    auto released_during_compaction = mem_in_use - shard_segment_pool.total_memory_in_use();
    if (shard_segment_pool.total_memory_in_use() > target_mem) {
        llogger.debug("Considering evictable regions.");
        // FIXME: Fair eviction
        for (region::impl* r : _regions) {
            if (r->is_evictable()) {
                reclaim_from_evictable(*r, target_mem);
                if (shard_segment_pool.total_memory_in_use() <= target_mem) {
                    break;
                }
            }
        }
    }
    mem_released += mem_in_use - shard_segment_pool.total_memory_in_use();
    llogger.debug("Released {} bytes (wanted {}), {} during compaction",
        mem_released, memory_to_release, released_during_compaction);
    return mem_released;
}
#ifndef SEASTAR_DEFAULT_ALLOCATOR
// Moves the contents of src into dst (an LSA-owned free segment) via the owning
// region, then swaps the two segments' free/owned bookkeeping. Returns false if
// the owning region currently forbids reclaiming.
bool segment_pool::migrate_segment(segment* src, segment* dst)
{
    auto& src_desc = descriptor(src);
    auto& dst_desc = descriptor(dst);
    llogger.debug("Migrating segment {} to {} (region @{})",
        src, dst, src_desc._region);
    {
        if (!src_desc._region->reclaiming_enabled()) {
            llogger.trace("Cannot move segment {}", src);
            return false;
        }
        // dst must be unowned; it inherits src's free-space accounting.
        assert(!dst_desc._region);
        dst_desc._free_space = src_desc._free_space;
        src_desc._region->migrate_segment(src, src_desc, dst, dst_desc);
        assert(_lsa_owned_segments_bitmap.test(idx_from_segment(src)));
    }
    // src becomes free, dst becomes owned by the region.
    _lsa_free_segments_bitmap.set(idx_from_segment(src));
    _lsa_free_segments_bitmap.clear(idx_from_segment(dst));
    dst_desc._region = src_desc._region;
    src_desc._region = nullptr;
    return true;
}
#endif
// Adds a newly constructed region to the tracker's region list.
void tracker::impl::register_region(region::impl* r) {
    reclaiming_lock _(*this);
    _regions.push_back(r);
    llogger.debug("Registered region @{} with id={}", r, r->id());
}
// Removes a region from the tracker's list (erase-remove idiom).
void tracker::impl::unregister_region(region::impl* r) noexcept {
    reclaiming_lock _(*this);
    llogger.debug("Unregistering region, id={}", r->id());
    _regions.erase(std::remove(_regions.begin(), _regions.end(), r), _regions.end());
}
// Registers the "lsa" metrics group. Metric names and descriptions are part of
// the monitoring interface; do not change them casually.
tracker::impl::impl() {
    namespace sm = seastar::metrics;
    _metrics.add_group("lsa", {
        sm::make_gauge("total_space_bytes", [this] { return region_occupancy().total_space(); },
                       sm::description("Holds a current size of allocated memory in bytes.")),
        sm::make_gauge("used_space_bytes", [this] { return region_occupancy().used_space(); },
                       sm::description("Holds a current amount of used memory in bytes.")),
        sm::make_gauge("small_objects_total_space_bytes", [this] { return region_occupancy().total_space() - shard_segment_pool.non_lsa_memory_in_use(); },
                       sm::description("Holds a current size of \"small objects\" memory region in bytes.")),
        sm::make_gauge("small_objects_used_space_bytes", [this] { return region_occupancy().used_space() - shard_segment_pool.non_lsa_memory_in_use(); },
                       sm::description("Holds a current amount of used \"small objects\" memory in bytes.")),
        sm::make_gauge("large_objects_total_space_bytes", [this] { return shard_segment_pool.non_lsa_memory_in_use(); },
                       sm::description("Holds a current size of allocated non-LSA memory.")),
        sm::make_gauge("non_lsa_used_space_bytes", [this] { return non_lsa_used_space(); },
                       sm::description("Holds a current amount of used non-LSA memory.")),
        sm::make_gauge("free_space", [this] { return shard_segment_pool.unreserved_free_segments() * segment_size; },
                       sm::description("Holds a current amount of free memory that is under lsa control.")),
        sm::make_gauge("occupancy", [this] { return region_occupancy().used_fraction() * 100; },
                       sm::description("Holds a current portion (in percents) of the used memory.")),
        sm::make_derive("segments_migrated", [this] { return shard_segment_pool.statistics().segments_migrated; },
                        sm::description("Counts a number of migrated segments.")),
        sm::make_derive("segments_compacted", [this] { return shard_segment_pool.statistics().segments_compacted; },
                        sm::description("Counts a number of compacted segments.")),
        sm::make_derive("memory_compacted", [this] { return shard_segment_pool.statistics().memory_compacted; },
                        sm::description("Counts number of bytes which were copied as part of segment compaction.")),
        sm::make_derive("memory_allocated", [this] { return shard_segment_pool.statistics().memory_allocated; },
                        sm::description("Counts number of bytes which were requested from LSA allocator.")),
    });
}
// All regions must be unregistered before the tracker dies; leaking one here
// is a programming error, so report every leaked region and abort.
tracker::impl::~impl() {
    if (_regions.empty()) {
        return;
    }
    for (auto&& r : _regions) {
        llogger.error("Region with id={} not unregistered!", r->id());
    }
    abort();
}
// Default reclaimer shared by groups constructed without an explicit one.
region_group_reclaimer region_group::no_reclaimer;
// Evictable space of the region at the top of this group's heap,
// or zero when the group holds no regions.
uint64_t region_group::top_region_evictable_space() const {
    if (_regions.empty()) {
        return 0;
    }
    return _regions.top()->evictable_occupancy().total_space();
}
// Returns the top region of the maximal subgroup, or nullptr when no such
// subgroup (or region) exists.
region* region_group::get_largest_region() {
    if (_maximal_rg && !_maximal_rg->_regions.empty()) {
        return _maximal_rg->_regions.top()->_region;
    }
    return nullptr;
}
// Attaches a child group and propagates its memory usage up the hierarchy.
void
region_group::add(region_group* child) {
    child->_subgroup_heap_handle = _subgroups.push(child);
    update(child->_total_memory);
}
// Detaches a child group and subtracts its memory usage from the hierarchy.
void
region_group::del(region_group* child) {
    _subgroups.erase(child->_subgroup_heap_handle);
    update(-child->_total_memory);
}
// Adds a region to this group's heap and accounts its occupancy upwards.
void
region_group::add(region_impl* child) {
    child->_heap_handle = _regions.push(child);
    region_group_binomial_group_sanity_check(_regions);
    update(child->occupancy().total_space());
}
// Removes a region from this group's heap and subtracts its occupancy.
void
region_group::del(region_impl* child) {
    _regions.erase(child->_heap_handle);
    region_group_binomial_group_sanity_check(_regions);
    update(-child->occupancy().total_space());
}
// Execution is permitted only if no group on the path to the root is under
// memory pressure; do_for_each_parent returns the first group that stopped
// the walk, or nullptr if none did.
bool
region_group::execution_permitted() noexcept {
    auto blocked_at = do_for_each_parent(this, [] (auto rg) {
        return rg->under_pressure() ? stop_iteration::yes : stop_iteration::no;
    });
    return blocked_at == nullptr;
}
// Background fiber that replays blocked allocation requests once the group is
// no longer under pressure; runs until shutdown is requested.
future<>
region_group::start_releaser(scheduling_group deferred_work_sg) {
    return with_scheduling_group(deferred_work_sg, [this] {
        // later() defers the first iteration past the constructor.
        return later().then([this] {
            return repeat([this] () noexcept {
                if (_shutdown_requested) {
                    return make_ready_future<stop_iteration>(stop_iteration::yes);
                }
                if (!_blocked_requests.empty() && execution_permitted()) {
                    // Serve one queued request per iteration.
                    auto req = std::move(_blocked_requests.front());
                    _blocked_requests.pop_front();
                    req->allocate();
                    return make_ready_future<stop_iteration>(stop_iteration::no);
                } else {
                    // Block reclaiming to prevent signal() from being called by reclaimer inside wait()
                    // FIXME: handle allocation failures (not very likely) like allocating_section does
                    tracker_reclaimer_lock rl;
                    return _relief.wait().then([] {
                        return stop_iteration::no;
                    });
                }
            });
        });
    });
}
// Constructs a group; the releaser fiber is only started if the reclaimer has
// a finite throttle threshold (i.e. can actually block allocations).
region_group::region_group(region_group *parent, region_group_reclaimer& reclaimer,
        scheduling_group deferred_work_sg)
    : _parent(parent)
    , _reclaimer(reclaimer)
    , _releaser(reclaimer_can_block() ? start_releaser(deferred_work_sg) : make_ready_future<>())
{
    if (_parent) {
        _parent->add(this);
    }
}
// A reclaimer whose throttle threshold is "unlimited" (size_t max) never
// throttles requests, so no releaser fiber is needed for it.
bool region_group::reclaimer_can_block() const {
    constexpr auto unlimited = std::numeric_limits<size_t>::max();
    return _reclaimer.throttle_threshold() != unlimited;
}
// Wakes this group's releaser fiber and, recursively, the fibers of all
// subgroups.  Called once pressure on this group has been relieved.
void region_group::notify_relief() {
    _relief.signal();
    for (region_group* child : _subgroups) {
        child->notify_relief();
    }
}
// Propagates a memory-usage change of `delta` bytes from this group up the
// parent chain, re-evaluating soft and hard pressure on every ancestor.
void region_group::update(ssize_t delta) {
    // Most-enclosing group which was relieved.
    region_group* top_relief = nullptr;
    do_for_each_parent(this, [&top_relief, delta] (region_group* rg) mutable {
        rg->update_maximal_rg();
        rg->_total_memory += delta;
        // Soft pressure is advisory: the reclaimer is nudged but requests
        // are not throttled.
        if (rg->_total_memory >= rg->_reclaimer.soft_limit_threshold()) {
            rg->_reclaimer.notify_soft_pressure();
        } else {
            rg->_reclaimer.notify_soft_relief();
        }
        // Hard pressure throttles requests until usage drops back below the
        // throttle threshold.
        if (rg->_total_memory > rg->_reclaimer.throttle_threshold()) {
            rg->_reclaimer.notify_pressure();
        } else if (rg->_reclaimer.under_pressure()) {
            rg->_reclaimer.notify_relief();
            top_relief = rg;
        }
        return stop_iteration::no;
    });
    // Waking only the outermost relieved group is enough: notify_relief()
    // recurses into all of its subgroups.
    if (top_relief) {
        top_relief->notify_relief();
    }
}
// Snapshots the current LSA emergency-reserve limit so the destructor can
// restore it when the allocating section is left.
allocating_section::guard::guard()
    : _prev(shard_segment_pool.emergency_reserve_max())
{ }
// Restores the emergency-reserve limit captured by the constructor.
allocating_section::guard::~guard() {
    shard_segment_pool.set_emergency_reserve_max(_prev);
}
#ifndef SEASTAR_DEFAULT_ALLOCATOR
// Pre-reserves memory so the section body can run without allocation
// failures: tops up the LSA emergency segment reserve, then reclaims until
// at least _std_reserve bytes of standard-allocator memory are free.
//
// Throws std::bad_alloc if the tracker cannot reclaim enough memory.
void allocating_section::reserve() {
    shard_segment_pool.set_emergency_reserve_max(std::max(_lsa_reserve, _minimum_lsa_emergency_reserve));
    shard_segment_pool.refill_emergency_reserve();

    auto free_bytes = memory::stats().free_memory();
    while (free_bytes < _std_reserve) {
        if (!tracker_instance.reclaim(_std_reserve - free_bytes)) {
            throw std::bad_alloc();
        }
        free_bytes = memory::stats().free_memory();
    }

    shard_segment_pool.clear_allocation_failure_flag();
}
// Called when an allocation inside the section fails.  Invalidates
// outstanding references into the region, doubles whichever reserve ran out
// (LSA segments or standard-allocator head-room), and re-reserves so the
// section can be retried.
void allocating_section::on_alloc_failure(logalloc::region& r) {
    r.allocator().invalidate_references();
    if (shard_segment_pool.allocation_failure_flag()) {
        _lsa_reserve *= 2; // FIXME: decay?
        llogger.debug("LSA allocation failure, increasing reserve in section {} to {} segments", this, _lsa_reserve);
    } else {
        _std_reserve *= 2; // FIXME: decay?
        llogger.debug("Standard allocator failure, increasing head-room in section {} to {} [B]", this, _std_reserve);
    }
    reserve();
}
#else
// Standard-allocator build: there is no segment pool, so there is nothing
// to reserve.
void allocating_section::reserve() {
}
// Standard-allocator build: there are no reserves to grow, so an allocation
// failure simply propagates.
void allocating_section::on_alloc_failure(logalloc::region&) {
    throw std::bad_alloc();
}
#endif
// Sets the number of LSA segments reserved before entering the section.
void allocating_section::set_lsa_reserve(size_t reserve) {
    _lsa_reserve = reserve;
}
// Sets the standard-allocator head-room (in bytes) required before entering
// the section.
void allocating_section::set_std_reserve(size_t reserve) {
    _std_reserve = reserve;
}
// Fails a queued allocation request whose timeout expired while it was
// waiting for memory pressure to subside.
void region_group::on_request_expiry::operator()(std::unique_ptr<allocating_function>& func) noexcept {
    func->fail(std::make_exception_ptr(timed_out_error()));
}
// Primes the per-shard segment pool on every shard with the memory limits
// it may use.
future<> prime_segment_pool(size_t available_memory, size_t min_free_memory) {
    return smp::invoke_on_all([=] {
        shard_segment_pool.prime(available_memory, min_free_memory);
    });
}
// Bytes of memory allocated by this shard's segment pool, as tracked by its
// statistics counters.
uint64_t memory_allocated() {
    return shard_segment_pool.statistics().memory_allocated;
}
// Bytes of memory compacted by this shard's segment pool, as tracked by its
// statistics counters.
uint64_t memory_compacted() {
    return shard_segment_pool.statistics().memory_compacted;
}
}
// Orders segments by free space, assuming all segments have the same size.
// This avoids using the occupancy, which entails extra division operations.
template<>
size_t hist_key<logalloc::segment_descriptor>(const logalloc::segment_descriptor& desc) {
    // With uniformly sized segments, free space orders descriptors the same
    // way occupancy would, so it can index the log-histogram directly.
    return desc._free_space;
}
```
|
Malcolm Douglas Farr (March 24, 1884 – April 28, 1956) was an American businessman and politician.
Farr was born in Kenosha, Wisconsin. He went to the Kenosha public schools and to the Northwestern Military Academy. He was the president of the Independent Ice Company and the Sunshine Coal Company. Farr served in the Wisconsin Assembly in 1921 and 1922 as a Republican. He then moved to Phoenix, Arizona, and was the chief executive of a pipe manufacturing company. In 1946, Farr moved to Costa Mesa, California, where he died.
Notes
1884 births
1956 deaths
Politicians from Kenosha, Wisconsin
People from Costa Mesa, California
Businesspeople from Phoenix, Arizona
Businesspeople from Wisconsin
20th-century American politicians
20th-century American businesspeople
Republican Party members of the Wisconsin State Assembly
|
```xml
/**
* @license
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import {MDCComponent} from '@material/base/component';
import {MDCTextFieldHelperTextAdapter} from './adapter';
import {MDCTextFieldHelperTextFoundation} from './foundation';
/**
 * MDC Text Field Helper Text Factory: creates (or wraps, when a foundation
 * is supplied) a helper-text subcomponent from its root element.
 */
export type MDCTextFieldHelperTextFactory =
    (el: HTMLElement, foundation?: MDCTextFieldHelperTextFoundation) =>
        MDCTextFieldHelperText;
/** MDC Text Field Helper Text */
export class MDCTextFieldHelperText extends
    MDCComponent<MDCTextFieldHelperTextFoundation> {
  /** Instantiates the component on the given helper-text root element. */
  static override attachTo(root: HTMLElement): MDCTextFieldHelperText {
    return new MDCTextFieldHelperText(root);
  }

  // Provided for access by MDCTextField component
  get foundationForTextField(): MDCTextFieldHelperTextFoundation {
    return this.foundation;
  }

  override getDefaultFoundation() {
    // DO NOT INLINE this variable. For backward compatibility, foundations take
    // a Partial<MDCFooAdapter>. To ensure we don't accidentally omit any
    // methods, we need a separate, strongly typed adapter variable.
    // tslint:disable:object-literal-sort-keys Methods should be in the same order as the adapter interface.
    const adapter: MDCTextFieldHelperTextAdapter = {
      addClass: (className) => {
        this.root.classList.add(className);
      },
      removeClass: (className) => {
        this.root.classList.remove(className);
      },
      hasClass: (className) => this.root.classList.contains(className),
      getAttr: (attr) => this.root.getAttribute(attr),
      // NOTE(review): safeSetAttribute appears to be a sanitizing wrapper
      // inherited from MDCComponent — confirm its contract.
      setAttr: (attr, value) => {
        this.safeSetAttribute(this.root, attr, value);
      },
      removeAttr: (attr) => {
        this.root.removeAttribute(attr);
      },
      setContent: (content) => {
        this.root.textContent = content;
      },
    };
    // tslint:enable:object-literal-sort-keys
    return new MDCTextFieldHelperTextFoundation(adapter);
  }
}
```
|
```yaml
reader:
name: ami_l1b
short_name: AMI L1b
long_name: GEO-KOMPSAT-2 AMI Level 1b
description: >
GEO-KOMPSAT-2 AMI Level 1b data reader in the NetCDF4 format. The file format and
instrument are described on KMA's website
`here <path_to_url`_.
sensors: [ami]
status: Beta
supports_fsspec: true
default_channels:
reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader
# file pattern keys to sort files by with 'satpy.utils.group_files'
group_keys: ['start_time', 'platform_shortname', 'sensor', 'sector_info']
file_types:
# Example: gk2a_ami_le1b_ir087_fd020ge_201901260310.nc
# Below list is alphabetical
ir087:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir087_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
ir096:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir096_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
ir105:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir105_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
ir112:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir112_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
ir123:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir123_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
ir133:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_ir133_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
nr013:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_nr013_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
nr016:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_nr016_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
sw038:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_sw038_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
vi004:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_vi004_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
vi005:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_vi005_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
vi006:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_vi006_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
vi008:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_vi008_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
wv063:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_wv063_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
wv069:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_wv069_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
wv073:
file_reader: !!python/name:satpy.readers.ami_l1b.AMIL1bNetCDF
file_patterns: ['{platform_shortname:4s}_{sensor:3s}_le1b_wv073_{sector_info:2s}{res_info:s}_{start_time:%Y%m%d%H%M}.nc']
datasets:
# Below list is ordered the same as the table:
# path_to_url
C01:
name: VI004
wavelength: [0.450, 0.470, 0.490]
resolution: 1000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: vi004
file_key: image_pixel_values
C02:
name: VI005
wavelength: [0.495, 0.509, 0.523]
resolution: 1000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: vi005
file_key: image_pixel_values
C03:
name: VI006
wavelength: [0.599, 0.639, 0.679]
resolution: 500
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: vi006
file_key: image_pixel_values
C04:
name: VI008
wavelength: [0.846, 0.863, 0.880]
resolution: 1000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: vi008
file_key: image_pixel_values
C05:
name: NR013
wavelength: [1.363, 1.37, 1.377]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: nr013
file_key: image_pixel_values
C06:
name: NR016
wavelength: [1.590, 1.61, 1.630]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavelength
units: W m-2 um-1 sr-1
reflectance:
standard_name: toa_bidirectional_reflectance
units: "%"
file_type: nr016
file_key: image_pixel_values
C07:
name: SW038
wavelength: [3.74, 3.83, 3.92]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: sw038
file_key: image_pixel_values
C08:
name: WV063
wavelength: [5.79, 6.21, 6.63]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: wv063
file_key: image_pixel_values
C09:
name: WV069
wavelength: [6.74, 6.94, 7.14]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: wv069
file_key: image_pixel_values
C10:
name: WV073
wavelength: [7.24, 7.33, 7.42]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: wv073
file_key: image_pixel_values
C11:
name: IR087
wavelength: [8.415, 8.59, 8.765]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir087
file_key: image_pixel_values
C12:
name: IR096
wavelength: [9.43, 9.62, 9.81]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir096
file_key: image_pixel_values
C13:
name: IR105
wavelength: [10.115, 10.35, 10.585]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir105
file_key: image_pixel_values
C14:
name: IR112
wavelength: [10.90, 11.23, 11.56]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir112
file_key: image_pixel_values
C15:
name: IR123
wavelength: [11.805, 12.36, 12.915]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir123
file_key: image_pixel_values
C16:
name: IR133
wavelength: [13.005, 13.29, 13.575]
resolution: 2000
calibration:
counts:
standard_name: counts
units: 1
radiance:
standard_name: toa_outgoing_radiance_per_unit_wavenumber
units: mW m-2 sr-1 (cm-1)-1
brightness_temperature:
standard_name: toa_brightness_temperature
units: K
file_type: ir133
file_key: image_pixel_values
```
|
```java
/*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* specific language governing permissions and limitations
*/
package org.apache.pulsar.testclient;
import picocli.CommandLine.ITypeConverter;
import picocli.CommandLine.TypeConversionException;
/**
 * Picocli type converter that parses a command-line argument into a strictly
 * positive integer.
 *
 * <p>Both non-numeric input and values {@code <= 0} are reported via
 * {@link TypeConversionException} so that picocli renders a uniform,
 * user-friendly error message instead of surfacing a raw
 * {@link NumberFormatException} stack trace.
 */
public class PositiveNumberParameterConvert implements ITypeConverter<Integer> {
    @Override
    public Integer convert(String value) {
        final int result;
        try {
            result = Integer.parseInt(value);
        } catch (NumberFormatException e) {
            // Wrap the parse failure so picocli reports it like any other
            // conversion error.
            throw new TypeConversionException("Parameter should be a positive integer (found " + value + ")");
        }
        if (result <= 0) {
            throw new TypeConversionException("Parameter should be > 0 (found " + value + ")");
        }
        return result;
    }
}
```
|
```xml
import { IContext } from '../../../connectionResolver';
/** GraphQL query resolvers for client-portal comments. */
const commentQueries = {
  /**
   * Lists every comment attached to the entity identified by
   * `typeId` and `type`.
   */
  async clientPortalComments(
    _root,
    params: { typeId: string; type: string },
    { models }: IContext
  ) {
    const selector = { typeId: params.typeId, type: params.type };
    return models.Comments.find(selector);
  }
};

export default commentQueries;
```
|
```rust
use std::fmt;
/// Enumeration of HTTP status classes.
///
/// A value of this type is obtained from [`Status::class()`], which buckets a
/// status code by its hundreds digit.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum StatusClass {
    /// Indicates a provisional response: a status code of 1XX.
    Informational,
    /// Indicates that a request has succeeded: a status code of 2XX.
    Success,
    /// Indicates that further action needs to be taken by the user agent in
    /// order to fulfill the request: a status code of 3XX.
    Redirection,
    /// Intended for cases in which the client seems to have erred: a status
    /// code of 4XX.
    ClientError,
    /// Indicates cases in which the server is aware that it has erred or is
    /// incapable of performing the request: a status code of 5XX.
    ServerError,
    /// Indicates that the status code is nonstandard and unknown: all other
    /// status codes.
    Unknown
}
// Generates one boolean predicate method (e.g. `is_success`) on
// `StatusClass` that tests for a single variant; `$type` is spliced into
// the generated doc comment.
macro_rules! class_check_fn {
    ($func:ident, $type:expr, $variant:ident) => (
        /// Returns `true` if `self` is a `StatusClass` of
        #[doc=$type]
        /// Returns `false` otherwise.
        #[inline(always)]
        pub fn $func(&self) -> bool {
            *self == StatusClass::$variant
        }
    )
}
impl StatusClass {
    // One `is_*` predicate per variant, generated by `class_check_fn!`.
    class_check_fn!(is_informational, "`Informational` (1XX).", Informational);
    class_check_fn!(is_success, "`Success` (2XX).", Success);
    class_check_fn!(is_redirection, "`Redirection` (3XX).", Redirection);
    class_check_fn!(is_client_error, "`ClientError` (4XX).", ClientError);
    class_check_fn!(is_server_error, "`ServerError` (5XX).", ServerError);
    class_check_fn!(is_unknown, "`Unknown`.", Unknown);
}
/// Structure representing an HTTP status: an integer code.
///
/// A `Status` should rarely be created directly. Instead, an associated
/// constant should be used; one is declared for every status defined in the
/// HTTP standard. If a custom status code _must_ be created, note that it is
/// not possible to set a custom reason phrase.
///
/// ```rust
/// # extern crate rocket;
/// use rocket::http::Status;
///
/// // Create a status from a known constant.
/// let ok = Status::Ok;
/// assert_eq!(ok.code, 200);
/// assert_eq!(ok.reason(), Some("OK"));
///
/// let not_found = Status::NotFound;
/// assert_eq!(not_found.code, 404);
/// assert_eq!(not_found.reason(), Some("Not Found"));
///
/// // Or from a status code: `reason()` returns the phrase when known.
/// let gone = Status::new(410);
/// assert_eq!(gone.code, 410);
/// assert_eq!(gone.reason(), Some("Gone"));
///
/// // `reason()` returns `None` when unknown.
/// let custom = Status::new(599);
/// assert_eq!(custom.code, 599);
/// assert_eq!(custom.reason(), None);
/// ```
///
/// # Responding
///
/// To set a custom `Status` on a response, use a [`response::status`]
/// responder, which enforces correct status-based responses. Alternatively,
/// respond with `(Status, T)` where `T: Responder`, but beware that the
/// response may be invalid if it requires additional headers.
///
/// ```rust
/// # extern crate rocket;
/// # use rocket::get;
/// use rocket::http::Status;
///
/// #[get("/")]
/// fn index() -> (Status, &'static str) {
///     (Status::NotFound, "Hey, there's no index!")
/// }
/// ```
///
/// [`response::status`]: ../response/status/index.html
///
/// # (De)serialization
///
/// `Status` is both `Serialize` and `Deserialize`, represented as a `u16`. For
/// example, [`Status::Ok`] (de)serializes from/to `200`. Any integer in the
/// range `[100, 600)` is allowed to deserialize into a `Status`.
///
/// ```rust
/// # #[cfg(feature = "serde")] mod serde_impl {
/// # use serde as serde;
/// use serde::{Serialize, Deserialize};
/// use rocket::http::Status;
///
/// #[derive(Deserialize, Serialize)]
/// # #[serde(crate = "serde")]
/// struct Foo {
///     status: Status,
/// }
/// # }
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Status {
    /// The HTTP status code associated with this status.
    pub code: u16,
}
impl Default for Status {
    /// The default status is `200 OK`.
    fn default() -> Self {
        Status::Ok
    }
}
// Expands a `code, "code", Name => "Reason"` list into one associated
// `Status` constant per entry, plus the `new`, `class`, `from_code`,
// `reason` and `reason_lossy` methods, whose match arms are generated from
// the same list so constants and lookup tables can never drift apart.
macro_rules! ctrs {
    ($($code:expr, $code_str:expr, $name:ident => $reason:expr),+) => {
        $(
            #[doc="[`Status`] with code <b>"]
            #[doc=$code_str]
            #[doc="</b>."]
            #[allow(non_upper_case_globals)]
            pub const $name: Status = Status { code: $code };
        )+

        /// Creates a new `Status` with `code`. This should be used _only_ to
        /// construct non-standard HTTP statuses. Use an associated constant for
        /// standard statuses.
        ///
        /// # Example
        ///
        /// Create a custom `299` status:
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// let custom = Status::new(299);
        /// assert_eq!(custom.code, 299);
        /// ```
        pub const fn new(code: u16) -> Status {
            Status { code }
        }

        /// Returns the class of a given status.
        ///
        /// # Example
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::{Status, StatusClass};
        ///
        /// let processing = Status::Processing;
        /// assert_eq!(processing.class(), StatusClass::Informational);
        ///
        /// let ok = Status::Ok;
        /// assert_eq!(ok.class(), StatusClass::Success);
        ///
        /// let see_other = Status::SeeOther;
        /// assert_eq!(see_other.class(), StatusClass::Redirection);
        ///
        /// let not_found = Status::NotFound;
        /// assert_eq!(not_found.class(), StatusClass::ClientError);
        ///
        /// let internal_error = Status::InternalServerError;
        /// assert_eq!(internal_error.class(), StatusClass::ServerError);
        ///
        /// let custom = Status::new(600);
        /// assert_eq!(custom.class(), StatusClass::Unknown);
        /// ```
        pub const fn class(self) -> StatusClass {
            match self.code / 100 {
                1 => StatusClass::Informational,
                2 => StatusClass::Success,
                3 => StatusClass::Redirection,
                4 => StatusClass::ClientError,
                5 => StatusClass::ServerError,
                _ => StatusClass::Unknown
            }
        }

        /// Returns a Status given a standard status code `code`. If `code` is
        /// not a known status code, `None` is returned.
        ///
        /// # Example
        ///
        /// Create a `Status` from a known `code`:
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// let not_found = Status::from_code(404);
        /// assert_eq!(not_found, Some(Status::NotFound));
        /// ```
        ///
        /// Create a `Status` from an unknown `code`:
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// let unknown = Status::from_code(600);
        /// assert!(unknown.is_none());
        /// ```
        pub const fn from_code(code: u16) -> Option<Status> {
            match code {
                $($code => Some(Status::$name),)+
                _ => None
            }
        }

        /// Returns the canonical reason phrase if `self` corresponds to a
        /// canonical, known status code. Otherwise, returns `None`.
        ///
        /// # Example
        ///
        /// Reason phrase from a known `code`:
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// assert_eq!(Status::Created.reason(), Some("Created"));
        /// assert_eq!(Status::new(200).reason(), Some("OK"));
        /// ```
        ///
        /// Absent phrase from an unknown `code`:
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// assert_eq!(Status::new(499).reason(), None);
        /// ```
        pub const fn reason(&self) -> Option<&'static str> {
            match self.code {
                $($code => Some($reason),)+
                _ => None
            }
        }

        /// Returns the canonical reason phrase if `self` corresponds to a
        /// canonical, known status code, or an unspecified but relevant reason
        /// phrase otherwise.
        ///
        /// # Example
        ///
        /// ```rust
        /// # extern crate rocket;
        /// use rocket::http::Status;
        ///
        /// assert_eq!(Status::NotFound.reason_lossy(), "Not Found");
        /// assert_eq!(Status::new(100).reason_lossy(), "Continue");
        /// assert!(!Status::new(699).reason_lossy().is_empty());
        /// ```
        pub const fn reason_lossy(&self) -> &'static str {
            if let Some(lossless) = self.reason() {
                return lossless;
            }

            match self.class() {
                StatusClass::Informational => "Informational",
                StatusClass::Success => "Success",
                StatusClass::Redirection => "Redirection",
                StatusClass::ClientError => "Client Error",
                StatusClass::ServerError => "Server Error",
                StatusClass::Unknown => "Unknown"
            }
        }
    };
}
impl Status {
    // Standard status codes with their canonical reason phrases; the same
    // list drives the generated `from_code` and `reason` lookup tables.
    ctrs! {
        100, "100", Continue => "Continue",
        101, "101", SwitchingProtocols => "Switching Protocols",
        102, "102", Processing => "Processing",
        200, "200", Ok => "OK",
        201, "201", Created => "Created",
        202, "202", Accepted => "Accepted",
        203, "203", NonAuthoritativeInformation => "Non-Authoritative Information",
        204, "204", NoContent => "No Content",
        205, "205", ResetContent => "Reset Content",
        206, "206", PartialContent => "Partial Content",
        207, "207", MultiStatus => "Multi-Status",
        208, "208", AlreadyReported => "Already Reported",
        226, "226", ImUsed => "IM Used",
        300, "300", MultipleChoices => "Multiple Choices",
        301, "301", MovedPermanently => "Moved Permanently",
        302, "302", Found => "Found",
        303, "303", SeeOther => "See Other",
        304, "304", NotModified => "Not Modified",
        305, "305", UseProxy => "Use Proxy",
        307, "307", TemporaryRedirect => "Temporary Redirect",
        308, "308", PermanentRedirect => "Permanent Redirect",
        400, "400", BadRequest => "Bad Request",
        401, "401", Unauthorized => "Unauthorized",
        402, "402", PaymentRequired => "Payment Required",
        403, "403", Forbidden => "Forbidden",
        404, "404", NotFound => "Not Found",
        405, "405", MethodNotAllowed => "Method Not Allowed",
        406, "406", NotAcceptable => "Not Acceptable",
        407, "407", ProxyAuthenticationRequired => "Proxy Authentication Required",
        408, "408", RequestTimeout => "Request Timeout",
        409, "409", Conflict => "Conflict",
        410, "410", Gone => "Gone",
        411, "411", LengthRequired => "Length Required",
        412, "412", PreconditionFailed => "Precondition Failed",
        413, "413", PayloadTooLarge => "Payload Too Large",
        414, "414", UriTooLong => "URI Too Long",
        415, "415", UnsupportedMediaType => "Unsupported Media Type",
        416, "416", RangeNotSatisfiable => "Range Not Satisfiable",
        417, "417", ExpectationFailed => "Expectation Failed",
        418, "418", ImATeapot => "I'm a teapot",
        421, "421", MisdirectedRequest => "Misdirected Request",
        422, "422", UnprocessableEntity => "Unprocessable Entity",
        423, "423", Locked => "Locked",
        424, "424", FailedDependency => "Failed Dependency",
        426, "426", UpgradeRequired => "Upgrade Required",
        428, "428", PreconditionRequired => "Precondition Required",
        429, "429", TooManyRequests => "Too Many Requests",
        431, "431", RequestHeaderFieldsTooLarge => "Request Header Fields Too Large",
        451, "451", UnavailableForLegalReasons => "Unavailable For Legal Reasons",
        500, "500", InternalServerError => "Internal Server Error",
        501, "501", NotImplemented => "Not Implemented",
        502, "502", BadGateway => "Bad Gateway",
        503, "503", ServiceUnavailable => "Service Unavailable",
        504, "504", GatewayTimeout => "Gateway Timeout",
        505, "505", HttpVersionNotSupported => "HTTP Version Not Supported",
        506, "506", VariantAlsoNegotiates => "Variant Also Negotiates",
        507, "507", InsufficientStorage => "Insufficient Storage",
        508, "508", LoopDetected => "Loop Detected",
        510, "510", NotExtended => "Not Extended",
        511, "511", NetworkAuthenticationRequired => "Network Authentication Required"
    }
}
impl fmt::Display for Status {
    /// Formats as `"<code> <reason>"` (e.g. `200 OK`), using the lossy
    /// class-level phrase when the code has no canonical reason.
    #[inline(always)]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} {}", self.code, self.reason_lossy())
    }
}
#[cfg(feature = "serde")]
mod serde_impl {
    use super::*;
    use serde::ser::{Serialize, Serializer};
    use serde::de::{Deserialize, Deserializer, Error, Visitor, Unexpected};

    // A `Status` serializes as its bare numeric code.
    impl Serialize for Status {
        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
            serializer.serialize_u16(self.code)
        }
    }

    // Visitor accepting any integer in `[100, 600)`; everything else is
    // rejected with an `invalid_value` error.
    struct DeVisitor;

    impl<'de> Visitor<'de> for DeVisitor {
        type Value = Status;

        fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(formatter, "HTTP status code integer in range [100, 600)")
        }

        fn visit_i64<E: Error>(self, v: i64) -> Result<Self::Value, E> {
            if v < 100 || v >= 600 {
                return Err(E::invalid_value(Unexpected::Signed(v), &self));
            }

            Ok(Status::new(v as u16))
        }

        fn visit_u64<E: Error>(self, v: u64) -> Result<Self::Value, E> {
            if v < 100 || v >= 600 {
                return Err(E::invalid_value(Unexpected::Unsigned(v), &self));
            }

            Ok(Status::new(v as u16))
        }
    }

    impl<'de> Deserialize<'de> for Status {
        fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
            deserializer.deserialize_u16(DeVisitor)
        }
    }
}
```
|
Boudales is an alternative name for several wine grape varieties, including:
Canari noir
Cinsaut
Grolleau (grape)
|
```javascript
"use strict";
// Re-export helper emitted by the TypeScript compiler: copies every
// enumerable property of module `m` onto `exports`, skipping names that are
// already exported.
function __export(m) {
    for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p];
}
Object.defineProperty(exports, "__esModule", { value: true });
// Forward everything from the rxjs-compat implementation of isPromise.
__export(require("rxjs-compat/util/isPromise"));
//# sourceMappingURL=isPromise.js.map
```
|
Pedro do Rosário is a municipality in the state of Maranhão in the Northeast region of Brazil.
The municipality contains a small part of the Baixada Maranhense Environmental Protection Area, a sustainable use conservation unit created in 1991 that has been a Ramsar Site since 2000.
See also
List of municipalities in Maranhão
References
Municipalities in Maranhão
|
John Dorrington (2 June 1881 – 9 January 1944) was an English footballer who played as a goalkeeper. He spent his entire 12-year professional career with Birmingham (renamed from Small Heath in 1905), for which he made 106 appearances in the Football League.
Born in Smethwick in 1881, Dorrington played football for minor clubs in that area. While with Soho Villa, he signed amateur forms with West Bromwich Albion, but never played for their first team. After a year with Kidderminster Harriers of the Birmingham and District League, he joined Football League First Division club Small Heath in 1901. The consistency and reliability of Nat Robinson meant that Dorrington made only ten appearances in his first six seasons. He was first-choice goalkeeper for the next two years before injury intervened, then regained his place in 1909–10 before blood poisoning effectively put an end to his career and nearly cost him his leg. He retired from playing in 1913.
Dorrington then ran several pubs, organised charity football matches, and coached the junior teams at Aston Villa. He died in 1944 at the age of 62.
Personal life
John Dorrington was born on 2 June 1881 in Smethwick, which was then in Staffordshire, the son of John and Julia Dorrington. At the time of the 1891 Census, the family were living in Watt Street, Handsworth, Dorrington's father was working as a steam hammerman, and there were five children. The 1901 Census records the 19-year-old Dorrington still living in the family home in Handsworth and working as a turner. He married Helen Sarah Jones in 1902.
Dorrington died in Birmingham General Hospital on 9 January 1944 after a short illness; he was 62. At the time of his death, he and his wife had a son and a daughter.
Early football career
He played local football for Langley St Michaels and West Smethwick, and spent the 1899–1900 season with Soho Villa of the Birmingham Junior League. In September 1899, the Bromsgrove Messenger "Spectator" thought he was "the least satisfactory member of his team, and seemed to lack confidence in dealing with shots", but five months later, the same reporter thought that "it says much for Dorrington's goalkeeping that the score was not much heavier." During that season, he signed amateur forms with Football League First Division club West Bromwich Albion, but made no appearances for the senior side. In 1900 he joined Kidderminster Harriers, where he earned himself a reputation as "a player with a big future [who] was held by many to be the smartest custodian in the Birmingham and District League."
Dorrington signed for Small Heath, newly promoted to the First Division, in May 1901 as backup for the established Nat Robinson. Robinson was ever-present through the 1901–02 relegation season and into October 1902, when a bad cold kept him at home, allowing Dorrington to make his club and Football League debut as a late replacement in a 2–2 draw away to Burslem Port Vale. The Sports Argus opined that "Good reserve goalkeepers are not picked up at every street corner. But in Dorrington, Small Heath have one who is quite capable of adequately stepping into Robinson's shoes at any time." A 6–1 defeat away to fellow promotion hopefuls Woolwich Arsenal saw Robinson dropped in favour of Dorrington for the last four matches of the season, and three wins were enough to secure runners-up spot. Dorrington began the 1903–04 First Division season as first choice, but ten goals conceded in the first three games by a team disrupted by injuries cost him his place, and Robinson played out the rest of the season.
It was not for another three years, with Robinson away representing the Football League XI against their Irish counterparts, that Dorrington next appeared for the first team, by which the club had renamed itself Birmingham; they lost 2–0 to Sheffield United. He was granted a benefit match to recognise five years' service, and the reserve match against West Bromwich Albion on Christmas Eve 1906 – the last game played at Coventry Road before the club moved to its newly built St Andrew's Ground – was chosen; 2,000 spectators turned up to see Birmingham win 5–2 on a soaking pitch and Dorrington save a penalty.
Regular first-team football
Dorrington finally replaced Robinson as first-choice goalkeeper in November 1907, "it being thought advisable, owing to the latter suffering from nervous strain, to engage him in less strenuous football." It was not a generally popular decision, and it took Dorrington a few matches to settle, but he "soon began to improve, and in a few weeks he was as capable a custodian as could be found".
Nevertheless, the team returned to the second tier for the 1908–09 season, during which Dorrington missed four matches with a shoulder injury but was otherwise ever-present. In his column in Thomson's Weekly News in February 1909, he wrote that "if Birmingham had had anything like luck this season in the matter of freedom from injuries they might now be well in the running for promotion." They finished 11th of 20. He also enhanced his reputation as a saver of penalties with three in two matches in September 1908, one in a draw with Fulham when the taker scuffed his kick, and two in the Lord Mayor's Charity Cup against Aston Villa two days later. Birmingham led 3–1 when Villa were awarded a penalty for handball, which Harry Hampton took and Dorrington saved, but before Joe Bache could reach the rebound, he was tripped; Hampton took the second kick, and Dorrington again saved. Birmingham won the match 5–2. The match referee, J.G.A. Sharpe, wrote afterwards that "Dorrington is a wonder at saving penalties, and Birmingham will do well to play him regular in the first team."
Dorrington began the 1909–10 season in the first team, but after a 2–0 defeat away to Clapton Orient in November in which both goals came from his errors, he missed the next game with a sprained ankle, Arthur Box took over, and Dorrington played only twice more, the following April. The team finished bottom of the table and were re-elected to the League. Box moved on, Dorrington stayed, and again began the season as first choice. In October, he was the only representative of the Birmingham club to be selected for the Birmingham Association eleven to face their London counterparts in an inter-association match; London won 3–0.
Injuries put an end to his career
Facing Wolverhampton Wanderers in November 1910, he dived to block a forward in the act of shooting and was kicked in the shoulder; while he was lying on the ground injured, the forward regained the ball and scored. The initial diagnosis was a broken shoulderblade, although the player believed there was no fracture, and he missed only three matches. During the drawn cup-tie against Oldham Athletic in January 1911, Dorrington suffered a cut knee, which became inflamed; he neglected to have it treated because he wanted to play in the replay. Blood poisoning ensued, an operation was performed at the Queen's Hospital, and "had another few hours elapsed amputation would probably have been necessary." He underwent further procedures over the next two weeks before being discharged from hospital, and later went on holiday to Bournemouth to aid his recovery. He finally retook the field for Birmingham's reserves on 3 April, and in the absence of Horace Bailey from the last match of the Second Division season, "proved that he [had] lost none of his skill as a custodian."
A mistake in the opening match of the 1911–12 season – allowing a ball to bounce over his shoulder and into the net – cost him his place. He made three league appearances in December, standing in for the amateur Bailey, whose business kept him unavailable, and in January 1912 injured an ankle and was out for several months. In 1912–13, his last season as a Birmingham player, his benefit match attracted a crowd of 8,000 and raised some £150. When not required for playing duties, he helped coach the players and acted as a scout for the club. According to the Evening Despatch, "the fact that the "Blues"' Reserves have done so well this season" – the team finished fifth in the Birmingham League that season, and two years later won the title – "is a tribute to his judgment. Most of the reserves have been signed up on the ex-custodian's recommendation." West Midlands football historian Tony Matthews described him as "a grand servant to Blues, totally fearless, with a jovial temperament that did wonders for morale, even in difficult times."
Later life and career
After retiring as a player, Dorrington went into the pub trade, first with the Green Dragon in Kinver, from where he organised a football team to play in the Kidderminster League. He was at the Dudley Arms in Cape Hill, Smethwick, and took over at the Holte Hotel, near Villa Park, in late 1924.
He was active in arranging charity football matches, and inaugurated an annual match between ex-professional players and the local police team in aid of the Smethwick Cripples' Union. In the 1930s, Dorrington worked as a scout for Aston Villa, and managed their Birmingham Combination team. He also ran a newsagent's and tobacconist's shop in Soho Road, Birmingham.
Career statistics
Honours
Small Heath
Football League Second Division runners-up: 1902–03
Sources
References
1881 births
1944 deaths
Footballers from Smethwick
Footballers from Staffordshire
English men's footballers
Men's association football goalkeepers
West Bromwich Albion F.C. players
Kidderminster Harriers F.C. players
Birmingham City F.C. players
English Football League players
Birmingham City F.C. non-playing staff
Aston Villa F.C. non-playing staff
Association football scouts
|
Carphontes is a genus of beetle in the family Cerambycidae, containing the following species:
Carphontes paradoxus Monne & Monne, 2010
Carphontes posticalis Bates, 1881
References
Acanthocinini
|
Richland Township is an inactive township in Scott County, in the U.S. state of Missouri. Richland Township was created in 1822, and so named on account of its rich soil.
References
Townships in Missouri
Townships in Scott County, Missouri
|
Gerda Geertens (born 11 August 1955) is a Dutch composer. She was born in Wildervank, and studied music and philosophy in Groningen. In 1981 she began the study of composition with Klaas de Vries at the Rotterdam Conservatory. Her compositions include chamber music, works for choir and solo voice, and pieces for symphony orchestras.
Works
Geertens is noted for her chamber works. Selected works include:
Sarka song cycle for soprano and piano, 1993: no. 1. Afrika (Michaël Arnoldus Slory), no. 2. Komoto te na Egypte (Michaël Arnoldus Slory), no. 3. Sarka (Michaël Arnoldus Slory)
Nocturnal for flute, violin, violoncello, piano and percussion, 1994
She Weeps Over Rahoon for solo piano, 1985
Amarillis for 4 bamboo flutes (or recorders 4), 1985
Ash and lilac , for instrumental ensemble, 1988
Slinger, for string trio, 1989
Contrast, for saxophone quartet, 1990
Mexitli , Opus 1 for mixed choir and instrumental ensemble, 1981–1982, text: Theun de Vries
Split country, for violin, bass clarinet and tape, 1992
Leave it alone, audio clip, for 15 players, 1994
Heartland, for orchestra, 1994
Trope, for cello, 1987
en SeringenI, for flute, oboe, clarinet, violin, viola, cello, piano, harp, and percussion, 1988
References
1955 births
Living people
20th-century classical composers
Dutch women classical composers
Dutch classical composers
People from Veendam
20th-century women composers
|
The Taipei Trade Office in the Federal Republic of Nigeria () represents the interests of Taiwan in Nigeria in the absence of formal diplomatic relations, functioning as a de facto embassy.
Its counterpart in Taiwan is the Nigeria Trade Office in Taiwan, R.O.C. in New Taipei.
It also has responsibility for Taiwan's interests in Cameroon, Benin, Ghana, Gambia, Liberia and Sierra Leone. Previously, Taiwan had diplomatic relations with Gambia, and there was an Embassy of the Republic of China in Banjul. However, these were broken off in 2013 by President Yahya Jammeh. Liberia similarly broke off diplomatic relations with Taipei in 2003.
It is headed by a Representative, currently Morgan Chao.
History
The Mission was established in Lagos in 1991, before relocating to Abuja in 2001, despite requests from Beijing that it be located outside the Nigerian capital.
In January 2017, the government of Nigeria requested Taiwan to relocate the office back to Lagos from Abuja. On 8 December 2017, the office began the relocation from Abuja to Lagos and on 5 January 2018, the new office in Lagos was officially opened under the name Taipei Trade Office in the Federal Republic of Nigeria.
Representatives
Yang Tien-hsing
Morgan Chao
Vincent W.S. Yang
Andy Yih-Ping Liu
See also
List of diplomatic missions in Nigeria
References
External links
Taipei Trade Office in the Federal Republic of Nigeria
Nigeria
Taiwan
1991 establishments in Nigeria
|
```csharp
using Aurora.Utils;
using System;
using System.Windows;
using System.Windows.Controls;
using Keys = System.Windows.Forms.Keys;
namespace Aurora.Controls {
/// <summary>
/// Interaction logic for Control_SingleKeyEditor.xaml
/// </summary>
public partial class Control_SingleKeyEditor : UserControl {

    /// <summary>A reference to the editor that is currently listening for a keypress, or null if none is.</summary>
    private static Control_SingleKeyEditor listeningEditor;

    // Static constructor so that we only have to add an input event listener once.
    static Control_SingleKeyEditor() {
        Global.InputEvents.KeyDown += InputEvents_KeyDown;
    }

    // Instance constructor to create UI elements
    public Control_SingleKeyEditor() {
        InitializeComponent();
        DataContext = this;
    }

    /// <summary>Assigns or unassigns the <see cref="listeningEditor"/> from this UserControl.
    /// Clicking "Assign" starts listening; clicking again (or on another editor) stops.</summary>
    private void AssignButton_Click(object sender, RoutedEventArgs e) {
        var assigning = listeningEditor != this;
        // Reset the previously listening editor's button (if any) before taking over.
        listeningEditor?.UpdateButtonText(false);
        UpdateButtonText(assigning);
        listeningEditor = assigning ? this : null;
    }

    /// <summary>Updates the assign-button label to reflect whether this editor is listening.</summary>
    private void UpdateButtonText(bool assigning) {
        assignButton.Content = assigning ? "Press a key" : "Assign";
    }

    private static void InputEvents_KeyDown(object sender, SharpDX.RawInput.KeyboardInputEventArgs e) {
        // Snapshot the listener: this raw-input event is dispatched via
        // Dispatcher.Invoke, i.e. it can arrive off the UI thread, so
        // `listeningEditor` may be reassigned (or nulled) between the null
        // check and the dispatched work. Using a local keeps check and use
        // consistent and avoids a NullReferenceException.
        var editor = listeningEditor;
        if (editor != null)
            editor.Dispatcher.Invoke(() => {
                editor.SelectedKey = e.Key;
                editor.UpdateButtonText(false);
                // Only clear the shared slot if no other editor claimed it meanwhile.
                if (listeningEditor == editor)
                    listeningEditor = null;
            });
    }

    // Dependency Property backing SelectedKey so it can be data-bound from XAML.
    public static readonly DependencyProperty SelectedKeyProperty = DependencyProperty.Register("SelectedKey", typeof(Keys), typeof(Control_SingleKeyEditor), new FrameworkPropertyMetadata(default(Keys), FrameworkPropertyMetadataOptions.AffectsRender));
    public Keys SelectedKey {
        get => (Keys)GetValue(SelectedKeyProperty);
        set => SetValue(SelectedKeyProperty, value);
    }
}
}
```
|
The Old Town Hall is a historic town hall building at 10 Kendal Road in Tyngsborough, Massachusetts. The wood-frame building was built in 1834 as a church to house the local Baptist congregation, a role it served until 1857, when it was sold to the town. The styling of the building is predominantly Federal, although its cupola is a late 19th-century Colonial Revival addition. The building was listed on the National Register of Historic Places in 2005.
After suffering a termite infestation in the 1990s(?), the town hall and the nearby Littlefield Library were closed. Their functions were moved to a new civic building away from the center. In 2012, the building underwent a $2.5 million renovation using Community Preservation Funds. The project concluded in December 2013, and the building was reopened to the public in January 2014. It is primarily used for civic events and Special Hearings.
See also
National Register of Historic Places listings in Middlesex County, Massachusetts
References
Tyngsborough
Former seats of local government
Buildings and structures in Middlesex County, Massachusetts
Government buildings completed in 1834
National Register of Historic Places in Middlesex County, Massachusetts
|
James Robert McLachlan is a former Canadian politician, who represented the electoral district of Faro in the Yukon Legislative Assembly from 1985 to 1989 and from 2001 to 2002. He was a member of the Yukon Liberal Party, and the party's leader from 1986 to 1989.
He became the party's interim leader after the resignation of Roger Coles due to criminal charges.
Electoral record
2002 general election
|-
| style="width: 130px" |Liberal
| Jim McLachlan
|align="right"| 181
|align="right"| 28.3%
|align="right"| –
|-
|NDP
|Buzz Burgess
|align="right"| 162
|align="right"| 25.3%
|align="right"| –
|- bgcolor="white"
!align="left" colspan=3|Total
!align="right"|640
!align="right"|100.0%
!align="right"| –
|}
2000 By-election
|-
| Liberal
| Jim McLachlan
|align="right"| 129
|align="right"| 65.5%
|align="right"| +42.6%
|-
|NDP
| Harold Boehm
|align="right"| 66
|align="right"| 33.5%
|align="right"| -43.1%
|- bgcolor="white"
!align="left" colspan=3|Total
!align="right"| 197
!align="right"| 100.0%
!align="right"| –
|}
On the resignation of Trevor Harding (2000).
2000 general election
|-
|NDP
| Trevor Harding
| align="right"| 177
| align="right"| 76.6%
| align="right"| -17.7%
|-
| Liberal
| Jim McLachlan
| align="right"| 53
| align="right"| 22.9%
| align="right"| +17.7%
|- bgcolor="white"
!align="left" colspan=3|Total
! align=right| 231
! align=right| 100.0%
! align=right| –
|}
1992 general election
|-
|NDP
| Trevor Harding
| align="right"| 388
| align="right"| 53.2%
| align="right"| +10.4%
|-
| Liberal
| Jim McLachlan
| align="right"| 337
| align="right"| 46.2%
| align="right"| +9.1%
|- bgcolor="white"
!align="left" colspan=3|Total
! align=right| 729
! align=right| 100.0%
! align=right| –
|}
1989 general election
|-
| NDP
| Maurice Byblow
| align="right"| 194
| align="right"| 42.8%
| align="right"| +9.3%
|-
| Liberal
| Jim McLachlan
| align="right"| 168
| align="right"| 37.1%
| align="right"| -2.2%
|-
|-
! align=left colspan=3|Total
! align=right| 453
! align=right| 100.0%
! align=right| –
|}
1985 general election
|-
| Liberal
| Jim McLachlan
| align="right"| 142
| align="right"| 39.3%
| align="right"| +17.1%
|-
| NDP
| Sibyl Frei
| align="right"| 121
| align="right"| 33.5%
| align="right"| -15.8%
|-
|-
! align=left colspan=3|Total
! align=right| 361
! align=right| 100.0%
! align=right| –
|}
References
Yukon Liberal Party MLAs
Living people
1943 births
Yukon Liberal Party leaders
|
```c++
/****************************************************************************
* MeshLab o o *
* A versatile mesh processing toolbox o o *
* _ O _ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* This program is free software; you can redistribute it and/or modify *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* for more details. *
* *
****************************************************************************/
#include "meshfilter.h"
#include <vcg/complex/algorithms/clean.h>
#include <vcg/complex/algorithms/stat.h>
#include <vcg/complex/algorithms/smooth.h>
#include <vcg/complex/algorithms/hole.h>
#include <vcg/complex/algorithms/refine_loop.h>
#include <vcg/complex/algorithms/bitquad_support.h>
#include <vcg/complex/algorithms/bitquad_creation.h>
#include <vcg/complex/algorithms/clustering.h>
#include <vcg/complex/algorithms/attribute_seam.h>
#include <vcg/complex/algorithms/update/curvature.h>
#include <vcg/complex/algorithms/update/curvature_fitting.h>
#include <vcg/complex/algorithms/pointcloud_normal.h>
#include <vcg/complex/algorithms/isotropic_remeshing.h>
#include <vcg/complex/algorithms/refine_doosabin.h>
#include <vcg/space/fitting3.h>
#include <wrap/gl/glu_tessellator_cap.h>
#include "quadric_simp.h"
using namespace std;
using namespace vcg;
using namespace vcg::tri;
// Polygonal (quad-dominant) helper mesh used only by the Doo-Sabin refinement
// filter. The vcglib component lists below select which per-element attributes
// are stored for the vertices, edges and faces of this mesh.
class PEdge;
class PFace;
class PVertex;
struct PUsedTypes : public UsedTypes<Use<PVertex> ::AsVertexType,
Use<PEdge> ::AsEdgeType,
Use<PFace> ::AsFaceType> {};
class PVertex : public Vertex<PUsedTypes, vertex::Coord3f, vertex::Normal3f, vertex::Qualityf, vertex::Color4b, vertex::BitFlags > {};
class PEdge : public Edge< PUsedTypes, edge::VertexRef, edge::BitFlags> {};
class PFace :public vcg::Face<
PUsedTypes,
face::PolyInfo, // this is necessary if you use component in vcg/simplex/face/component_polygon.h
face::PFVAdj, // Pointer to the vertices (just like FVAdj )
face::PFFAdj, // Per-polygon face-face adjacency (pointers to adjacent faces)
face::Color4b,
face::BitFlags, // bit flags
face::Normal3f, // normal
face::WedgeTexCoord2f // per-wedge texture coordinates
> {};
class PMesh : public tri::TriMesh< vector<PVertex>, vector<PEdge>, vector<PFace> > {};
// Constructor: registers the full list of filter actions exposed by this
// plugin and seeds the "last used" parameter caches for the simplification
// and isotropic-remeshing dialogs.
ExtraMeshFilterPlugin::ExtraMeshFilterPlugin(void)
{
typeList = {
FP_LOOP_SS,
FP_BUTTERFLY_SS,
FP_CLUSTERING,
FP_QUADRIC_SIMPLIFICATION,
FP_QUADRIC_TEXCOORD_SIMPLIFICATION,
FP_EXPLICIT_ISOTROPIC_REMESHING,
FP_MIDPOINT,
FP_REORIENT,
FP_FLIP_AND_SWAP,
FP_ROTATE,
FP_ROTATE_FIT,
FP_PRINCIPAL_AXIS,
FP_SCALE,
FP_CENTER,
FP_INVERT_FACES,
FP_NORMAL_EXTRAPOLATION,
FP_NORMAL_SMOOTH_POINTCLOUD,
FP_COMPUTE_PRINC_CURV_DIR,
FP_CLOSE_HOLES,
FP_FREEZE_TRANSFORM,
FP_RESET_TRANSFORM,
FP_INVERT_TRANSFORM,
FP_SET_TRANSFORM_PARAMS,
FP_SET_TRANSFORM_MATRIX,
FP_CYLINDER_UNWRAP,
FP_REFINE_CATMULL,
FP_REFINE_HALF_CATMULL,
FP_REFINE_DOOSABIN,
FP_QUAD_DOMINANT,
FP_MAKE_PURE_TRI,
FP_QUAD_PAIRING,
FP_FAUX_CREASE,
FP_FAUX_EXTRACT,
FP_VATTR_SEAM,
FP_REFINE_LS3_LOOP,
FP_SLICE_WITH_A_PLANE,
FP_PERIMETER_POLYLINE
};
// One QAction per filter, labelled with its menu name.
for(ActionIDType tt : types())
actionList.push_back(new QAction(filterName(tt), this));
// Pull the quadric-simplification defaults from vcglib's own parameter
// struct so the UI defaults stay in sync with the library.
tri::TriEdgeCollapseQuadricParameter lpp;
lastq_QualityThr = lpp.QualityThr;// 0.3f;
lastq_PreserveBoundary = false;
lastq_PreserveNormal = false;
lastq_PreserveTopology = false;
lastq_OptimalPlacement = true;
lastq_Selected = false;
lastq_PlanarQuadric = false;
lastq_PlanarWeight = lpp.QualityQuadricWeight;
lastq_QualityWeight = false;
lastq_BoundaryWeight = lpp.BoundaryQuadricWeight;
// Defaults for the texture-preserving simplification dialog.
lastqtex_QualityThr = 0.3f;
lastqtex_extratw = 1.0;
// Defaults for the isotropic explicit remeshing dialog.
lastisor_Iterations = 10;
lastisor_RemeshingAdaptivity = false;
lastisor_SelectedOnly = false;
lastisor_RefineFlag = true;
lastisor_CollapseFlag = true;
lastisor_SmoothFlag = true;
lastisor_SwapFlag = true;
lastisor_ProjectFlag = true;
lastisor_FeatureDeg = 30.0f;
}
QString ExtraMeshFilterPlugin::pluginName() const
{
	// Stable internal identifier of this plugin within the MeshLab framework.
	return QString("FilterMeshing");
}
// Maps each filter action to the filter-menu category (FilterClass) it is
// listed under. Categories may be combined arithmetically (e.g.
// Remeshing+Polygonal) so a filter appears in several sub-menus.
ExtraMeshFilterPlugin::FilterClass ExtraMeshFilterPlugin::getClass(const QAction * a) const
{
switch (ID(a))
{
case FP_BUTTERFLY_SS :
case FP_LOOP_SS :
case FP_MIDPOINT :
case FP_QUADRIC_SIMPLIFICATION :
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION :
case FP_EXPLICIT_ISOTROPIC_REMESHING :
case FP_CLUSTERING :
case FP_CLOSE_HOLES :
case FP_FAUX_CREASE :
case FP_FAUX_EXTRACT :
case FP_VATTR_SEAM :
case FP_REFINE_LS3_LOOP : return FilterPlugin::Remeshing;
// Quad/polygon-oriented remeshing filters appear in both menus.
case FP_REFINE_CATMULL :
case FP_REFINE_HALF_CATMULL :
case FP_REFINE_DOOSABIN :
case FP_QUAD_DOMINANT :
case FP_MAKE_PURE_TRI :
case FP_QUAD_PAIRING : return FilterClass(Remeshing+Polygonal);
case FP_NORMAL_EXTRAPOLATION : return FilterClass( Normal + PointSet );
case FP_NORMAL_SMOOTH_POINTCLOUD : return FilterClass( Normal + PointSet );
case FP_INVERT_FACES :
case FP_REORIENT :
case FP_ROTATE :
case FP_ROTATE_FIT :
case FP_CENTER :
case FP_SCALE :
case FP_PRINCIPAL_AXIS :
case FP_FLIP_AND_SWAP : return FilterPlugin::Normal;
case FP_COMPUTE_PRINC_CURV_DIR : return FilterClass( Normal + VertexColoring );
case FP_FREEZE_TRANSFORM :
case FP_INVERT_TRANSFORM :
case FP_SET_TRANSFORM_PARAMS :
case FP_SET_TRANSFORM_MATRIX :
case FP_RESET_TRANSFORM : return FilterClass(Normal + Layer);
case FP_PERIMETER_POLYLINE :
case FP_SLICE_WITH_A_PLANE :
case FP_CYLINDER_UNWRAP : return FilterPlugin::Measure;
// Unknown ID is a programming error; Generic is only a release fallback.
default : assert(0); return FilterPlugin::Generic;
}
return FilterPlugin::Generic;
}
// Returns the MeshModel data mask that must already be present on the mesh
// before the given filter may run (e.g. filters that walk triangles require
// faces to exist; the texture simplification requires wedge tex coords).
int ExtraMeshFilterPlugin::getPreConditions(const QAction *filter) const
{
switch (ID(filter))
{
case FP_MAKE_PURE_TRI : return MeshModel::MM_POLYGONAL;
case FP_LOOP_SS :
case FP_BUTTERFLY_SS :
case FP_MIDPOINT :
case FP_REFINE_CATMULL :
case FP_QUADRIC_SIMPLIFICATION :
case FP_EXPLICIT_ISOTROPIC_REMESHING :
case FP_REORIENT :
case FP_INVERT_FACES :
case FP_COMPUTE_PRINC_CURV_DIR :
case FP_CLOSE_HOLES :
case FP_CYLINDER_UNWRAP :
case FP_REFINE_HALF_CATMULL :
case FP_REFINE_DOOSABIN :
case FP_QUAD_DOMINANT :
case FP_QUAD_PAIRING :
case FP_FAUX_CREASE :
case FP_FAUX_EXTRACT :
case FP_VATTR_SEAM :
case FP_SLICE_WITH_A_PLANE :
case FP_PERIMETER_POLYLINE :
case FP_REFINE_LS3_LOOP : return MeshModel::MM_FACENUMBER;
case FP_NORMAL_SMOOTH_POINTCLOUD : return MeshModel::MM_VERTNORMAL;
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION : return MeshModel::MM_WEDGTEXCOORD;
// Transform/point-cloud filters work on any mesh, even an empty shell.
case FP_CLUSTERING :
case FP_SCALE :
case FP_CENTER :
case FP_ROTATE :
case FP_ROTATE_FIT :
case FP_PRINCIPAL_AXIS :
case FP_FLIP_AND_SWAP :
case FP_FREEZE_TRANSFORM :
case FP_RESET_TRANSFORM :
case FP_INVERT_TRANSFORM :
case FP_SET_TRANSFORM_PARAMS :
case FP_SET_TRANSFORM_MATRIX :
case FP_NORMAL_EXTRAPOLATION : return MeshModel::MM_NONE;
}
return MeshModel::MM_NONE;
}
int ExtraMeshFilterPlugin::getRequirements(const QAction* filter)
{
	// Isotropic remeshing is the only filter that needs per-face and
	// per-vertex quality fields allocated before it runs; every other
	// filter works on the bare mesh.
	if (ID(filter) == FP_EXPLICIT_ISOTROPIC_REMESHING)
		return MeshModel::MM_FACEQUALITY | MeshModel::MM_VERTQUALITY;
	return MeshModel::MM_NONE;
}
// Returns the snake_case identifier of each filter as exposed in the
// PyMeshLab scripting API. These names are part of the public scripting
// interface and must remain stable across releases.
QString ExtraMeshFilterPlugin::pythonFilterName(ActionIDType f) const
{
switch (f) {
case FP_LOOP_SS: return tr("meshing_surface_subdivision_loop");
case FP_BUTTERFLY_SS: return tr("meshing_surface_subdivision_butterfly");
case FP_MIDPOINT: return tr("meshing_surface_subdivision_midpoint");
case FP_REFINE_CATMULL: return tr("meshing_surface_subdivision_catmull_clark");
case FP_REFINE_DOOSABIN: return tr("meshing_surface_subdivision_doo_sabin");
case FP_QUADRIC_SIMPLIFICATION: return tr("meshing_decimation_quadric_edge_collapse");
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION:
return tr("meshing_decimation_quadric_edge_collapse_with_texture");
case FP_EXPLICIT_ISOTROPIC_REMESHING: return tr("meshing_isotropic_explicit_remeshing");
case FP_CLUSTERING: return tr("meshing_decimation_clustering");
case FP_REORIENT: return tr("meshing_re_orient_faces_coherently");
case FP_INVERT_FACES: return tr("meshing_invert_face_orientation");
case FP_SCALE: return tr("compute_matrix_from_scaling_or_normalization");
case FP_CENTER: return tr("compute_matrix_from_translation");
case FP_ROTATE: return tr("compute_matrix_from_rotation");
case FP_ROTATE_FIT: return tr("compute_matrix_by_fitting_to_plane");
case FP_PRINCIPAL_AXIS: return tr("compute_matrix_by_principal_axis");
case FP_FLIP_AND_SWAP: return tr("apply_matrix_flip_or_swap_axis");
case FP_FREEZE_TRANSFORM: return tr("apply_matrix_freeze");
case FP_RESET_TRANSFORM: return tr("set_matrix_identity");
case FP_INVERT_TRANSFORM: return tr("apply_matrix_inverse");
case FP_SET_TRANSFORM_PARAMS: return tr("compute_matrix_from_translation_rotation_scale");
case FP_SET_TRANSFORM_MATRIX: return tr("set_matrix");
case FP_NORMAL_EXTRAPOLATION: return tr("compute_normal_for_point_clouds");
case FP_NORMAL_SMOOTH_POINTCLOUD: return tr("apply_normal_point_cloud_smoothing");
case FP_COMPUTE_PRINC_CURV_DIR: return tr("compute_curvature_principal_directions_per_vertex");
case FP_CLOSE_HOLES: return tr("meshing_close_holes");
case FP_CYLINDER_UNWRAP: return tr("generate_cylindrical_unwrapping");
case FP_REFINE_HALF_CATMULL: return tr("meshing_tri_to_quad_by_4_8_subdivision");
case FP_QUAD_DOMINANT: return tr("meshing_tri_to_quad_dominant");
case FP_MAKE_PURE_TRI: return tr("meshing_poly_to_tri");
case FP_QUAD_PAIRING: return tr("meshing_tri_to_quad_by_smart_triangle_pairing");
case FP_FAUX_CREASE: return tr("compute_selection_crease_per_edge");
case FP_FAUX_EXTRACT: return tr("generate_polyline_from_selected_edges");
case FP_VATTR_SEAM: return tr("meshing_vertex_attribute_seam");
case FP_REFINE_LS3_LOOP: return tr("meshing_surface_subdivision_ls3_loop");
case FP_SLICE_WITH_A_PLANE: return tr("generate_polyline_from_planar_section");
case FP_PERIMETER_POLYLINE: return tr("generate_polyline_from_selection_perimeter");
// Unknown ID is a programming error; empty string is the release fallback.
default: assert(0); return QString();
}
}
// Returns the human-readable English name of each filter as displayed in
// the MeshLab "Filters" menu (also used to label the QAction objects built
// in the constructor).
QString ExtraMeshFilterPlugin::filterName(ActionIDType filter) const
{
switch (filter) {
case FP_LOOP_SS: return tr("Subdivision Surfaces: Loop");
case FP_BUTTERFLY_SS: return tr("Subdivision Surfaces: Butterfly Subdivision");
case FP_MIDPOINT: return tr("Subdivision Surfaces: Midpoint");
case FP_REFINE_CATMULL: return tr("Subdivision Surfaces: Catmull-Clark");
case FP_REFINE_DOOSABIN: return tr("Subdivision Surfaces: Doo Sabin");
case FP_QUADRIC_SIMPLIFICATION: return tr("Simplification: Quadric Edge Collapse Decimation");
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION:
return tr("Simplification: Quadric Edge Collapse Decimation (with texture)");
case FP_EXPLICIT_ISOTROPIC_REMESHING: return tr("Remeshing: Isotropic Explicit Remeshing");
case FP_CLUSTERING: return tr("Simplification: Clustering Decimation");
case FP_REORIENT: return tr("Re-Orient all faces coherently");
case FP_INVERT_FACES: return tr("Invert Faces Orientation");
case FP_SCALE: return tr("Transform: Scale, Normalize");
case FP_CENTER: return tr("Transform: Translate, Center, set Origin");
case FP_ROTATE: return tr("Transform: Rotate");
case FP_ROTATE_FIT: return tr("Transform: Rotate to Fit to a plane");
case FP_PRINCIPAL_AXIS: return tr("Transform: Align to Principal Axis");
case FP_FLIP_AND_SWAP: return tr("Transform: Flip and/or swap axis");
case FP_FREEZE_TRANSFORM: return tr("Matrix: Freeze Current Matrix");
case FP_RESET_TRANSFORM: return tr("Matrix: Reset Current Matrix");
case FP_INVERT_TRANSFORM: return tr("Matrix: Invert Current Matrix");
case FP_SET_TRANSFORM_PARAMS: return tr("Matrix: Set from translation/rotation/scale");
case FP_SET_TRANSFORM_MATRIX: return tr("Matrix: Set/Copy Transformation");
case FP_NORMAL_EXTRAPOLATION: return tr("Compute normals for point sets");
case FP_NORMAL_SMOOTH_POINTCLOUD: return tr("Smooth normals on point sets");
case FP_COMPUTE_PRINC_CURV_DIR: return tr("Compute curvature principal directions");
case FP_CLOSE_HOLES: return tr("Close Holes");
case FP_CYLINDER_UNWRAP: return tr("Geometric Cylindrical Unwrapping");
case FP_REFINE_HALF_CATMULL: return tr("Tri to Quad by 4-8 Subdivision");
case FP_QUAD_DOMINANT: return tr("Turn into Quad-Dominant mesh");
case FP_MAKE_PURE_TRI: return tr("Turn into a Pure-Triangular mesh");
case FP_QUAD_PAIRING: return tr("Tri to Quad by smart triangle pairing");
case FP_FAUX_CREASE: return tr("Select Crease Edges");
case FP_FAUX_EXTRACT: return tr("Build a Polyline from Selected Edges");
case FP_VATTR_SEAM: return tr("Vertex Attribute Seam");
case FP_REFINE_LS3_LOOP: return tr("Subdivision Surfaces: LS3 Loop");
case FP_SLICE_WITH_A_PLANE: return tr("Compute Planar Section");
case FP_PERIMETER_POLYLINE: return tr("Create Selection Perimeter Polyline");
// Unknown ID is a programming error; empty string is the release fallback.
default: assert(0); return QString();
}
}
// Returns the long, HTML-formatted description shown in the filter dialog/help
// for the given filter id.
// NOTE(review): these literals are wrapped in tr(), so they act as translation
// source strings (keys for the .ts files). Several contain typos ("aspect
// ration", "honores", "Itertia", "trheshold", "clylindrical", ...) — fixing
// them would invalidate existing translations, so it must be coordinated with
// a translation update rather than done here.
QString ExtraMeshFilterPlugin::filterInfo(ActionIDType filterID) const
{
	switch (filterID)
	{
	case FP_REFINE_LS3_LOOP            : return tr("Apply LS3 Subdivision Surface algorithm using Loop's weights. This refinement method take normals into account. "
								"<br>See:"
								"<i>Boye', S. Guennebaud, G. & Schlick, C.</i> <br>"
								"<b>Least squares subdivision surfaces</b><br>"
								"Computer Graphics Forum, 2010.<br/><br/>"
								"Alternatives weighting schemes are based on the paper: "
								"<i>Barthe, L. & Kobbelt, L.</i><br>"
								"<b>Subdivision scheme tuning around extraordinary vertices</b><br>"
								"Computer Aided Geometric Design, 2004, 21, 561-583.<br/>"
								"The current implementation of these schemes don't handle vertices of valence > 12");
	case FP_LOOP_SS                    : return tr("Apply Loop's Subdivision Surface algorithm. It is an approximant refinement method and it works for every triangle and has rules for extraordinary vertices.<br>");
	case FP_BUTTERFLY_SS               : return tr("Apply Butterfly Subdivision Surface algorithm. It is an interpolated refinement method, defined on arbitrary triangular meshes. The scheme is known to be C1 but not C2 on regular meshes<br>");
	case FP_MIDPOINT                   : return tr("Apply a plain subdivision scheme where every edge is split on its midpoint. Useful to uniformly refine a mesh substituting each triangle with four smaller triangles.");
	case FP_REFINE_CATMULL             : return tr("Apply the Catmull-Clark Subdivision Surfaces. Note that position of the new vertices is simply linearly interpolated. "
								"If the mesh is triangle based (no <a href='path_to_url edges</a>) it generates a quad mesh, otherwise it honores it the faux-edge bits");
	case FP_REFINE_DOOSABIN            : return tr("Apply the DooSabin Subdivision Surfaces. It is a Dual approximating refinement scheme that creates a new face for each vertex, edge and face. On a pure quad mesh it will add non quad face for each extraordinarhy vertex in the mesh (e.g. in a cube it will add a triangular face for each corner. On the other hand after a refinement step all the vertices will have degree 4.");
	case FP_REFINE_HALF_CATMULL        : return tr("Convert a tri mesh into a quad mesh by applying a 4-8 subdivision scheme."
								"It introduces less overhead than the plain Catmull-Clark Subdivision Surfaces"
								"(it adds only a single vertex for each triangle instead of four)."
								"<br> See: <br>"
								"<b>4-8 Subdivision</b>"
								"<br> <i>Luiz Velho, Denis Zorin </i>"
								"<br>CAGD, volume 18, Issue 5, Pages 397-427. ");
	case FP_CLUSTERING                 : return tr("Collapse vertices by creating a three dimensional grid enveloping the mesh and discretizes them based on the cells of this grid");
	case FP_QUADRIC_SIMPLIFICATION     : return tr("Simplify a mesh using a quadric based edge-collapse strategy. A variant of the well known Garland and Heckbert simplification algorithm with different weighting schemes to better cope with aspect ration and planar/degenerate quadrics areas."
								"<br> See: <br>"
								"<i>M. Garland and P. Heckbert.</i> <br>"
								"<b>Surface Simplification Using Quadric Error Metrics</b> (<a href='path_to_url"
								"In Proceedings of SIGGRAPH 97.<br/><br/>");
	case FP_QUADRIC_TEXCOORD_SIMPLIFICATION : return tr("Simplify a textured mesh using a Quadric based Edge Collapse Strategy preserving UV parametrization. "
								"Inspired in the QSLIM surface simplification algorithm "
								"by Michael Garland, which turned into the industry standard method for mesh simplification."
								"<br> See: <br>"
								"<i>M. Garland and P. Heckbert.</i> <br>"
								"<b>Simplifying Surfaces with Color and Texture using Quadric Error Metrics</b> (<a href='path_to_url"
								" In Proceedings of IEEE Visualization 98.<br/><br/>");
	case FP_EXPLICIT_ISOTROPIC_REMESHING : return tr("Perform a explicit remeshing of a triangular mesh, by repeatedly applying edge flip, collapse, relax and refine operations to regularize size and aspect ration of the triangular meshing. Loosely inspired to:<br>"
								   "Hugues Hoppe, Tony DeRose, Tom Duchamp, John McDonald, and Werner Stuetzle.<br>"
								   "Mesh optimization<br>"
								   "(SIGGRAPH '93). ACM, New York, NY, USA, 1926. <a href='path_to_url");
	case FP_REORIENT                   : return tr("Re-orient in a consistent way all the faces of the mesh. <br>"
								"The filter visits a mesh face to face, reorienting any unvisited face so that it is coherent "
								"to the already visited faces. If the surface is orientable it will end with a consistent orientation of "
								"all the faces. If the surface is not orientable (e.g. it is non manifold or non orientable like a moebius "
								"strip) the filter will not build a consistent orientation simply because it is not possible. The filter can end up in a consistent orientation that can be exactly the opposite of the expected one; in that case simply invert the whole mesh orientation.");
	case FP_INVERT_FACES               : return tr("Invert faces orientation, flipping the normals of the mesh. <br>"
								"If requested, it tries to guess the right orientation; "
								"mainly it decide to flip all the faces if the minimum/maximum vertices have not outward point normals for a few directions.<br>"
								"Works well for single component watertight objects.");
	case FP_SCALE                      : return tr("Generate a matrix transformation that scale the mesh. The mesh can be also automatically scaled to a unit side box. ");
	case FP_CENTER                     : return tr("Generate a matrix transformation that translate the mesh. The mesh can be translated around one of the axis or a given axis and w.r.t. to the origin or the baricenter, or a given point.");
	case FP_ROTATE                     : return tr("Generate a matrix transformation that rotates the mesh. The mesh can be rotated around one of the axis or a given axis and w.r.t. to the origin or the baricenter, or a given point.");
	case FP_ROTATE_FIT                 : return tr("Generate a matrix transformation that rotates the mesh so that the selection fits one of the main planes XY YZ ZX. May also translate such that the selection centroid rest on the origin. It reports on the log the average error of the fitting (in mesh units).");
	case FP_PRINCIPAL_AXIS             : return tr("Generate a matrix transformation that rotates the mesh aligning it to its principal axis of inertia."
								"If the mesh is watertight the Itertia tensor is computed assuming the interior of the mesh has a uniform density."
								"In case of an open mesh or a point clouds the inerta tensor is computed assuming each vertex is a constant puntual mass.");
	case FP_FLIP_AND_SWAP              : return tr("Generate a matrix transformation that flips each one of the axis or swaps a couple of axis. The listed transformations are applied in that order. This kind of transformation cannot be applied to set of Raster!");
	case FP_RESET_TRANSFORM            : return tr("Set the current transformation matrix to the Identity. ");
	case FP_FREEZE_TRANSFORM           : return tr("Freeze the current transformation matrix into the coordinates of the vertices of the mesh (and set this matrix to the identity). In other words it applies in a definetive way the current matrix to the vertex coordinates.");
	case FP_INVERT_TRANSFORM           : return tr("Invert the current transformation matrix. The current transformation is reversed, becoming its opposite.");
	case FP_SET_TRANSFORM_PARAMS       : return tr("Set the current transformation matrix starting from parameters: [XYZ] translation, [XYZ] Euler angles rotation and [XYZ] scaling.");
	case FP_SET_TRANSFORM_MATRIX       : return tr("Set the current transformation matrix by filling it, or copying from another layer.");
	case FP_NORMAL_EXTRAPOLATION       : return tr("Compute the normals of the vertices of a mesh without exploiting the triangle connectivity, useful for dataset with no faces");
	case FP_NORMAL_SMOOTH_POINTCLOUD   : return tr("Smooth the normals of the vertices of a mesh without exploiting the triangle connectivity, useful for dataset with no faces");
	case FP_COMPUTE_PRINC_CURV_DIR     : return tr("Compute the principal directions of curvature with different algorithms");
	case FP_CLOSE_HOLES                : return tr("Close holes whose boundary is composed by a number of edges smaller than a given trheshold");
	case FP_CYLINDER_UNWRAP            : return tr("Unwrap the geometry of current mesh along a clylindrical equatorial projection. The cylindrical projection axis is centered on the origin and directed along the vertical <b>Y</b> axis.");
	case FP_QUAD_PAIRING               : return tr("Convert a tri-mesh into a quad mesh by pairing triangles.");
	case FP_QUAD_DOMINANT              : return tr("Convert a tri-mesh into a quad-dominant mesh by pairing suitable triangles.");
	case FP_MAKE_PURE_TRI              : return tr("Convert into a tri-mesh by splitting any polygonal face.");
	case FP_FAUX_CREASE                : return tr("It select the crease edges of a mesh according to edge dihedral angle.<br>"
								"Angle between face normal is considered signed according to convexity/concavity."
								"Convex angles are positive and concave are negative.");
	case FP_VATTR_SEAM                 : return tr("Make all selected vertex attributes connectivity-independent:<br/>"
								"vertices are duplicated whenever two or more selected wedge or face attributes do not match.<br/>"
								"This is particularly useful for GPU-friendly mesh layout, where a single index must be used to access all required vertex attributes.");
	case FP_SLICE_WITH_A_PLANE         : return tr("Compute the polyline representing a planar section (a slice) of a mesh; if the resulting polyline is closed the result is filled and also a triangular mesh representing the section is saved");
	case FP_PERIMETER_POLYLINE         : return tr("Create a new Layer with an edge mesh (polyline) composed by the selected edges of the current mesh. It can be used to convert the boundary edges of a mesh into a polyline by selecting all the faces of the mesh.");
	case FP_FAUX_EXTRACT               : return tr("Create a new Layer with an edge mesh composed only by the selected edges of the current mesh");
	default                            : assert(0); // unknown filter id: programming error in the caller
	}
	return QString();
}
// This function builds and initializes, with default values (which can depend on the
// current mesh or selection), the list of parameters that a filter requires.
// Returns:
//		true  if the filter has some parameters
//		false if it has no parameters
RichParameterList ExtraMeshFilterPlugin::initParameterList(const QAction * action, const MeshModel & m)
{
RichParameterList parlst;
float maxVal;
QStringList curvCalcMethods;
QStringList curvColorMethods;
QStringList loopWeightLst;
switch(ID(action))
{
case FP_COMPUTE_PRINC_CURV_DIR:
maxVal = m.cm.bbox.Diag();
curvCalcMethods.push_back("Taubin approximation");
curvCalcMethods.push_back("Principal Component Analysis");
curvCalcMethods.push_back("Normal Cycles");
curvCalcMethods.push_back("Quadric Fitting");
curvCalcMethods.push_back("Scale Dependent Quadric Fitting");
curvColorMethods << "Mean Curvature"<<"Gaussian Curvature"<<"Min Curvature"<<"Max Curvature" << "Shape Index"<< "CurvedNess" <<"None";
parlst.addParam(RichEnum("Method", 3, curvCalcMethods, tr("Method:"), tr("Choose a method")));
parlst.addParam(RichEnum("CurvColorMethod", 0, curvColorMethods, tr("Quality/Color Mapping"), QString("Choose the curvature that is mapped into quality and visualized as per vertex color.")));
parlst.addParam(RichPercentage("Scale",maxVal*0.1,0,maxVal,"Curvature Scale","This parameter is used only for scale dependent methods: 'Scale Dependent Quadric Fitting' and 'PCA'."
" It specifies the scale at which the curvature is computed. e.g. for SDQF it specify how large is the patch where we fit the quadric used to compute curvature dirs."));
parlst.addParam(RichBool("Autoclean",true,"Remove Unreferenced Vertices","If selected, before starting the filter will remove any unreference vertex (for which curvature values are not defined)"));
break;
case FP_QUADRIC_SIMPLIFICATION:
parlst.addParam(RichInt ("TargetFaceNum", (m.cm.sfn>0) ? m.cm.sfn/2 : m.cm.fn/2,"Target number of faces", "The desired final number of faces."));
parlst.addParam(RichFloat("TargetPerc", 0,"Percentage reduction (0..1)", "If non zero, this parameter specifies the desired final size of the mesh as a percentage of the initial size."));
parlst.addParam(RichFloat("QualityThr",lastq_QualityThr,"Quality threshold","Quality threshold for penalizing bad shaped faces.<br>The value is in the range [0..1]\n 0 accept any kind of face (no penalties),\n 0.5 penalize faces with quality < 0.5, proportionally to their shape\n"));
parlst.addParam(RichBool ("PreserveBoundary",lastq_PreserveBoundary,"Preserve Boundary of the mesh","The simplification process tries to do not affect mesh boundaries during simplification"));
parlst.addParam(RichFloat("BoundaryWeight",lastq_BoundaryWeight,"Boundary Preserving Weight","The importance of the boundary during simplification. Default (1.0) means that the boundary has the same importance of the rest. Values greater than 1.0 raise boundary importance and has the effect of removing less vertices on the border. Admitted range of values (0,+inf). "));
parlst.addParam(RichBool ("PreserveNormal",lastq_PreserveNormal,"Preserve Normal","Try to avoid face flipping effects and try to preserve the original orientation of the surface"));
parlst.addParam(RichBool ("PreserveTopology",lastq_PreserveTopology,"Preserve Topology","Avoid all the collapses that should cause a topology change in the mesh (like closing holes, squeezing handles, etc). If checked the genus of the mesh should stay unchanged."));
parlst.addParam(RichBool ("OptimalPlacement",lastq_OptimalPlacement,"Optimal position of simplified vertices","Each collapsed vertex is placed in the position minimizing the quadric error.\n It can fail (creating bad spikes) in case of very flat areas. \nIf disabled edges are collapsed onto one of the two original vertices and the final mesh is composed by a subset of the original vertices. "));
parlst.addParam(RichBool ("PlanarQuadric",lastq_PlanarQuadric,"Planar Simplification","Add additional simplification constraints that improves the quality of the simplification of the planar portion of the mesh, as a side effect, more triangles will be preserved in flat areas (allowing better shaped triangles)."));
parlst.addParam(RichFloat("PlanarWeight",lastq_PlanarWeight,"Planar Simp. Weight","How much we should try to preserve the triangles in the planar regions. If you lower this value planar areas will be simplified more."));
parlst.addParam(RichBool ("QualityWeight",lastq_QualityWeight,"Weighted Simplification","Use the Per-Vertex quality as a weighting factor for the simplification. The weight is used as a error amplification value, so a vertex with a high quality value will not be simplified and a portion of the mesh with low quality values will be aggressively simplified."));
parlst.addParam(RichBool ("AutoClean",true,"Post-simplification cleaning","After the simplification an additional set of steps is performed to clean the mesh (unreferenced vertices, bad faces, etc)"));
parlst.addParam(RichBool ("Selected",m.cm.sfn>0,"Simplify only selected faces","The simplification is applied only to the selected set of faces.\n Take care of the target number of faces!"));
break;
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION:
parlst.addParam(RichInt ("TargetFaceNum", (m.cm.sfn>0) ? m.cm.sfn/2 : m.cm.fn/2,"Target number of faces"));
parlst.addParam(RichFloat("TargetPerc", 0,"Percentage reduction (0..1)", "If non zero, this parameter specifies the desired final size of the mesh as a percentage of the initial mesh."));
parlst.addParam(RichFloat("QualityThr",lastqtex_QualityThr,"Quality threshold","Quality threshold for penalizing bad shaped faces.<br>The value is in the range [0..1]\n 0 accept any kind of face (no penalties),\n 0.5 penalize faces with quality < 0.5, proportionally to their shape\n"));
parlst.addParam(RichFloat("Extratcoordw",lastqtex_extratw,"Texture Weight","Additional weight for each extra Texture Coordinates for every (selected) vertex"));
parlst.addParam(RichBool ("PreserveBoundary",lastq_PreserveBoundary,"Preserve Boundary of the mesh","The simplification process tries not to destroy mesh boundaries"));
parlst.addParam(RichFloat("BoundaryWeight",lastq_BoundaryWeight,"Boundary Preserving Weight","The importance of the boundary during simplification. Default (1.0) means that the boundary has the same importance of the rest. Values greater than 1.0 raise boundary importance and has the effect of removing less vertices on the border. Admitted range of values (0,+inf). "));
parlst.addParam(RichBool ("OptimalPlacement",lastq_OptimalPlacement,"Optimal position of simplified vertices","Each collapsed vertex is placed in the position minimizing the quadric error.\n It can fail (creating bad spikes) in case of very flat areas. \nIf disabled edges are collapsed onto one of the two original vertices and the final mesh is composed by a subset of the original vertices. "));
parlst.addParam(RichBool ("PreserveNormal",lastq_PreserveNormal,"Preserve Normal","Try to avoid face flipping effects and try to preserve the original orientation of the surface"));
parlst.addParam(RichBool ("PlanarQuadric",lastq_PlanarQuadric,"Planar Simplification","Add additional simplification constraints that improves the quality of the simplification of the planar portion of the mesh."));
parlst.addParam(RichBool ("Selected",m.cm.sfn>0,"Simplify only selected faces","The simplification is applied only to the selected set of faces.\n Take care of the target number of faces!"));
break;
case FP_EXPLICIT_ISOTROPIC_REMESHING:
parlst.addParam(RichInt ("Iterations", lastisor_Iterations, "Iterations", "Number of iterations of the remeshing operations to repeat on the mesh."));
parlst.addParam(RichBool ("Adaptive", lastisor_RemeshingAdaptivity, "Adaptive remeshing", "Toggles adaptive isotropic remeshing." ));
parlst.addParam(RichBool ("SelectedOnly", lastisor_SelectedOnly, "Remesh only selected faces", "If checked the remeshing operations will be applied only to the selected faces."));
maxVal = m.cm.bbox.Diag();
parlst.addParam(RichPercentage("TargetLen",maxVal*0.01,0,maxVal,"Target Length", "Sets the target length for the remeshed mesh edges."));
parlst.addParam(RichFloat ("FeatureDeg", lastisor_FeatureDeg, "Crease Angle", "Minimum angle between faces of the original to consider the shared edge as a feature to be preserved."));
parlst.addParam(RichBool ("CheckSurfDist", lastisor_CheckSurfDist, "Check Surface Distance", "If toggled each local operation must deviate from original mesh by [Max. surface distance]"));
parlst.addParam(RichPercentage ("MaxSurfDist", maxVal*0.01,0,maxVal, "Max. Surface Distance", "Maximal surface deviation allowed for each local operation"));
parlst.addParam(RichBool ("SplitFlag", lastisor_RefineFlag, "Refine Step", "If checked the remeshing operations will include a refine step."));
parlst.addParam(RichBool ("CollapseFlag", lastisor_CollapseFlag, "Collapse Step", "If checked the remeshing operations will include a collapse step."));
parlst.addParam(RichBool ("SwapFlag", lastisor_SwapFlag, "Edge-Swap Step", "If checked the remeshing operations will include a edge-swap step, aimed at improving the vertex valence of the resulting mesh."));
parlst.addParam(RichBool ("SmoothFlag", lastisor_SmoothFlag, "Smooth Step", "If checked the remeshing operations will include a smoothing step, aimed at relaxing the vertex positions in a Laplacian sense."));
parlst.addParam(RichBool ("ReprojectFlag", lastisor_ProjectFlag, "Reproject Step", "If checked the remeshing operations will include a step to reproject the mesh vertices on the original surface."));
break;
case FP_CLOSE_HOLES:
parlst.addParam(RichInt ("MaxHoleSize",(int)30,"Max size to be closed ","The size is expressed as number of edges composing the hole boundary"));
parlst.addParam(RichBool("Selected",m.cm.sfn>0,"Close holes with selected faces","Only the holes with at least one of the boundary faces selected are closed"));
parlst.addParam(RichBool("NewFaceSelected",true,"Select the newly created faces","After closing a hole the faces that have been created are left selected. Any previous selection is lost. Useful for example for smoothing the newly created holes."));
parlst.addParam(RichBool("SelfIntersection",true,"Prevent creation of selfIntersecting faces","When closing an holes it tries to prevent the creation of faces that intersect faces adjacent to the boundary of the hole. It is an heuristic, non intersetcting hole filling can be NP-complete."));
parlst.addParam(RichBool("RefineHole",false,"Refine Filled Hole","After closing the hole it will refine the newly created triangles to make the surface more smooth and the triangulation more evenly spaced"));
maxVal = m.cm.bbox.Diag();
parlst.addParam(RichPercentage("RefineHoleEdgeLen",maxVal*0.03,0,maxVal,"Hole Refinement Edge Len", "The target edge lenght of the triangulation inside the filled hole."));
break;
case FP_LOOP_SS:
case FP_REFINE_LS3_LOOP:
loopWeightLst << "Loop" << "Enhance regularity" << "Enhance continuity";
parlst.addParam(RichEnum("LoopWeight", 0, loopWeightLst, "Weighting scheme", "Change the weights used. Allows one to optimize some behaviors over others."));
// fall through
case FP_BUTTERFLY_SS:
case FP_MIDPOINT:
parlst.addParam(RichInt("Iterations", 3, "Iterations", "Number of time the model is subdivided."));
maxVal = m.cm.bbox.Diag();
parlst.addParam(RichPercentage("Threshold",maxVal*0.01,0,maxVal,"Edge Threshold", "All the edges <b>longer</b> than this threshold will be refined.<br>Setting this value to zero will force an uniform refinement."));
parlst.addParam(RichBool ("Selected",m.cm.sfn>0,"Affect only selected faces","If selected the filter affect only the selected faces"));
break;
case FP_REFINE_DOOSABIN:
parlst.addParam(RichInt("Iterations", 2, "Iterations", "Number of times the model is subdivided."));
break;
case FP_CLUSTERING:
// TODO implement selection
maxVal = m.cm.bbox.Diag();
parlst.addParam(RichPercentage(
"Threshold",
maxVal * 0.01,
0,
maxVal,
"Cell Size",
"The size of the cell of the clustering grid. Smaller the cell finer the resulting "
"mesh. For obtaining a very coarse mesh use larger values."));
//TODO: implement selection on clustering algorithm
// parlst.addParam(RichBool(
// "Selected",
// m.cm.sfn > 0,
// "Affect only selected points/faces",
// "If selected the filter affect only the selected points/faces"));
break;
case FP_CYLINDER_UNWRAP:
parlst.addParam(RichFloat("startAngle", 0,"Start angle (deg)", "The starting angle of the unrolling process."));
parlst.addParam(RichFloat("endAngle",360,"End angle (deg)","The ending angle of the unrolling process. Quality threshold for penalizing bad shaped faces.<br>The value is in the range [0..1]\n 0 accept any kind of face (no penalties),\n 0.5 penalize faces with quality < 0.5, proportionally to their shape\n"));
parlst.addParam(RichFloat("radius", 0,"Projection Radius", "If non zero, this parameter specifies the desired radius of the reference cylinder used for the projection. Changing this parameter affect the <b>X</b> horizontal scaling of the resulting mesh. If zero (default) the average distance of the mesh from the axis is chosen."));
break;
case FP_FLIP_AND_SWAP:
parlst.addParam(RichBool ("flipX",false,"Flip X axis","If selected the axis will be swapped (mesh mirrored along the YZ plane"));
parlst.addParam(RichBool ("flipY",false,"Flip Y axis","If selected the axis will be swapped (mesh mirrored along the XZ plane"));
parlst.addParam(RichBool ("flipZ",false,"Flip Z axis","If selected the axis will be swapped (mesh mirrored along the XY plane"));
parlst.addParam(RichBool ("swapXY",false,"Swap X-Y axis","If selected the two axis will be swapped. All the swaps are performed in this order"));
parlst.addParam(RichBool ("swapXZ",false,"Swap X-Z axis","If selected the two axis will be swapped. All the swaps are performed in this order"));
parlst.addParam(RichBool ("swapYZ",false,"Swap Y-Z axis","If selected the two axis will be swapped. All the swaps are performed in this order"));
parlst.addParam(RichBool ("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
break;
case FP_RESET_TRANSFORM:
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
break;
case FP_FREEZE_TRANSFORM:
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
break;
case FP_INVERT_TRANSFORM:
parlst.addParam(RichBool("Freeze", true, "Freeze Matrix", "The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool("allLayers", false, "Apply to all visible Layers", "If selected the filter will be applied to all visible mesh layers"));
break;
case FP_SET_TRANSFORM_MATRIX:
{
Matrix44m mat; mat.SetIdentity();
parlst.addParam(RichMatrix44("TransformMatrix", mat, ""));
parlst.addParam(RichBool("compose", false, "Compose with current", "If selected, the new matrix will be composed with the current one (matrix=new*old)"));
parlst.addParam(RichBool("Freeze", true, "Freeze Matrix", "The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected, the filter will be applied to all visible mesh layers"));
}
break;
case FP_SET_TRANSFORM_PARAMS:
{
parlst.addParam(RichFloat("translationX", 0, "X Translation", "Translation factor on X axis"));
parlst.addParam(RichFloat("translationY", 0, "Y Translation", "Translation factor on Y axis"));
parlst.addParam(RichFloat("translationZ", 0, "Z Translation", "Translation factor on Z axis"));
parlst.addParam(RichFloat("rotationX", 0, "X Rotation", "Rotation angle on X axis"));
parlst.addParam(RichFloat("rotationY", 0, "Y Rotation", "Rotation angle on Y axis"));
parlst.addParam(RichFloat("rotationZ", 0, "Z Rotation", "Rotation angle on Z axis"));
parlst.addParam(RichFloat("scaleX", 1, "X Scale", "Scaling factor on X axis"));
parlst.addParam(RichFloat("scaleY", 1, "Y Scale", "Scaling factor on Y axis"));
parlst.addParam(RichFloat("scaleZ", 1, "Z Scale", "Scaling factor on Z axis"));
parlst.addParam(RichBool("compose", false, "Compose with current", "If selected, the new matrix will be composed with the current one (matrix=new*old)"));
parlst.addParam(RichBool("Freeze", true, "Freeze Matrix", "The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
}
break;
case FP_ROTATE_FIT:
{
QStringList planes;
planes.push_back("XY plane");
planes.push_back("YZ plane");
planes.push_back("ZX plane");
parlst.addParam(RichEnum("targetPlane", 0, planes, "Rotate to fit:", "Choose the plane where the selection will fit"));
QStringList raxis;
raxis.push_back("any axis");
raxis.push_back("X axis");
raxis.push_back("Y axis");
raxis.push_back("Z axis");
parlst.addParam(RichEnum("rotAxis", 0, raxis, "Rotate on:", "Choose on which axis do the rotation: 'any axis' guarantee the best fit of the selection to the plane, only use X,Y or Z it if you want to preserve that specific axis."));
parlst.addParam(RichBool("ToOrigin", true, "Move to Origin", "Also apply a translation, such that the centroid of selection rests on the Origin"));
parlst.addParam(RichBool("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
}
break;
case FP_ROTATE:
{
QStringList rotMethod;
rotMethod.push_back("X axis");
rotMethod.push_back("Y axis");
rotMethod.push_back("Z axis");
rotMethod.push_back("custom axis");
parlst.addParam(RichEnum("rotAxis", 0, rotMethod, tr("Rotation on:"), tr("Choose a method")));
QStringList rotCenter;
rotCenter.push_back("origin");
rotCenter.push_back("barycenter");
rotCenter.push_back("custom point");
parlst.addParam(RichEnum("rotCenter", 0, rotCenter, tr("Center of rotation:"), tr("Choose a method")));
parlst.addParam(RichDynamicFloat("angle",0,-360,360,"Rotation Angle","Angle of rotation (in <b>degree</b>). If snapping is enabled this value is rounded according to the snap value"));
parlst.addParam(RichDirection("customAxis",Point3f(0,0,0),"Custom axis","This rotation axis is used only if the 'custom axis' option is chosen."));
parlst.addParam(RichPosition("customCenter",Point3f(0,0,0),"Custom center","This rotation center is used only if the 'custom point' option is chosen."));
parlst.addParam(RichBool("snapFlag", false, "Snap angle", "If selected, before starting the filter will remove any unreferenced vertex (for which curvature values are not defined)"));
parlst.addParam(RichFloat("snapAngle",30,"Snapping Value","This value is used to snap the rotation angle (i.e. if the snapping value is 30, 227 becomes 210)."));
parlst.addParam(RichBool ("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
}
break;
case FP_PRINCIPAL_AXIS:
parlst.addParam(RichBool("pointsFlag",true,"Use vertex","If selected, only the vertices of the mesh are used to compute the Principal Axis. Mandatory for point clouds or for non water tight meshes"));
parlst.addParam(RichBool ("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
break;
case FP_CENTER:
{
QStringList traslMethod;
traslMethod.push_back("XYZ translation");
traslMethod.push_back("Center on Scene BBox");
traslMethod.push_back("Center on Layer BBox");
traslMethod.push_back("Set new Origin");
parlst.addParam(RichEnum("traslMethod", 0, traslMethod, tr("Transformation:"), tr("[XYZ translation] adds X,Y and Z offset to Layer transformation, [Center on BBox] moves Layer Origin to the Bounding Box center, [Set new Origin] moves Layer Origin to a specific point")));
const Box3m &bb=m.cm.bbox;
parlst.addParam(RichDynamicFloat("axisX",0,-5.0*bb.Diag(),5.0*bb.Diag(),"X Axis","when using [XYZ translation], amount of translation along the X axis (in model units)"));
parlst.addParam(RichDynamicFloat("axisY",0,-5.0*bb.Diag(),5.0*bb.Diag(),"Y Axis","when using [XYZ translation], amount of translation along the Y axis (in model units)"));
parlst.addParam(RichDynamicFloat("axisZ",0,-5.0*bb.Diag(),5.0*bb.Diag(),"Z Axis","when using [XYZ translation], amount of translation along the Z axis (in model units)"));
parlst.addParam(RichPosition("newOrigin", Point3f(0, 0, 0), "New Origin:", "when using [Set new Origin], this is the location of the new Origin."));
parlst.addParam(RichBool ("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
}
break;
case FP_SCALE:
{
parlst.addParam(RichFloat("axisX",1,"X Axis","Scaling"));
parlst.addParam(RichFloat("axisY",1,"Y Axis","Scaling"));
parlst.addParam(RichFloat("axisZ",1,"Z Axis","Scaling"));
parlst.addParam(RichBool("uniformFlag",true,"Uniform Scaling","If selected an uniform scaling (the same for all the three axis) is applied (the X axis value is used)"));
QStringList scaleCenter;
scaleCenter.push_back("origin");
scaleCenter.push_back("barycenter");
scaleCenter.push_back("custom point");
parlst.addParam(RichEnum("scaleCenter", 0, scaleCenter, tr("Center of scaling:"), tr("Choose a method")));
parlst.addParam(RichPosition("customCenter",Point3f(0,0,0),"Custom center","This scaling center is used only if the 'custom point' option is chosen."));
parlst.addParam(RichBool("unitFlag",false,"Scale to Unit bbox","If selected, the object is scaled to a box whose sides are at most 1 unit length"));
parlst.addParam(RichBool ("Freeze",true,"Freeze Matrix","The transformation is explicitly applied, and the vertex coordinates are actually changed"));
parlst.addParam(RichBool ("allLayers",false,"Apply to all visible Layers","If selected the filter will be applied to all visible mesh layers"));
}
break;
case FP_INVERT_FACES:
parlst.addParam(RichBool("forceFlip", true, "Force Flip", "If selected, the normals will always be flipped; otherwise, the filter tries to set them outside"));
parlst.addParam(RichBool("onlySelected", false, "Flip only selected faces", "If selected, only selected faces will be affected"));
break;
case FP_FAUX_CREASE:
parlst.addParam(RichFloat ("AngleDegNeg",-45.0f,"Concave Angle Thr. (deg)","Concave Dihedral Angle threshold for considering an edge a crease. If the normals between two faces forms an concave diheadral angle smaller than the threshold the edge is considered a crease."));
parlst.addParam(RichFloat ("AngleDegPos", 45.0f,"Convex Angle Thr. (deg)","The angle threshold for considering an edge a crease. If the normals between two faces forms an angle larger than the threshold the edge is considered a crease."));
break;
case FP_NORMAL_EXTRAPOLATION:
parlst.addParam(RichInt ("K",(int)10,"Neighbour num","The number of neighbors used to estimate normals."));
parlst.addParam(RichInt ("smoothIter",0,"Smooth Iteration","The number of smoothing iteration done on the p used to estimate and propagate normals."));
parlst.addParam(RichBool("flipFlag",false,"Flip normals w.r.t. viewpoint","If the 'viewpoint' (i.e. scanner position) is known, it can be used to disambiguate normals orientation, so that all the normals will be oriented in the same direction."));
parlst.addParam(RichPosition("viewPos",m.cm.shot.Extrinsics.Tra(),"Viewpoint Pos.","The viewpoint position can be set by hand (i.e. getting the current viewpoint) or it can be retrieved from mesh camera, if the viewpoint position is stored there."));
break;
case FP_NORMAL_SMOOTH_POINTCLOUD:
parlst.addParam(RichInt ("K",(int)10,"Number of neighbors","The number of neighbors used to smooth normals."));
parlst.addParam(RichBool("useDist",false,"Weight using neighbour distance","If selected, the neighbour normals are waighted according to their distance"));
break;
case FP_VATTR_SEAM:
{
QStringList normalMethod; normalMethod << "None" << "Vertex" << "Wedge" << "Face";
parlst.addParam(RichEnum("NormalMode", 0, normalMethod, tr("Normal Source:"), tr("Choose a method")));
QStringList colorMethod; colorMethod << "None" << "Vertex" << "Wedge" << "Face";
parlst.addParam(RichEnum("ColorMode", 0, colorMethod, tr("Color Source:"), tr("Choose a method")));
QStringList texcoordMethod;texcoordMethod << "None" << "Vertex" << "Wedge";
parlst.addParam(RichEnum("TexcoordMode", 0, texcoordMethod, tr("Texcoord Source:"), tr("Choose a method")));
}
break;
case FP_PERIMETER_POLYLINE:
break;
case FP_SLICE_WITH_A_PLANE:
{
QStringList axis = QStringList() <<"X Axis"<<"Y Axis"<<"Z Axis"<<"Custom Axis";
parlst.addParam(RichEnum ("planeAxis", 0, axis, tr("Plane perpendicular to"), tr("The Slicing plane will be done perpendicular to the axis")));
parlst.addParam(RichDirection("customAxis",Point3f(0,1,0),"Custom axis","Specify a custom axis, this is only valid if the above parameter is set to Custom"));
parlst.addParam(RichFloat ("planeOffset", 0.0, "Cross plane offset", "Specify an offset of the cross-plane. The offset corresponds to the distance from the point specified in the plane reference parameter. By default (Cross plane offset == 0)"));
parlst.addParam(RichEnum ("relativeTo",2,QStringList()<<"Bounding box center"<<"Bounding box min"<<"Origin","plane reference","Specify the reference from which the planes are shifted"));
parlst.addParam(RichBool("createSectionSurface",false,"Create also section surface","If selected, in addition to a layer with the section polyline, it will be created also a layer with a triangulated version of the section polyline. This only works if the section polyline is closed"));
parlst.addParam(RichBool("splitSurfaceWithSection",false,"Create also split surfaces","If selected, it will create two layers with the portion of the mesh under and over the section plane. It requires manifoldness of the mesh."));
}
break;
case FP_QUAD_DOMINANT:
{
QStringList opt = QStringList() <<"Fewest triangles"<< "(in between)" <<"Better quad shape";
parlst.addParam(RichEnum ("level", 0, opt, tr("Optimize For:"), tr("Choose any of three different greedy strategies.")));
}
break;
default:
break;
}
return parlst;
}
// Bake the layer's current transformation matrix into the vertex
// coordinates themselves: positions (and normals) are transformed by Tr,
// the bounding box is recomputed, the associated camera shot is moved
// rigidly along with the geometry, and Tr is finally reset to identity
// so the frozen mesh becomes the new reference frame.
void Freeze(MeshModel *m)
{
	CMeshO& mesh = m->cm;
	tri::UpdatePosition<CMeshO>::Matrix(mesh, mesh.Tr, true);
	tri::UpdateBounding<CMeshO>::Box(mesh);
	mesh.shot.ApplyRigidTransformation(mesh.Tr);
	mesh.Tr.SetIdentity();
}
// Applies the matrix `tr` to either the current mesh or every visible mesh
// layer of the document.
//   toAllFlag    - when true, act on all visible meshes (and move visible
//                  rasters rigidly by the same matrix); otherwise only md.mm().
//   freeze       - when true, bake the resulting matrix into the geometry
//                  via Freeze().
//   invertFlag   - when true, invert each mesh's own matrix first.
//   composeFlage - when true, left-compose `tr` with the mesh matrix;
//                  when false, overwrite the mesh matrix with `tr`.
void ApplyTransform(MeshDocument &md, const Matrix44m &tr, bool toAllFlag, bool freeze,
					bool invertFlag=false, bool composeFlage=true)
{
	// Shared per-mesh update: invert, compose/replace, optionally freeze.
	auto transformMesh = [&](MeshModel* mesh) {
		if (invertFlag)
			mesh->cm.Tr = Inverse(mesh->cm.Tr);
		mesh->cm.Tr = composeFlage ? (tr * mesh->cm.Tr) : tr;
		if (freeze)
			Freeze(mesh);
	};
	if (toAllFlag) {
		MeshModel* m = nullptr;
		while ((m = md.nextVisibleMesh(m)))
			transformMesh(m);
		// Visible raster cameras follow the same rigid transformation.
		for (RasterModel& rm : md.rasterIterator())
			if (rm.isVisible())
				rm.shot.ApplyRigidTransformation(tr);
	}
	else {
		transformMesh(md.mm());
	}
}
std::map<std::string, QVariant> ExtraMeshFilterPlugin::applyFilter(
const QAction * filter,
const RichParameterList & par,
MeshDocument & md,
unsigned int& /*postConditionMask*/,
vcg::CallBackPos * cb)
{
std::map<std::string, QVariant> outputValues;
MeshModel & m = *md.mm();
switch(ID(filter))
{
case FP_LOOP_SS:
case FP_BUTTERFLY_SS:
case FP_MIDPOINT:
case FP_REFINE_LS3_LOOP:
{
tri::Allocator<CMeshO>::CompactFaceVector(m.cm);
tri::Allocator<CMeshO>::CompactVertexVector(m.cm);
m.updateDataMask( MeshModel::MM_FACEFACETOPO);
tri::UpdateFlags<CMeshO>::FaceBorderFromFF(m.cm);
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m.cm) > 0)
{
throw MLException("Mesh has some not 2 manifoldfaces, subdivision surfaces require manifoldness"); // text
}
bool selected = par.getBool("Selected");
Scalarm threshold = par.getAbsPerc("Threshold");
int iterations = par.getInt("Iterations");
for(int i=0; i<iterations; ++i)
{
m.updateDataMask(MeshModel::MM_VERTFACETOPO);
switch(ID(filter))
{
case FP_LOOP_SS :
switch(par.getEnum("LoopWeight"))
{
case 0:
tri::RefineOddEven<CMeshO/*, tri::OddPointLoop<CMeshO>, tri::EvenPointLoop<CMeshO>*/ >
(m.cm, tri::OddPointLoop<CMeshO>(m.cm), tri::EvenPointLoop<CMeshO>(), threshold, selected, cb);
break;
case 1:
tri::RefineOddEven<CMeshO/*,
tri::OddPointLoopGeneric<CMeshO, Centroid<CMeshO>, RegularLoopWeight<CMeshO::ScalarType> >,
tri::EvenPointLoopGeneric<CMeshO, Centroid<CMeshO>, RegularLoopWeight<CMeshO::ScalarType> >*/ >
(m.cm, tri::OddPointLoopGeneric<CMeshO, vcg::tri::Centroid<CMeshO>, RegularLoopWeight<CMeshO::ScalarType> >(m.cm),
tri::EvenPointLoopGeneric<CMeshO, vcg::tri::Centroid<CMeshO>, RegularLoopWeight<CMeshO::ScalarType> >(), threshold, selected, cb);
break;
case 2:
tri::RefineOddEven<CMeshO/*,
tri::OddPointLoopGeneric<CMeshO, Centroid<CMeshO>, ContinuityLoopWeight<CMeshO::ScalarType> >,
tri::EvenPointLoopGeneric<CMeshO, Centroid<CMeshO>, ContinuityLoopWeight<CMeshO::ScalarType> >*/ >
(m.cm, tri::OddPointLoopGeneric<CMeshO, vcg::tri::Centroid<CMeshO>, vcg::tri::ContinuityLoopWeight<CMeshO::ScalarType> >(m.cm),
tri::EvenPointLoopGeneric<CMeshO, vcg::tri::Centroid<CMeshO>, ContinuityLoopWeight<CMeshO::ScalarType> >(), threshold, selected, cb);
break;
}
break;
case FP_BUTTERFLY_SS :
Refine<CMeshO,MidPointButterfly<CMeshO> > (m.cm, MidPointButterfly<CMeshO>(m.cm), threshold, selected, cb);
break;
case FP_MIDPOINT :
Refine<CMeshO,MidPoint<CMeshO> > (m.cm, MidPoint<CMeshO>(&m.cm), threshold, selected, cb);
break;
case FP_REFINE_LS3_LOOP :
switch(par.getEnum("LoopWeight"))
{
case 0:
tri::RefineOddEven<CMeshO/*, tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double> >, tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double> >*/ >
(m.cm, tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double> >(m.cm), tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double> >(), threshold, selected, cb);
break;
case 1:
tri::RefineOddEven<CMeshO/*,
tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, RegularLoopWeight<double> >,
tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, RegularLoopWeight<double> >*/ >
(m.cm, tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, RegularLoopWeight<double> >(m.cm),
tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, RegularLoopWeight<double> >(), threshold, selected, cb);
break;
case 2:
tri::RefineOddEven<CMeshO/*,
tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, ContinuityLoopWeight<double> >,
tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, ContinuityLoopWeight<double> >*/ >
(m.cm, tri::OddPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, ContinuityLoopWeight<double> >(m.cm),
tri::EvenPointLoopGeneric<CMeshO, LS3Projection<CMeshO, double>, ContinuityLoopWeight<double> >(), threshold, selected, cb);
break;
}
break;
}
m.clearDataMask(MeshModel::MM_VERTFACETOPO);
}
m.updateBoxAndNormals();
} break;
case FP_REORIENT:
{
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
bool oriented, orientable;
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m.cm)>0 )
{
throw MLException("Mesh has some not 2-manifold faces, Orientability requires manifoldness");
}
tri::Clean<CMeshO>::OrientCoherentlyMesh(m.cm, oriented,orientable);
tri::UpdateTopology<CMeshO>::FaceFace(m.cm);
tri::UpdateTopology<CMeshO>::TestFaceFace(m.cm);
m.updateBoxAndNormals();
} break;
case FP_CLUSTERING:
{
// TODO implement selection
Scalarm threshold = par.getAbsPerc("Threshold");
vcg::tri::Clustering<CMeshO, vcg::tri::AverageColorCell<CMeshO>> ClusteringGrid(
m.cm.bbox, 100000, threshold);
if(m.cm.FN() == 0) {
ClusteringGrid.AddPointSet(m.cm);
ClusteringGrid.ExtractPointSet(m.cm);
}
else {
ClusteringGrid.AddMesh(m.cm);
ClusteringGrid.ExtractMesh(m.cm);
}
m.updateBoxAndNormals();
m.clearDataMask(MeshModel::MM_FACEFACETOPO);
} break;
case FP_INVERT_FACES:
{
bool flipped=par.getBool("forceFlip");
bool onlySelected=par.getBool("onlySelected");
if(flipped)
tri::Clean<CMeshO>::FlipMesh(m.cm,onlySelected);
else
tri::Clean<CMeshO>::FlipNormalOutside(m.cm);
m.updateBoxAndNormals();
m.clearDataMask(MeshModel::MM_FACEFACETOPO);
} break;
case FP_RESET_TRANSFORM:
ApplyTransform(md, Matrix44m::Identity(), par.getBool("allLayers"), false, false, false);
break;
case FP_FREEZE_TRANSFORM:
ApplyTransform(md, Matrix44m::Identity(), par.getBool("allLayers"), true, false, true);
break;
case FP_INVERT_TRANSFORM:
ApplyTransform(md, Matrix44m::Identity(), par.getBool("allLayers"), par.getBool("Freeze"), true, true);
break;
case FP_SET_TRANSFORM_MATRIX:
ApplyTransform(md, par.getMatrix44("TransformMatrix"), par.getBool("allLayers"), par.getBool("Freeze"), false, par.getBool("compose"));
break;
case FP_SET_TRANSFORM_PARAMS:
{
Scalarm tX = par.getFloat("translationX");
Scalarm tY = par.getFloat("translationY");
Scalarm tZ = par.getFloat("translationZ");
Scalarm rX = par.getFloat("rotationX");
Scalarm rY = par.getFloat("rotationY");
Scalarm rZ = par.getFloat("rotationZ");
Scalarm sX = par.getFloat("scaleX");
Scalarm sY = par.getFloat("scaleY");
Scalarm sZ = par.getFloat("scaleZ");
Matrix44m newTransform = Matrix44m::Identity();
Matrix44m tt;
tt.SetTranslate(tX, tY, tZ);
newTransform = newTransform * tt;
if ((rX != 0.0) || (rY != 0.0) || (rZ != 0.0))
{
tt.FromEulerAngles(math::ToRad(rX), math::ToRad(rY), math::ToRad(rZ));
newTransform = newTransform * tt;
}
if ((sX != 0.0) || (sY != 0.0) || (sZ != 0.0))
{
tt.SetScale(sX, sY, sZ);
newTransform = newTransform * tt;
}
ApplyTransform(md, newTransform, par.getBool("allLayers"), par.getBool("Freeze"), false, par.getBool("compose"));
}break;
case FP_QUADRIC_SIMPLIFICATION:
{
m.updateDataMask( MeshModel::MM_VERTFACETOPO | MeshModel::MM_VERTMARK);
tri::UpdateFlags<CMeshO>::FaceBorderFromVF(m.cm);
int TargetFaceNum = par.getInt("TargetFaceNum");
if(par.getFloat("TargetPerc")!=0) TargetFaceNum = m.cm.fn*par.getFloat("TargetPerc");
tri::TriEdgeCollapseQuadricParameter pp;
pp.QualityThr=lastq_QualityThr =par.getFloat("QualityThr");
pp.PreserveBoundary=lastq_PreserveBoundary = par.getBool("PreserveBoundary");
pp.BoundaryQuadricWeight = pp.BoundaryQuadricWeight * par.getFloat("BoundaryWeight");
pp.PreserveTopology=lastq_PreserveTopology = par.getBool("PreserveTopology");
pp.QualityWeight=lastq_QualityWeight = par.getBool("QualityWeight");
pp.NormalCheck=lastq_PreserveNormal = par.getBool("PreserveNormal");
pp.OptimalPlacement=lastq_OptimalPlacement = par.getBool("OptimalPlacement");
pp.QualityQuadric=lastq_PlanarQuadric = par.getBool("PlanarQuadric");
pp.QualityQuadricWeight=lastq_PlanarWeight = par.getFloat("PlanarWeight");
lastq_Selected = par.getBool("Selected");
QuadricSimplification(m.cm,TargetFaceNum,lastq_Selected,pp, cb);
if(par.getBool("AutoClean"))
{
int nullFaces=tri::Clean<CMeshO>::RemoveFaceOutOfRangeArea(m.cm,0);
if(nullFaces) log( "PostSimplification Cleaning: Removed %d null faces", nullFaces);
int deldupvert=tri::Clean<CMeshO>::RemoveDuplicateVertex(m.cm);
if(deldupvert) log( "PostSimplification Cleaning: Removed %d duplicated vertices", deldupvert);
int delvert=tri::Clean<CMeshO>::RemoveUnreferencedVertex(m.cm);
if(delvert) log( "PostSimplification Cleaning: Removed %d unreferenced vertices",delvert);
m.clearDataMask(MeshModel::MM_FACEFACETOPO );
tri::Allocator<CMeshO>::CompactVertexVector(m.cm);
tri::Allocator<CMeshO>::CompactFaceVector(m.cm);
}
m.updateBoxAndNormals();
tri::UpdateNormal<CMeshO>::NormalizePerFace(m.cm);
tri::UpdateNormal<CMeshO>::PerVertexFromCurrentFaceNormal(m.cm);
tri::UpdateNormal<CMeshO>::NormalizePerVertex(m.cm);
} break;
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION:
{
m.updateDataMask(MeshModel::MM_VERTFACETOPO | MeshModel::MM_VERTMARK);
tri::UpdateFlags<CMeshO>::FaceBorderFromVF(m.cm);
if (!tri::Clean<CMeshO>::HasConsistentPerWedgeTexCoord(m.cm)) {
throw MLException(
"Mesh has some inconsistent tex coordinates (some faces without texture)");
}
int TargetFaceNum = par.getInt("TargetFaceNum");
if (par.getFloat("TargetPerc") != 0)
TargetFaceNum = m.cm.fn * par.getFloat("TargetPerc");
tri::TriEdgeCollapseQuadricTexParameter pp;
lastqtex_QualityThr = pp.QualityThr = par.getFloat("QualityThr");
lastqtex_extratw = pp.ExtraTCoordWeight = par.getFloat("Extratcoordw");
lastq_OptimalPlacement = pp.OptimalPlacement = par.getBool("OptimalPlacement");
lastq_PreserveBoundary = pp.PreserveBoundary = par.getBool("PreserveBoundary");
pp.BoundaryWeight = pp.BoundaryWeight * par.getFloat("BoundaryWeight");
lastq_PlanarQuadric = pp.QualityQuadric = par.getBool("PlanarQuadric");
lastq_PreserveNormal = pp.NormalCheck = par.getBool("PreserveNormal");
lastq_Selected = par.getBool("Selected");
QuadricTexSimplification(m.cm, TargetFaceNum, lastq_Selected, pp, cb);
m.updateBoxAndNormals();
tri::UpdateNormal<CMeshO>::NormalizePerFace(m.cm);
tri::UpdateNormal<CMeshO>::PerVertexFromCurrentFaceNormal(m.cm);
tri::UpdateNormal<CMeshO>::NormalizePerVertex(m.cm);
}
break;
case FP_EXPLICIT_ISOTROPIC_REMESHING:
{
m.updateDataMask( MeshModel::MM_FACEFACETOPO | MeshModel::MM_VERTFACETOPO |
MeshModel::MM_VERTQUALITY | MeshModel::MM_FACEMARK |
MeshModel::MM_FACEFLAG | MeshModel::MM_VERTMARK );
tri::Clean<CMeshO>::RemoveDuplicateVertex(m.cm);
tri::Clean<CMeshO>::RemoveUnreferencedVertex(m.cm);
tri::Allocator<CMeshO>::CompactEveryVector(m.cm);
m.updateBoxAndNormals();
CMeshO toProjectCopy = m.cm;
toProjectCopy.face.EnableMark();
tri::IsotropicRemeshing<CMeshO>::Params params;
params.SetTargetLen(par.getAbsPerc("TargetLen"));
params.SetFeatureAngleDeg(par.getFloat("FeatureDeg"));
params.maxSurfDist = par.getFloat("MaxSurfDist");
params.iter = par.getInt("Iterations");
params.adapt = par.getBool("Adaptive");
params.selectedOnly = par.getBool("SelectedOnly");
params.splitFlag = par.getBool("SplitFlag");
params.collapseFlag = par.getBool("CollapseFlag");
params.swapFlag = par.getBool("SwapFlag");
params.smoothFlag = par.getBool("SmoothFlag");
params.projectFlag = par.getBool("ReprojectFlag");
params.surfDistCheck= par.getBool("CheckSurfDist");
lastisor_Iterations = params.iter;
lastisor_RemeshingAdaptivity = params.adapt;
lastisor_SelectedOnly = params.selectedOnly;
lastisor_RefineFlag = params.splitFlag;
lastisor_CollapseFlag = params.collapseFlag;
lastisor_SwapFlag = params.swapFlag;
lastisor_SmoothFlag = params.smoothFlag;
lastisor_ProjectFlag = params.projectFlag;
lastisor_CheckSurfDist = params.surfDistCheck;
lastisor_MaxSurfDist= par.getFloat("MaxSurfDist");
lastisor_FeatureDeg = par.getFloat("FeatureDeg");
try
{
tri::IsotropicRemeshing<CMeshO>::Do(m.cm, toProjectCopy, params, cb);
}
catch(vcg::MissingPreconditionException& excp)
{
log(excp.what());
throw MLException(excp.what());
}
m.updateBoxAndNormals();
// m.clearDataMask(MeshModel::MM_GEOMETRY_AND_TOPOLOGY_CHANGE | MeshModel::MM_FACEFACETOPO | MeshModel::MM_VERTQUALITY | MeshModel::MM_FACEMARK | MeshModel::MM_FACEFLAG);
} break;
case FP_ROTATE_FIT:
{
Box3m selBox; //boundingbox of the selected vertices
std::vector< Point3m > selected_pts; //copy of selected vertices, for plane fitting
if (m.cm.svn == 0 && m.cm.sfn == 0) // if no selection, fail
{
log("Cannot compute rotation: there is no selection");
throw MLException("Cannot compute rotation: there is no selection");
}
if (m.cm.svn == 0 && m.cm.sfn > 0) // if no vert selected, but some faces selected, use their vertices
{
tri::UpdateSelection<CMeshO>::VertexClear(m.cm);
tri::UpdateSelection<CMeshO>::VertexFromFaceLoose(m.cm);
}
for (CMeshO::VertexIterator vi = m.cm.vert.begin(); vi != m.cm.vert.end(); ++vi)
if (!(*vi).IsD() && (*vi).IsS())
{
Point3m p = (*vi).P();
selBox.Add(p);
selected_pts.push_back(p);
}
log("Using %i vertices to build a fitting plane", int(selected_pts.size()));
Plane3m plane;
FitPlaneToPointSet(selected_pts, plane);
float errorSum = 0;
for (size_t i = 0; i < selected_pts.size(); ++i)
errorSum += fabs(SignedDistancePlanePoint(plane, selected_pts[i]));
float fpAvgError = errorSum / float(selected_pts.size());
Point3m fpNormal(plane.Direction().X(), plane.Direction().Y(), plane.Direction().Z());
log("Fitting Plane avg error is %f", fpAvgError);
log("Fitting Plane normal is [%f, %f, %f]", fpNormal.X(), fpNormal.Y(), fpNormal.Z());
outputValues["fitting_plane_avg_error"] = QVariant::fromValue(fpAvgError);
outputValues["fitting_plane_normal"] = QVariant::fromValue(fpNormal);
Matrix44m tr1; // translation matrix the centroid of selected points
tr1.SetTranslate(-selBox.Center());
Point3m targetPlane;
Point3m rotAxis;
float angleRad;
switch (par.getEnum("targetPlane"))
{
case 0:
targetPlane = Point3m(0, 0, 1);
break;
case 1:
targetPlane = Point3m(1, 0, 0);
break;
case 2:
targetPlane = Point3m(0, 1, 0);
break;
}
rotAxis = targetPlane ^ plane.Direction();
angleRad = Angle(targetPlane, plane.Direction());
if(par.getEnum("rotAxis")!=0)
{
Point3m projDir;
switch (par.getEnum("rotAxis"))
{
case 1:
rotAxis = -Point3m(1, 0, 0);
projDir = Point3m(0.0, plane.Direction().Y(), plane.Direction().Z());
break;
case 2:
rotAxis = -Point3m(0, 1, 0);
projDir = Point3m(plane.Direction().X(), 0.0, plane.Direction().Z());
break;
case 3:
rotAxis = -Point3m(0, 0, 1);
projDir = Point3m(plane.Direction().X(), plane.Direction().Y(), 0.0);
break;
}
angleRad = Angle(targetPlane, projDir);
float angleSign = (targetPlane ^ projDir) * rotAxis;
if (angleSign < 0)
angleRad = -angleRad;
else if (angleSign == 0)
angleRad = 0;
}
rotAxis.Normalize();
Matrix44m rt;
rt.SetRotateRad(-angleRad, rotAxis);
log("Rotation axis is [%f, %f, %f]", rotAxis.X(), rotAxis.Y(), rotAxis.Z());
log("Rotation angle is %f", -angleRad);
outputValues["rotation_axis"] = QVariant::fromValue(rotAxis);
outputValues["rotation_angle"] = QVariant::fromValue(-angleRad);
Matrix44m transfM;
if (par.getBool("ToOrigin"))
transfM = rt*tr1;
else
transfM = rt;
ApplyTransform(md, transfM, par.getBool("allLayers"), par.getBool("Freeze"), false, false);
} break;
case FP_ROTATE :
{
Matrix44m trRot, trTran, trTranInv, transfM;
Point3m axis, tranVec;
switch(par.getEnum("rotAxis"))
{
case 0: axis=Point3m(1,0,0); break;
case 1: axis=Point3m(0,1,0);break;
case 2: axis=Point3m(0,0,1);break;
case 3: axis=par.getPoint3m("customAxis");break;
}
switch(par.getEnum("rotCenter"))
{
case 0: tranVec=Point3m(0,0,0); break;
case 1: tranVec= m.cm.Tr * m.cm.bbox.Center(); break;
case 2: tranVec=par.getPoint3m("customCenter");break;
}
Scalarm angleDeg= par.getDynamicFloat("angle");
Scalarm snapAngle = par.getFloat("snapAngle");
if(par.getBool("snapFlag"))
{
angleDeg = floor(angleDeg / snapAngle)*snapAngle;
//par.setValue("angle", DynamicFloatValue(angleDeg));
}
trRot.SetRotateDeg(angleDeg,axis);
trTran.SetTranslate(tranVec);
trTranInv.SetTranslate(-tranVec);
transfM = trTran*trRot*trTranInv;
ApplyTransform(md,transfM,par.getBool("allLayers"),par.getBool("Freeze"));
} break;
case FP_PRINCIPAL_AXIS:
{
Matrix44m transfM; transfM.SetIdentity();
if(par.getBool("pointsFlag"))
{
Matrix33m cov;
Point3m bp(0,0,0);
vector<Point3m> PtVec;
for(CMeshO::VertexIterator vi=m.cm.vert.begin(); vi!=m.cm.vert.end();++vi)
if(!(*vi).IsD()) {
PtVec.push_back((*vi).cP());
bp+=(*vi).cP();
}
bp/=m.cm.vn;
cov.Covariance(PtVec,bp);
for(int i=0;i<3;i++)
qDebug("%8.3f %8.3f %8.3f ",cov[i][0],cov[i][1],cov[i][2]);
qDebug("\n");
Matrix33f eigenvecMatrix;
Point3f eigenvecVector;
Eigen::Matrix3d em;
cov.ToEigenMatrix(em);
Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig(em);
Eigen::Vector3d c_val = eig.eigenvalues();
Eigen::Matrix3d c_vec = eig.eigenvectors();
eigenvecMatrix.FromEigenMatrix(c_vec);
eigenvecVector.FromEigenVector(c_val);
for(int i=0;i<3;i++)
qDebug("%8.3f %8.3f %8.3f ",eigenvecMatrix[i][0],eigenvecMatrix[i][1],eigenvecMatrix[i][2]);
qDebug("\n%8.3f %8.3f %8.3f ",eigenvecVector[0],eigenvecVector[1],eigenvecVector[2]);
for(int i=0;i<3;++i)
for(int j=0;j<3;++j)
transfM[i][j] = eigenvecMatrix[i][j];
transfM.transposeInPlace();
if(transfM.Determinant()<0)
for(int i=0;i<3;++i) transfM[2][i]=-transfM[2][i];
qDebug("Determinant %f", transfM.Determinant());
}
else
{
tri::Inertia<CMeshO> I(m.cm);
Matrix33m PCA;
Point3m pcav;
I.InertiaTensorEigen(PCA,pcav);
for(int i=0;i<3;i++)
qDebug("%8.3f %8.3f %8.3f",PCA[i][0],PCA[i][1],PCA[i][2]);
PCA.transposeInPlace();
qDebug("Determinant %f", PCA.Determinant());
for(int i=0;i<3;i++)
qDebug("%8.3f %8.3f %8.3f",PCA[i][0],PCA[i][1],PCA[i][2]);
for(int i=0;i<3;++i)
for(int j=0;j<3;++j)
transfM[i][j] = PCA[i][j];
if(transfM.Determinant()<0)
for(int i=0;i<3;++i) transfM[2][i]=-transfM[2][i];
}
ApplyTransform(md, transfM, par.getBool("allLayers"), par.getBool("Freeze"), false, false);
} break;
case FP_CENTER:
{
Matrix44m transfM;
Point3m translation(0.0, 0.0, 0.0);
translation.X() = par.getDynamicFloat("axisX");
translation.Y() = par.getDynamicFloat("axisY");
translation.Z() = par.getDynamicFloat("axisZ");
switch (par.getEnum("traslMethod"))
{
case 0: break; //we already got it from interface
case 1: translation = -(m.cm.Tr * md.bbox().Center()); break; // we consider current transformation when finding bbox center
case 2: translation = -(m.cm.Tr * m.cm.bbox.Center()); break; // we consider current transformation when finding bbox center
case 3: translation = -par.getPoint3m("newOrigin"); break;
}
transfM.SetTranslate(translation);
ApplyTransform(md,transfM,par.getBool("allLayers"),par.getBool("Freeze"));
} break;
case FP_SCALE:
{
Matrix44m transfM, scaleTran, trTran, trTranInv;
Point3m tranVec;
Box3m scalebb;
if(par.getBool("allLayers"))
scalebb = md.bbox();
else
scalebb=md.mm()->cm.trBB();
Scalarm xScale = par.getFloat("axisX");
Scalarm yScale = par.getFloat("axisY");
Scalarm zScale = par.getFloat("axisZ");
if (par.getBool("uniformFlag"))
scaleTran.SetScale(xScale, xScale, xScale);
else
scaleTran.SetScale(xScale, yScale, zScale);
if (par.getBool("unitFlag"))
{
float maxSide = max(scalebb.DimX(), max(scalebb.DimY(), scalebb.DimZ()));
scaleTran.SetScale(1.0 / maxSide, 1.0 / maxSide, 1.0 / maxSide);
}
switch (par.getEnum("scaleCenter"))
{
case 0: tranVec = Point3m(0, 0, 0); break;
case 1: tranVec = m.cm.Tr * scalebb.Center(); break;
case 2: tranVec = par.getPoint3m("customCenter"); break;
}
trTran.SetTranslate(tranVec);
trTranInv.SetTranslate(-tranVec);
transfM = trTran*scaleTran*trTranInv;
ApplyTransform(md,transfM,par.getBool("allLayers"),par.getBool("Freeze"));
} break;
case FP_FLIP_AND_SWAP:
{
Matrix44m tr; tr.SetIdentity();
if(par.getBool("flipX")) { Matrix44m flipM; flipM.SetIdentity(); flipM[0][0]=-1.0f; tr *= flipM; }
if(par.getBool("flipY")) { Matrix44m flipM; flipM.SetIdentity(); flipM[1][1]=-1.0f; tr *= flipM; }
if(par.getBool("flipZ")) { Matrix44m flipM; flipM.SetIdentity(); flipM[2][2]=-1.0f; tr *= flipM; }
if(par.getBool("swapXY")) { Matrix44m swapM; swapM.SetIdentity();
swapM[0][0]=0.0f; swapM[0][1]=1.0f;
swapM[1][0]=1.0f; swapM[1][1]=0.0f;
tr *= swapM; }
if(par.getBool("swapXZ")) { Matrix44m swapM; swapM.SetIdentity();
swapM[0][0]=0.0f; swapM[0][2]=1.0f;
swapM[2][0]=1.0f; swapM[2][2]=0.0f;
tr *= swapM; }
if(par.getBool("swapYZ")) { Matrix44m swapM; swapM.SetIdentity();
swapM[1][1]=0.0f; swapM[1][2]=1.0f;
swapM[2][1]=1.0f; swapM[2][2]=0.0f;
tr *= swapM; }
ApplyTransform(md,tr,par.getBool("allLayers"),par.getBool("Freeze"));
} break;
case FP_NORMAL_EXTRAPOLATION :
{
tri::PointCloudNormal<CMeshO>::Param p;
p.fittingAdjNum = par.getInt("K");
p.smoothingIterNum = par.getInt("smoothIter");
p.viewPoint = par.getPoint3m("viewPos");
p.useViewPoint = par.getBool("flipFlag");
tri::PointCloudNormal<CMeshO>::Compute(m.cm, p,cb);
} break;
case FP_NORMAL_SMOOTH_POINTCLOUD :
{
tri::Allocator<CMeshO>::CompactVertexVector(m.cm);
tri::Smooth<CMeshO>::VertexNormalPointCloud(m.cm,par.getInt("K"),1);
} break;
case FP_COMPUTE_PRINC_CURV_DIR:
{
float CurvatureScale = par.getAbsPerc("Scale");
m.updateDataMask(MeshModel::MM_VERTFACETOPO | MeshModel::MM_FACEFACETOPO);
m.updateDataMask(MeshModel::MM_VERTCURV | MeshModel::MM_VERTCURVDIR);
m.updateDataMask(MeshModel::MM_VERTCOLOR | MeshModel::MM_VERTQUALITY);
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m.cm) >0 ) {
throw MLException("Mesh has some not 2-manifold faces, cannot compute principal curvature directions");
}
tri::UpdateNormal<CMeshO>::NormalizePerVertex(m.cm);
if(par.getBool("Autoclean")){
int delvert=tri::Clean<CMeshO>::RemoveUnreferencedVertex(m.cm);
tri::Allocator<CMeshO>::CompactVertexVector(m.cm);
log( "Removed %d unreferenced vertices",delvert);
}
switch(par.getEnum("Method"))
{
case 0: tri::UpdateCurvature<CMeshO>::PrincipalDirections(m.cm); break;
case 1: tri::UpdateCurvature<CMeshO>::PrincipalDirectionsPCA(m.cm,CurvatureScale,true,cb); break;
case 2: tri::UpdateCurvature<CMeshO>::PrincipalDirectionsNormalCycle(m.cm); break;
case 3: tri::UpdateCurvatureFitting<CMeshO>::computeCurvature(m.cm); break;
case 4: tri::UpdateCurvatureFitting<CMeshO>::updateCurvatureLocal(m.cm,CurvatureScale,cb); break;
default:assert(0);break;
}
switch(par.getEnum("CurvColorMethod"))
{
case 0: tri::UpdateQuality<CMeshO>::VertexMeanFromCurvatureDir (m.cm); break;
case 1: tri::UpdateQuality<CMeshO>::VertexGaussianFromCurvatureDir(m.cm); break;
case 2: tri::UpdateQuality<CMeshO>::VertexMinCurvFromCurvatureDir(m.cm); break;
case 3: tri::UpdateQuality<CMeshO>::VertexMaxCurvFromCurvatureDir(m.cm); break;
case 4: tri::UpdateQuality<CMeshO>::VertexShapeIndexFromCurvatureDir(m.cm); break;
case 5: tri::UpdateQuality<CMeshO>::VertexCurvednessFromCurvatureDir(m.cm); break;
case 6: tri::UpdateQuality<CMeshO>::VertexConstant(m.cm,0); break;
}
Histogram<Scalarm> H;
tri::Stat<CMeshO>::ComputePerVertexQualityHistogram(m.cm,H);
tri::UpdateColor<CMeshO>::PerVertexQualityRamp(m.cm,H.Percentile(0.1f),H.Percentile(0.9f));
log( "Curvature Range: %f %f (Used 90 percentile %f %f) ",H.MinV(),H.MaxV(),H.Percentile(0.1f),H.Percentile(0.9f));
} break;
case FP_CLOSE_HOLES:
{
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
// Hole-filling (tail of the FP_CLOSE_HOLES case): requires an edge-manifold
// mesh so FF adjacency is well defined for the ear-cutting algorithms.
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m.cm) > 0){
throw MLException("Mesh has some not 2-manifold edges, filter requires edge manifoldness");
}
size_t OriginalSize= m.cm.face.size(); // face count before filling; faces at index >= OriginalSize are the new ones
int MaxHoleSize = par.getInt("MaxHoleSize");
bool SelectedFlag = par.getBool("Selected");
bool SelfIntersectionFlag = par.getBool("SelfIntersection");
bool NewFaceSelectedFlag = par.getBool("NewFaceSelected");
bool RefineHoleFlag = par.getBool("RefineHole");
float RefineHoleEdgeLen = par.getAbsPerc("RefineHoleEdgeLen");
int holeCnt;
// Two ear-cutting variants: the first also rejects ears creating self-intersections.
if( SelfIntersectionFlag )
holeCnt = tri::Hole<CMeshO>::EarCuttingIntersectionFill<tri::SelfIntersectionEar< CMeshO> >(m.cm,MaxHoleSize,SelectedFlag,cb);
else
holeCnt = tri::Hole<CMeshO>::EarCuttingFill<vcg::tri::MinimumWeightEar< CMeshO> >(m.cm,MaxHoleSize,SelectedFlag,cb);
// Cast the size_t difference to int: passing a size_t through the "%i"
// varargs conversion is undefined behavior on 64-bit platforms (the cast
// matches how the same value is stored in outputValues below).
log("Closed %i holes and added %i new faces",holeCnt,(int)(m.cm.fn-OriginalSize));
outputValues["closed_holes"] = holeCnt;
outputValues["new_faces"] = (int)(m.cm.fn-OriginalSize);
assert(tri::Clean<CMeshO>::IsFFAdjacencyConsistent(m.cm));
m.updateBoxAndNormals();
// hole filling filter does not correctly update the border flags (but the topology is still ok!)
if(NewFaceSelectedFlag)
{
// Select exactly the faces created by the filling step.
tri::UpdateSelection<CMeshO>::FaceClear(m.cm);
for(size_t i=OriginalSize;i<m.cm.face.size();++i)
if(!m.cm.face[i].IsD()) m.cm.face[i].SetS();
}
if(RefineHoleFlag)
{
m.updateDataMask( MeshModel::MM_FACEFACETOPO | MeshModel::MM_VERTFACETOPO |
MeshModel::MM_VERTQUALITY | MeshModel::MM_FACEMARK |
MeshModel::MM_FACEFLAG | MeshModel::MM_VERTMARK );
tri::IsotropicRemeshing<CMeshO>::Params params;
params.SetFeatureAngleDeg(181.0f); // > 180 deg: treat no edge as a feature
params.adapt = false;
params.selectedOnly = true; // remesh only the freshly filled (selected) region
params.splitFlag = true;
params.collapseFlag = true;
params.swapFlag = true;
params.smoothFlag = true;
params.projectFlag = false;
params.surfDistCheck= false;
// Refinement and smoothing can be tricky. Usually it is good to
// 1) start with large tris to get fast convergence to the min surf
// 2) switch a bit to small tri to unfold bad things at the boundary
// 3) go for the desired edge len
// Rinse and repeat.
for(int k=0;k<3;k++)
{
params.SetTargetLen(RefineHoleEdgeLen*3.0); params.iter = 5;
tri::IsotropicRemeshing<CMeshO>::Do(m.cm, params);
params.SetTargetLen(RefineHoleEdgeLen/3.0); params.iter = 3;
tri::IsotropicRemeshing<CMeshO>::Do(m.cm, params);
params.SetTargetLen(RefineHoleEdgeLen ); params.iter = 2;
tri::IsotropicRemeshing<CMeshO>::Do(m.cm, params);
}
}
} break;
case FP_CYLINDER_UNWRAP:
{
// Unroll the mesh around the Y axis into a new layer ("Unrolled Mesh"):
// for every vertex, X becomes the (scaled) angle, Y is kept and Z becomes
// the distance from the axis. The input mesh is left untouched.
Scalarm startAngleDeg = par.getFloat("startAngle");
Scalarm endAngleDeg = par.getFloat("endAngle");
Scalarm radius = par.getFloat("radius");
// Number of unrolling. (e.g. if the user set start=-15 end=375 there are two loops)
int numLoop = int(1+(endAngleDeg-startAngleDeg)/360.0);
// VertRefLoop[k][v] maps source vertex index v to the index of its copy in
// unroll loop k, or -1 if that vertex has no copy in that loop.
vector< vector<int> > VertRefLoop(numLoop);
for(int i=0;i<numLoop;++i)
VertRefLoop[i].resize(m.cm.vert.size(),-1);
log("Computing %i loops from %f to %f",numLoop,startAngleDeg,endAngleDeg);
MeshModel *um=md.addNewMesh("","Unrolled Mesh");
um->updateDataMask(&m);
for (const std::string& tex: m.cm.textures) {
um->addTexture(tex, m.getTexture(tex));
}
float avgZ=0;
CMeshO::VertexIterator vi;
// First loop duplicate accordingly the vertices.
for(vi=m.cm.vert.begin();vi!=m.cm.vert.end();++vi)
if(!(*vi).IsD())
{
Point3m p = (*vi).P();
CMeshO::ScalarType ro,theta,phi;
// Flatten onto the XZ plane before converting to polar coordinates.
p.Y()=0;
p.ToPolarRad(ro,theta,phi);
float thetaDeg = math::ToDeg(theta);
int loopIndex =0;
// One copy of the vertex per 360-degree loop whose range contains it.
while(thetaDeg<endAngleDeg)
{
if(thetaDeg>=startAngleDeg)
{
CMeshO::VertexIterator nvi = tri::Allocator<CMeshO>::AddVertices(um->cm,1);
VertRefLoop[loopIndex][vi-m.cm.vert.begin()] = nvi - um->cm.vert.begin();
nvi->ImportData(*vi);
nvi->P().X()=-math::ToRad(thetaDeg);
nvi->P().Y()=vi->P().Y();
nvi->P().Z()=ro;
//nvi->N()=(*vi).N();
nvi->C()=(*vi).C();
avgZ += nvi->P().Z();
}
thetaDeg+=360;
loopIndex++;
}
}
// Now correct the x width with the average radius
// NOTE(review): if no vertex falls inside [startAngle,endAngle], um has no
// vertices and this divides by zero — confirm whether that case is possible.
avgZ = avgZ/um->cm.vert.size();
if(radius != 0) avgZ = radius; // if the user desire to override that value.
for(vi=um->cm.vert.begin();vi!=um->cm.vert.end();++vi)
vi->P().X()*=avgZ;
// Second Loop Process Faces
// Note the particolar care to manage the faces that jumps from one side to another.
CMeshO::FaceIterator fi;
for(fi=m.cm.face.begin();fi!=m.cm.face.end();++fi)
if(!(*fi).IsD())
{
int loopIndex=0;
while(loopIndex<numLoop)
{
// For each corner, try the copy in the current loop and (when available)
// in the next loop, so faces that straddle the seam are stitched.
int endIt = min(2,numLoop-loopIndex);
for(int ii0=0;ii0<endIt;ii0++)
{
for(int ii1=0;ii1<endIt;ii1++)
{
for(int ii2=0;ii2<endIt;ii2++)
{
int i0 = VertRefLoop[loopIndex+ii0][(*fi).V(0)-&m.cm.vert[0]];
int i1 = VertRefLoop[loopIndex+ii1][(*fi).V(1)-&m.cm.vert[0]];
int i2 = VertRefLoop[loopIndex+ii2][(*fi).V(2)-&m.cm.vert[0]];
if(i0>=0 && i1>=0 && i2>=0)
{
// skip faces larger than 1/10 of the radius...
if( (Distance(um->cm.vert[i0].P(),um->cm.vert[i1].P()) < avgZ/10.0) &&
(Distance(um->cm.vert[i0].P(),um->cm.vert[i2].P()) < avgZ/10.0) )
{
CMeshO::FaceIterator nfi = tri::Allocator<CMeshO>::AddFaces(um->cm,1);
(*nfi).ImportData(*fi);
nfi->V(0) = &um->cm.vert[i0];
nfi->V(1) = &um->cm.vert[i1];
nfi->V(2) = &um->cm.vert[i2];
}
}
}
}
}
loopIndex++;
}
}
m.updateBoxAndNormals();
} break;
case FP_REFINE_HALF_CATMULL:
{
// Half step of Catmull-Clark refinement: a tri/quad mesh is turned into a
// pure-quad mesh by refinement (MakePureByRefine).
if (!vcg::tri::BitQuadCreation<CMeshO>::IsTriQuadOnly(m.cm))
{
throw MLException("To be applied filter <i>" + filter->text() + "</i> requires a mesh with only triangular and/or quad faces.");
}
m.updateDataMask(MeshModel::MM_FACEQUALITY | MeshModel::MM_FACEFACETOPO);
tri::BitQuadCreation<CMeshO>::MakePureByRefine(m.cm);
tri::UpdateNormal<CMeshO>::PerBitQuadFaceNormalized(m.cm);
// FF topology is stale after refinement; the result is a polygonal mesh.
m.clearDataMask( MeshModel::MM_FACEFACETOPO);
m.updateDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_REFINE_CATMULL :
{
// Full Catmull-Clark style refinement of a tri/quad mesh.
if (!vcg::tri::BitQuadCreation<CMeshO>::IsTriQuadOnly(m.cm))
{
throw MLException("To be applied filter <i>" + filter->text() + "</i> requires a mesh with only triangular and/or quad faces.");
}
// in practice it is just a simple double application of the FP_REFINE_HALF_CATMULL.
m.updateDataMask(MeshModel::MM_FACEQUALITY | MeshModel::MM_FACEFACETOPO);
tri::BitQuadCreation<CMeshO>::MakePureByRefine(m.cm);
tri::BitQuadCreation<CMeshO>::MakePureByRefine(m.cm);
tri::UpdateNormal<CMeshO>::PerBitQuadFaceNormalized(m.cm);
m.clearDataMask(MeshModel::MM_FACEFACETOPO);
m.updateDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_REFINE_DOOSABIN :
{
// Doo-Sabin subdivision: export the layer to a polygonal mesh (PMesh),
// refine it, then import the refined polygon mesh back into the layer.
PMesh baseIn, refinedOut;
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
tri::PolygonSupport<CMeshO,PMesh>::ImportFromTriMesh(baseIn,m.cm);
// Compact the polygon mesh before refining it.
tri::Clean<PMesh>::RemoveUnreferencedVertex(baseIn);
tri::Allocator<PMesh>::CompactEveryVector(baseIn);
tri::DooSabin<PMesh>::Refine(baseIn, refinedOut);
// Replace the layer's content with the refined result.
m.cm.Clear();
tri::PolygonSupport<CMeshO,PMesh>::ImportFromPolyMesh(m.cm,refinedOut);
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
tri::UpdateTopology<CMeshO>::FaceFace(m.cm);
tri::UpdateNormal<CMeshO>::PerBitPolygonFaceNormalized(m.cm);
tri::UpdateNormal<CMeshO>::PerVertexFromCurrentFaceNormal(m.cm);
} break;
case FP_QUAD_PAIRING :
{
// Pair triangles into quads via edge flips; needs FF topology and
// per-face quality, and an edge-manifold mesh.
m.updateDataMask(MeshModel::MM_FACEQUALITY | MeshModel::MM_FACEFACETOPO );
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m.cm) > 0)
{
// Message fixed: the check above counts non-manifold *edges* (same wording
// as the other edge-manifoldness checks in this file).
throw MLException("Mesh has some not 2-manifold edges, filter requires edge manifoldness");
}
// Ensure an even triangle count so that pairing can cover every face.
tri::BitQuadCreation<CMeshO>::MakeTriEvenBySplit(m.cm);
bool ret = tri::BitQuadCreation<CMeshO>::MakePureByFlip(m.cm,100);
if(!ret) log("Warning BitQuadCreation<CMeshO>::MakePureByFlip failed.");
tri::UpdateNormal<CMeshO>::PerBitQuadFaceNormalized(m.cm);
m.updateDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_QUAD_DOMINANT:
{
// Turn the mesh into a quad-dominant one (BitQuadCreation::MakeDominant);
// "level" is the filter's enum parameter controlling the conversion.
m.updateDataMask(MeshModel::MM_FACEQUALITY | MeshModel::MM_FACEFACETOPO );
int level = par.getEnum("level");
vcg::tri::BitQuadCreation<CMeshO>::MakeDominant(m.cm,level);
tri::UpdateNormal<CMeshO>::PerBitQuadFaceNormalized(m.cm);
m.clearDataMask(MeshModel::MM_FACEFACETOPO);
m.updateDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_MAKE_PURE_TRI:
{
// Convert any bit-quad/polygonal faces back to plain triangles and drop
// the polygonal flag from the layer.
vcg::tri::BitQuadCreation<CMeshO>::MakeBitTriOnly(m.cm);
m.updateBoxAndNormals();
m.clearDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_FAUX_CREASE :
{
// Mark crease edges on the faces using the two signed dihedral-angle
// thresholds (FaceEdgeSelSignedCrease).
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
Scalarm AngleDegNeg = par.getFloat("AngleDegNeg");
Scalarm AngleDegPos = par.getFloat("AngleDegPos");
// tri::UpdateFlags<CMeshO>::FaceFauxCrease(m.cm,math::ToRad(AngleDeg));
tri::UpdateFlags<CMeshO>::FaceEdgeSelSignedCrease(m.cm, math::ToRad(AngleDegNeg), math::ToRad(AngleDegPos));
m.updateDataMask(MeshModel::MM_POLYGONAL);
} break;
case FP_FAUX_EXTRACT :
{
// Build a new edge-mesh layer from the currently selected face edges.
//WARNING!!!! the RenderMode(GLW::DMWire) should be useless but...
MeshModel *em= md.addNewMesh("","EdgeMesh",true/*,RenderMode(GLW::DMWire)*/);
BuildFromFaceEdgeSel(m.cm,em->cm);
} break;
case FP_VATTR_SEAM :
{
// Split vertices along attribute seams (AttributeSeam::SplitVertex):
// wherever the selected attributes (normal/color/texcoord) differ around a
// vertex, the vertex is duplicated. Position always takes part in the
// comparison mask.
unsigned int vmask = 0;
vmask |= vcg::tri::AttributeSeam::POSITION_PER_VERTEX;
// Which normal representation (if actually present on the mesh) is compared.
unsigned int nmask = 0;
switch (par.getEnum("NormalMode"))
{
case 0 : break;
case 1 : if (m.hasDataMask(MeshModel::MM_VERTNORMAL)) nmask |= vcg::tri::AttributeSeam::NORMAL_PER_VERTEX; break;
case 2 : if (m.hasDataMask(MeshModel::MM_WEDGNORMAL)) nmask |= vcg::tri::AttributeSeam::NORMAL_PER_WEDGE; break;
case 3 : if (m.hasDataMask(MeshModel::MM_FACENORMAL)) nmask |= vcg::tri::AttributeSeam::NORMAL_PER_FACE; break;
default : break;
}
if (nmask != 0) m.updateDataMask(MeshModel::MM_VERTNORMAL);
// Same selection logic for colors...
unsigned int cmask = 0;
switch (par.getEnum("ColorMode"))
{
case 0 : break;
case 1 : if (m.hasDataMask(MeshModel::MM_VERTCOLOR)) cmask |= vcg::tri::AttributeSeam::COLOR_PER_VERTEX; break;
case 2 : if (m.hasDataMask(MeshModel::MM_WEDGCOLOR)) cmask |= vcg::tri::AttributeSeam::COLOR_PER_WEDGE; break;
case 3 : if (m.hasDataMask(MeshModel::MM_FACECOLOR)) cmask |= vcg::tri::AttributeSeam::COLOR_PER_FACE; break;
default : break;
}
if (cmask != 0) m.updateDataMask(MeshModel::MM_VERTCOLOR);
// ...and for texture coordinates.
unsigned int tmask = 0;
switch (par.getEnum("TexcoordMode"))
{
case 0 : break;
case 1 : if (m.hasDataMask(MeshModel::MM_VERTTEXCOORD)) tmask |= vcg::tri::AttributeSeam::TEXCOORD_PER_VERTEX; break;
case 2 : if (m.hasDataMask(MeshModel::MM_WEDGTEXCOORD)) tmask |= vcg::tri::AttributeSeam::TEXCOORD_PER_WEDGE; break;
default : break;
}
if (tmask != 0) m.updateDataMask(MeshModel::MM_VERTTEXCOORD);
const unsigned int mask = vmask | nmask | cmask | tmask;
if (mask != 0)
{
vcg::tri::AttributeSeam::ASExtract<CMeshO, CMeshO> vExtract(mask);
vcg::tri::AttributeSeam::ASCompare<CMeshO> vCompare(mask);
const bool r = vcg::tri::AttributeSeam::SplitVertex(m.cm, vExtract, vCompare);
// Vertex duplication invalidates any cached topology.
m.clearDataMask(MeshModel::MM_FACEFACETOPO);
m.clearDataMask(MeshModel::MM_VERTFACETOPO);
if (!r)
throw MLException("Failed applying " + filter->text());
}
} break;
case FP_PERIMETER_POLYLINE:
{
// Extract the boundary of the current face selection as a polyline
// (edge mesh) placed in a new layer.
if (m.cm.sfn == 0) // no face selected, fail
{
log("ERROR: There is no face selection!");
throw MLException("ERROR: There is no face selection!");
}
log("Selection is %i triangles", m.cm.sfn);
md.mm()->updateDataMask(MeshModel::MM_FACEFACETOPO);
// new layer
QString newLayerName = QFileInfo(m.shortName()).baseName() + "_perimeter";
MeshModel* perimeter = md.addNewMesh("", newLayerName, true);
perimeter->clear();
// Rotation-only part of the layer transform: positions get the full Tr,
// normals only the rotation (translation column zeroed out).
Matrix44m rotM = m.cm.Tr;
rotM.SetColumn(3, Point3m(0.0, 0.0, 0.0));
for (CMeshO::FaceIterator fi = m.cm.face.begin(); fi != m.cm.face.end(); ++fi)
if (!(*fi).IsD())
if ((*fi).IsS())
{
for (int ei = 0; ei < 3; ei++)
{
CMeshO::FacePointer adjf = (*fi).FFp(ei);
// A perimeter edge is either a border edge (FF pointer loops back to
// the face itself) or an edge shared with an unselected face.
if (adjf == &(*fi) || !(adjf->IsS()))
{
CMeshO::VertexIterator nvi;
vcg::tri::Allocator<CMeshO>::AddEdges(perimeter->cm, 1);
nvi = vcg::tri::Allocator<CMeshO>::AddVertices(perimeter->cm, 2);
(*nvi).P() = m.cm.Tr * (*fi).V(ei)->P();
(*nvi).N() = rotM * (*fi).V(ei)->N();
perimeter->cm.edge.back().V(0) = &(*nvi);
nvi++;
(*nvi).P() = m.cm.Tr * (*fi).V((ei + 1) % 3)->P();
(*nvi).N() = rotM * (*fi).V((ei + 1) % 3)->N();
perimeter->cm.edge.back().V(1) = &(*nvi);
}
}
}
// finishing up the new layer
tri::Clean<CMeshO>::RemoveDuplicateVertex(perimeter->cm);
tri::UpdateBounding<CMeshO>::Box(perimeter->cm);
}break;
case FP_SLICE_WITH_A_PLANE:
{
// Cut the mesh with a plane: always creates a new layer holding the section
// polyline; optionally also a filled cross-section layer and the two parts
// of the surface split along the plane.
Point3m planeAxis(0,0,0);
int ind = par.getEnum("planeAxis");
// 0/1/2 select the X/Y/Z axis; anything else means a user-defined axis.
if(ind>=0 && ind<3)
planeAxis[ind] = 1.0f;
else
planeAxis=par.getPoint3m("customAxis");
planeAxis.Normalize();
Scalarm planeOffset = par.getFloat("planeOffset");
Point3m planeCenter;
Plane3m slicingPlane;
Box3m bbox=m.cm.bbox;
MeshModel* base=&m;
MeshModel* orig=&m;
m.updateDataMask(MeshModel::MM_FACEFACETOPO);
//actual cut of the mesh
if (tri::Clean<CMeshO>::CountNonManifoldEdgeFF(base->cm)>0 || (tri::Clean<CMeshO>::CountNonManifoldVertexFF(base->cm,false) != 0))
{
log("Mesh is not two manifold, cannot apply filter");
throw MLException("Mesh is not two manifold, cannot apply filter");
}
// the mesh has to be correctly transformed
if (m.cm.Tr != Matrix44m::Identity())
tri::UpdatePosition<CMeshO>::Matrix(m.cm, m.cm.Tr, true);
// Place the plane relative to the bbox center, the bbox min or the origin.
switch(RefPlane(par.getEnum("relativeTo")))
{
case REF_CENTER: planeCenter = bbox.Center()+ planeAxis*planeOffset*(bbox.Diag()/2.0); break;
case REF_MIN: planeCenter = bbox.min+planeAxis*planeOffset*(bbox.Diag()/2.0); break;
case REF_ORIG: planeCenter = planeAxis*planeOffset; break;
}
//planeCenter+=planeAxis*planeDist ;
slicingPlane.Init(planeCenter,planeAxis);
// making up new layer name
QString sectionName = QFileInfo(base->shortName()).baseName() + "_sect";
switch(ind)
{
case 0: sectionName.append("_X_"); break;
case 1: sectionName.append("_Y_"); break;
case 2: sectionName.append("_Z_"); break;
case 3: sectionName.append("_custom_"); break;
}
sectionName.append(QString::number(planeOffset));
// Section polyline layer: the plane/mesh intersection as an edge mesh.
MeshModel* cap= md.addNewMesh("",sectionName,true);
vcg::IntersectionPlaneMesh<CMeshO, CMeshO, CMeshO::ScalarType>(orig->cm, slicingPlane, cap->cm );
tri::Clean<CMeshO>::RemoveDuplicateVertex(cap->cm);
tri::UpdateBounding<CMeshO>::Box(cap->cm);
// the mesh has to return to its original position
if (m.cm.Tr != Matrix44m::Identity())
tri::UpdatePosition<CMeshO>::Matrix(m.cm, Inverse(m.cm.Tr), true);
if(par.getBool("createSectionSurface"))
{
// Triangulate the section polyline into a "filled" cross-section layer.
MeshModel* cap2= md.addNewMesh("",sectionName+"_filled");
tri::CapEdgeMesh(cap->cm, cap2->cm);
cap2->updateBoxAndNormals();
}
if(par.getBool("splitSurfaceWithSection"))
{
// Split the surface along the plane into an "under" and an "over" layer,
// using per-vertex quality = signed distance from the slicing plane.
MeshModel* underM= md.addNewMesh("",sectionName+"_under");
underM->updateDataMask(orig);
underM->updateDataMask(MeshModel::MM_FACEFACETOPO);
underM->updateDataMask(MeshModel::MM_VERTQUALITY);
for (const std::string& tex : orig->cm.textures) {
underM->addTexture(tex, orig->getTexture(tex));
}
tri::Append<CMeshO,CMeshO>::Mesh(underM->cm,orig->cm);
tri::UpdateQuality<CMeshO>::VertexFromPlane(underM->cm, slicingPlane);
tri::QualityMidPointFunctor<CMeshO> slicingfunc(0.0);
tri::QualityEdgePredicate<CMeshO> slicingpred(0.0,0.0);
tri::UpdateTopology<CMeshO>::FaceFace(underM->cm);
// NOTE(review): the log message below looks garbled ("manifoldfaces") and
// says "faces" while the check counts non-manifold edges — consider rewording.
if ( tri::Clean<CMeshO>::CountNonManifoldEdgeFF(underM->cm) > 0)
{
log("Mesh has some not 2 manifoldfaces, splitting surfaces requires manifoldness");
md.delMesh(underM->id());
}
else
{
// Refine the edges crossing the plane, then select the faces on one side:
// they are copied into the "over" layer and removed from the "under" one.
tri::RefineE<CMeshO, tri::QualityMidPointFunctor<CMeshO>, tri::QualityEdgePredicate<CMeshO> > (underM->cm, slicingfunc, slicingpred, false);
tri::UpdateSelection<CMeshO>::VertexFromQualityRange(underM->cm,0,std::numeric_limits<float>::max());
tri::UpdateSelection<CMeshO>::FaceFromVertexStrict(underM->cm);
tri::UpdateSelection<CMeshO>::FaceInvert(underM->cm);
tri::UpdateSelection<CMeshO>::VertexClear(underM->cm);
MeshModel* overM= md.addNewMesh("",sectionName+"_over");
overM->updateDataMask(underM);
for (const std::string& tex : underM->cm.textures) {
overM->addTexture(tex, underM->getTexture(tex));
}
tri::Append<CMeshO,CMeshO>::Mesh(overM->cm,underM->cm,true);
tri::UpdateSelection<CMeshO>::Clear(overM->cm);
tri::UpdateSelection<CMeshO>::VertexClear(underM->cm);
tri::UpdateSelection<CMeshO>::VertexFromFaceStrict(underM->cm);
// Delete from "under" the faces (and their vertices) that went to "over".
for(auto fi=underM->cm.face.begin();fi!=underM->cm.face.end();++fi)
if(!(*fi).IsD() && (*fi).IsS())
tri::Allocator<CMeshO>::DeleteFace(underM->cm,*fi);
for(auto vi=underM->cm.vert.begin();vi!=underM->cm.vert.end();++vi)
if(!(*vi).IsD() && (*vi).IsS())
tri::Allocator<CMeshO>::DeleteVertex(underM->cm,*vi);
underM->updateBoxAndNormals();
overM->updateBoxAndNormals();
}
}
} break;
default:
wrongActionCalled(filter);
}
return outputValues;
}
int ExtraMeshFilterPlugin::postCondition(const QAction * filter) const
{
switch (ID(filter))
{
case FP_ROTATE_FIT :
case FP_PRINCIPAL_AXIS :
case FP_FLIP_AND_SWAP :
case FP_SCALE :
case FP_CENTER :
case FP_ROTATE :
case FP_SET_TRANSFORM_PARAMS :
case FP_SET_TRANSFORM_MATRIX :
case FP_FREEZE_TRANSFORM : return MeshModel::MM_TRANSFMATRIX + MeshModel::MM_VERTCOORD + MeshModel::MM_VERTNORMAL + MeshModel::MM_FACENORMAL;
case FP_RESET_TRANSFORM :
case FP_INVERT_TRANSFORM : return MeshModel::MM_TRANSFMATRIX;
case FP_NORMAL_EXTRAPOLATION :
case FP_NORMAL_SMOOTH_POINTCLOUD : return MeshModel::MM_VERTNORMAL;
case FP_LOOP_SS :
case FP_BUTTERFLY_SS :
case FP_CLUSTERING :
case FP_QUADRIC_SIMPLIFICATION :
case FP_QUADRIC_TEXCOORD_SIMPLIFICATION :
case FP_EXPLICIT_ISOTROPIC_REMESHING :
case FP_MIDPOINT :
case FP_REORIENT :
case FP_INVERT_FACES :
case FP_CLOSE_HOLES :
case FP_REFINE_CATMULL :
case FP_REFINE_HALF_CATMULL :
case FP_QUAD_DOMINANT :
case FP_MAKE_PURE_TRI :
case FP_QUAD_PAIRING :
case FP_FAUX_CREASE :
case FP_FAUX_EXTRACT :
case FP_VATTR_SEAM :
case FP_REFINE_LS3_LOOP : return MeshModel::MM_GEOMETRY_AND_TOPOLOGY_CHANGE;
case FP_COMPUTE_PRINC_CURV_DIR : return MeshModel::MM_VERTFACETOPO | MeshModel::MM_FACEFACETOPO | MeshModel::MM_VERTCURV | MeshModel::MM_VERTCURVDIR | MeshModel::MM_VERTCOLOR | MeshModel::MM_VERTQUALITY;
case FP_SLICE_WITH_A_PLANE :
case FP_PERIMETER_POLYLINE :
case FP_CYLINDER_UNWRAP : return MeshModel::MM_NONE; // they create a new layer
default : return MeshModel::MM_ALL;
}
}
MESHLAB_PLUGIN_NAME_EXPORTER(ExtraMeshFilterPlugin)
```
|
Alain Sylvain Amougou (Ukrainian: Ален Сільвен Амугу born 6 September 1973 in Yaounde, Cameroon) is a Cameroonian retired professional footballer.
Career
South Africa
Recruited by Mamelodi Sundowns of the South African Premier Division for 1999/00, Amougou racked up 15 goals that season, showing the ability of his left foot, but his next two years at Chloorkop were plagued by injuries and other problems.
Speculation about him being arrested abated when he was called to front the Sundowns' attack in the first round of the 2000 CAF Champions League.
Ukraine
Reinforcing Metalist Kharkiv of the Ukrainian Premier League in 2002, as the first African at the club, the Cameroonian racked up 4 league and 1 cup outings there, debuting on 27 July in a 1–1 tie with Obolon before being released at the end of 2002. Two reasons for his release are that he did not live up to expectations and that he did not like the Ukrainian environment.
Sweden
Set to complete a move to IFK Norrköping of the Swedish Allsvenskan in 2002, the forward was ultimately rejected by Peking, causing him to return home.
References
External links
Ukrainian Wikipedia Page
Allayers.in.ua Profile
1973 births
Footballers from Yaoundé
Men's association football forwards
Expatriate men's soccer players in South Africa
Expatriate men's footballers in Réunion
Cameroonian men's footballers
Living people
Cameroonian expatriate men's footballers
Expatriate men's footballers in Ukraine
Mamelodi Sundowns F.C. players
FC Metalist Kharkiv players
AS Saint-Louisienne players
Ukrainian Premier League players
Cameroonian expatriate sportspeople in Ukraine
|
St. Werburgh's Church, Spondon, is a parish church in the Inclusive Anglo-Catholic tradition of the Church of England located in Spondon, Derbyshire.
History
The present church dates from around 1390, when it replaced an earlier church destroyed by fire in 1340. It was re-listed as Grade II in 2012.
The main body of the church, both nave and chancel, along with the 35 metre high tower and spire, date to 1390, although the north wall of the nave was damaged by subsidence and was rebuilt in 1826.
The church was restored in 1892 which removed many of the changes made in 1826. The flat roof of 1826 was replaced with a pitched roof, and other roofs were raised in height. The north aisle was rebuilt and the exterior wall was moved out by 5 ft. Plaster from the walls and the pillars was removed, to reveal the original stonework. The arch under the tower was opened up, and the galleries were removed. The architect was John Oldrid Scott of London, and the contractor was Rudd of Grantham.
There have been recent extensive renovations to the tower and spire, funded partly through Heritage Lottery funding. An ongoing project will restore the floor of the nave, improve welcome facilities and reorder the sanctuary, adding a permanent nave altar in front of the choir stalls on an extended nave altar platform.
Previous incumbents include Canon Richard Andrews, who now serves as Canon Precentor of Derby Cathedral, and the Very Reverend Geoffrey Marshall who went on to become Dean of Brecon in the Church in Wales. The Reverend TEM Barber was vicar from March 1939 until May 1986 and is known to have been the longest serving vicar in the Church of England at that time, and also the longest-ever serving vicar of Spondon.
During his time as vicar, the church congregations flourished. He taught strictly from the Book of Common Prayer with High Church ritual, and was renowned for his work with the sick and dying, and for his work with young people. One of his greatest works was with the Spondon Church Boys' Club, which he founded in 1939 and ran until his death in 1988. A feature of the club was the annual summer camp to various venues, but latterly to Sidmouth. Following his resignation due to ill health at the age of 79 he was, thanks to his many friends, able to continue to run Spondon Church Boys' Club until his death two years later. His funeral was packed by his parishioners, past and present, whom he had served for 50 years.
Features
Adjacent to the main altar is a recessed sedilia, nearby a priest's sanctus window and there are four piscinas within the church.
There is a First World War memorial located in the Lady Chapel and a newer Tower Chapel. The royal arms displayed over the north door is dated between 1702 and 1707 because it displays the arms of Queen Anne before the union with Scotland.
Externally can be found the remains of a decorated cross shaft said to date to around 870, though it was not originally located in the churchyard.
In the nearby former vicarage grounds is what is considered to be a holy well.
Bells
There is a peal of six bells, one of which is 16th century, one 17th, and the remaining four of the 19th century.
Organ
A new organ was installed and opened by W.E. Gover of St Werburgh's Church, Derby on 21 April 1839.
The organ by the builder James Jepson Binns was opened on 14 June 1905 and has a case by John Oldrid Scott. The total cost was £1,100 (). The electric action was fitted by M.C. Thompson in 1989. A specification of the organ can be found on the National Pipe Organ Register.
See also
Listed buildings in Spondon
References
Saint Werburgh
Church of England church buildings in Derbyshire
Grade II listed churches in Derbyshire
Anglo-Catholic church buildings in Derbyshire
Churches completed in 1390
14th-century church buildings in England
|
```vue
<template>
<!-- Clicking the button assigns `newPage` to `page`, switching the current page. -->
<button @click="page = newPage">
Switch to page {{ newPage }}
</button>
</template>
<script>
// Minimal example component: `page` holds the current page id and
// `newPage` the page the button switches to.
export default {
data() {
return {
page: "home",
newPage: "products",
}
}
}
</script>
```
|
General Ballivián is a village and rural municipality in Salta Province in northwestern Argentina. General Ballivián's hot subtropical climate is nestled between the subtropical and Chaco Salta at the foot of the mountains of Tartagal. It is considered part of the "vermilion region". General Ballivián was founded when oil was found in the area.
Population
In 2001, General Ballivián had 1,591 inhabitants (INDEC), which represents an increase of 51.7% compared to the 1,049 inhabitants (INDEC) of the previous census in 1991.
Namesake
General Ballivián is named in tribute to Bolivian ex-president (1841–1847) and General José Ballivián (1805–1852). Ballivián was known as a warrior who diplomatically and militarily tried unsuccessfully to expand Bolivia to obtain an ocean port in Arica, Chile.
References
Populated places in Salta Province
|
```typescript
import vtkScalarsToColors from "../../../Common/Core/ScalarsToColors";
import { Size, Vector2, Vector3 } from "../../../types";
import vtkActor, { IActorInitialValues } from "../Actor";
export interface ITextSizes {
// Measured extent of the rendered tick labels (presumably pixels — these
// values feed computeBarSize/recomputeBarSegments; confirm in the source).
tickWidth: number;
tickHeight: number;
}
export interface IResult {
// Running point/cell write offsets plus the typed arrays backing the label
// polydata; filled in by createPolyDataForOneLabel.
ptIdx: number,
cellIdx: number,
polys: Float64Array,
points: Uint16Array,
tcoords: Float32Array,
}
// CSS-like text styling applied to the axis title and tick labels.
export interface IStyle {
fontColor?: string;
fontStyle?: string;
fontFamily?: string;
fontSize?: string;
}
/**
 * Initial values accepted by vtkScalarBarActor's newInstance()/extend().
 */
export interface IScalarBarActorInitialValues extends IActorInitialValues {
automated?: boolean,
autoLayout?: (publicAPI: object, model: object) => void,
axisLabel?: string,
barPosition?: Vector2,
barSize?: Size,
boxPosition?: Vector2,
boxSize?: Size,
// NOTE(review): typed as `null` only; judging by get/setScalarsToColors on
// the actor this is probably meant to be `vtkScalarsToColors | null` — confirm.
scalarToColors?: null,
axisTitlePixelOffset?: number,
axisTextStyle?: IStyle,
tickLabelPixelOffset?: number,
tickTextStyle?: IStyle
}
export interface vtkScalarBarActor {
/**
 * Signal that the texture-atlas image is complete; rebuilds when `doUpdate`
 * is set (behavior inferred from the name — confirm in the implementation).
 * @param {Boolean} doUpdate
 */
completedImage(doUpdate: boolean): void;
/**
 * Based on all the settings compute a barSegments array containing the
 * segments of the scalar bar; each segment contains:
 * corners[4][2]
 * title - e.g. NaN, Above, ticks
 * scalars - the normalized scalars values to use for that segment
 *
 * Note that the bar consumes the space in the box that remains
 * after leaving room for the text labels.
 * @param {ITextSizes} textSizes
 */
computeBarSize(textSizes: ITextSizes): Size;
/**
 * Called by updatePolyDataForLabels; modifies class constants ptv3, tmpv3.
 * @param text
 * @param pos
 * @param xdir
 * @param ydir
 * @param dir
 * @param offset
 * @param results
 */
createPolyDataForOneLabel(text: string, pos: Vector3, xdir: Vector3, ydir: Vector3, dir: Vector2, offset: number, results: IResult): void;
/**
 * Get the actors used to render the scalar bar.
 */
getActors(): vtkActor[];
/**
 * Get the automatic layout callback (see IScalarBarActorInitialValues.autoLayout).
 */
getAutoLayout(): any;
/**
 * Get whether the bar layout is computed automatically.
 */
getAutomated(): boolean;
/**
 * Get the axis label text.
 */
getAxisLabel(): string;
/**
 * Get the text style used for the axis title.
 */
getAxisTextStyle(): IStyle;
/**
 * Get the pixel offset of the axis title.
 */
getAxisTitlePixelOffset(): number;
/**
 * Get the position of the box containing the scalar bar.
 */
getBoxPosition(): Vector2;
/**
 * Get the box position by reference (do not mutate the returned value).
 */
getBoxPositionByReference(): Vector2;
/**
 * Get the size of the box containing the scalar bar.
 */
getBoxSize(): Size;
/**
 * Get the box size by reference (do not mutate the returned value).
 */
getBoxSizeByReference(): Size;
/**
 * Get the nested props (internal actors).
 */
getNestedProps(): vtkActor[];
/**
 * Get the scalars-to-colors (lookup table) object the bar visualizes.
 */
getScalarsToColors(): vtkScalarsToColors;
/**
 * Get the text style used for the tick labels.
 */
getTickTextStyle(): IStyle;
/**
 * Recompute the bar segments from the measured label sizes.
 * @param {ITextSizes} textSizes
 */
recomputeBarSegments(textSizes: ITextSizes): void;
/**
 * Restore the default automatic layout callback.
 */
resetAutoLayoutToDefault(): void;
/**
 * Set the automatic layout callback.
 * @param autoLayout
 */
setAutoLayout(autoLayout: any): boolean;
/**
 * Set whether the bar layout is computed automatically.
 * @param {Boolean} automated
 */
setAutomated(automated: boolean): boolean;
/**
 * Set the axis label text.
 * @param {String} axisLabel
 */
setAxisLabel(axisLabel: string): boolean;
/**
 * Set the text style used for the axis title.
 * @param {IStyle} axisTextStyle
 */
setAxisTextStyle(axisTextStyle: IStyle): boolean;
/**
 * Set the pixel offset of the axis title.
 * @param {Number} axisTitlePixelOffset
 */
setAxisTitlePixelOffset(axisTitlePixelOffset: number): boolean;
/**
 * Set the position of the box containing the scalar bar.
 * @param {Vector2} boxPosition
 */
setBoxPosition(boxPosition: Vector2): boolean;
/**
 * Set the box position by copying the given value.
 * @param {Vector2} boxPosition
 */
setBoxPositionFrom(boxPosition: Vector2): boolean;
/**
 * Set the size of the box containing the scalar bar.
 * @param {Size} boxSize
 */
setBoxSize(boxSize: Size): boolean;
/**
 * Set the box size by copying the given value.
 * @param {Size} boxSize
 */
setBoxSizeFrom(boxSize: Size): boolean;
/**
 * Set the scalars-to-colors (lookup table) object to visualize.
 * @param {vtkScalarsToColors} scalarsToColors
 */
setScalarsToColors(scalarsToColors: vtkScalarsToColors): boolean;
/**
 * Set the pixel offset of the tick labels.
 * @param tickLabelPixelOffset
 */
setTickLabelPixelOffset(tickLabelPixelOffset: number): boolean;
/**
 * Set the text style used for the tick labels.
 * @param {IStyle} tickStyle
 */
setTickTextStyle(tickStyle: IStyle): void;
/**
 * Set the visibility of the scalar bar actor.
 */
setVisibility(visibility: boolean): boolean;
/**
 * Main method to rebuild the scalarBar when something has changed; tracks
 * modified times.
 */
update(): void;
/**
 * (Re)build the polydata for the colored bar segments.
 */
updatePolyDataForBarSegments(): void;
/**
 * Update the polydata associated with drawing the text labels,
 * specifically the quads used for each label and their associated tcoords
 * etc. This changes every time the camera viewpoint changes.
 */
updatePolyDataForLabels(): void;
/**
 * Create the texture map atlas that contains the rendering of
 * all the text strings. Only needs to be called when the text strings
 * have changed (labels and ticks).
 */
updateTextureAtlas(): void;
}
/**
 * Method used to decorate a given object (publicAPI+model) with vtkScalarBarActor characteristics.
 *
 * @param publicAPI object on which methods will be bounds (public)
 * @param model object on which data structure will be bounds (protected)
 * @param {IScalarBarActorInitialValues} [initialValues] (default: {})
 */
export function extend(publicAPI: object, model: object, initialValues?: IScalarBarActorInitialValues): void;
/**
 * Method used to create a new instance of vtkScalarBarActor.
 */
export function newInstance(initialValues?: IScalarBarActorInitialValues): vtkScalarBarActor;
/**
 * vtkScalarBarActor creates a scalar bar with tick marks. A
 * scalar bar is a legend that indicates to the viewer the correspondence
 * between color value and data value. The legend consists of a rectangular bar
 * made of rectangular pieces each colored a constant value. Since
 * vtkScalarBarActor is a subclass of vtkActor2D, it is drawn in the image plane
 * (i.e., in the renderer's viewport) on top of the 3D graphics window.
 */
export declare const vtkScalarBarActor: {
newInstance: typeof newInstance,
extend: typeof extend,
};
export default vtkScalarBarActor;
```
|
Melanochyla is a genus of plants in the family Anacardiaceae.
Taxonomy
Species
Plants of the World Online has 23 accepted species:
References
Anacardiaceae genera
Taxonomy articles created by Polbot
|
```python
""" Sketching-based Matrix Computations """
# Author: Jordi Montes <jomsdev@gmail.com>
# August 28, 2017
import numpy as np
from scipy._lib._util import check_random_state, rng_integers
from scipy.sparse import csc_matrix
__all__ = ['clarkson_woodruff_transform']
def cwt_matrix(n_rows, n_columns, seed=None):
    r"""
    Generate a sparse matrix S representing a Clarkson-Woodruff transform.

    The returned matrix has shape ``(n_rows, n_columns)``; every column
    holds exactly one nonzero entry, placed in a uniformly random row and
    set to +1 or -1 with equal probability.

    Parameters
    ----------
    n_rows : int
        Number of rows of S.
    n_columns : int
        Number of columns of S.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    S : (n_rows, n_columns) csc_matrix
        The returned matrix has ``n_columns`` nonzero entries.

    Notes
    -----
    Given a matrix A, with probability at least 9/10,

    .. math:: \|SA\| = (1 \pm \epsilon)\|A\|

    where the error epsilon is related to the size of S.
    """
    rng = check_random_state(seed)
    # One nonzero per column: pick its row uniformly at random.
    row_indices = rng_integers(rng, 0, n_rows, n_columns)
    # CSC column pointers: column j owns exactly the single entry j.
    col_pointers = np.arange(n_columns + 1)
    # Random signs, +1 or -1 with equal probability.
    entries = rng.choice([1, -1], n_columns)
    return csc_matrix((entries, row_indices, col_pointers),
                      shape=(n_rows, n_columns))
def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None):
    r"""
    Apply a Clarkson-Woodruff transform (CountSketch) to a matrix.

    For an input matrix ``A`` of shape ``(n, d)``, compute a sketch ``A'``
    of shape ``(sketch_size, d)`` such that

    .. math:: \|A'x\| \approx \|Ax\|

    holds with high probability for any fixed vector ``x``.

    Parameters
    ----------
    input_matrix : array_like
        Input matrix of shape ``(n, d)``.
    sketch_size : int
        Number of rows of the sketch.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance
        then that instance is used.

    Returns
    -------
    A' : array_like
        Sketch of the input matrix ``A``, of shape ``(sketch_size, d)``.

    Notes
    -----
    To make the approximation statement precise: adapting the proof of
    Theorem 14 of [2]_ via Markov's inequality, a sketch size
    ``sketch_size=k`` with

    .. math:: k \geq \frac{2}{\epsilon^2\delta}

    guarantees, for any fixed vector ``x``,

    .. math:: \|Ax\| = (1\pm\epsilon)\|A'x\|

    with probability at least :math:`1 - \delta`.

    The implementation exploits sparsity: computing a sketch costs time
    proportional to ``A.nnz``, so ``scipy.sparse.csc_matrix`` input is
    fastest, with ``csr_matrix`` close behind, ``coo_matrix`` slower, and
    dense arrays slowest — though dense input still performs well on a
    relative scale.

    Examples
    --------
    Sketch a large dense matrix ``A``:

    >>> from scipy import linalg
    >>> n_rows, n_columns, sketch_n_rows = 15000, 100, 200
    >>> rng = np.random.default_rng()
    >>> A = rng.standard_normal((n_rows, n_columns))
    >>> sketch = linalg.clarkson_woodruff_transform(A, sketch_n_rows)
    >>> sketch.shape
    (200, 100)
    >>> norm_A = np.linalg.norm(A)
    >>> norm_sketch = np.linalg.norm(sketch)

    With high probability ``norm_sketch`` is close to ``norm_A``.
    Similarly, sketching preserves the solution of a linear regression
    :math:`\min \|Ax - b\|`:

    >>> b = rng.standard_normal(n_rows)
    >>> x = np.linalg.lstsq(A, b, rcond=None)[0]
    >>> Ab = np.hstack((A, b.reshape(-1, 1)))
    >>> SAb = linalg.clarkson_woodruff_transform(Ab, sketch_n_rows)
    >>> SA, Sb = SAb[:, :-1], SAb[:, -1]
    >>> x_sketched = np.linalg.lstsq(SA, Sb, rcond=None)[0]

    As with the norm example, ``np.linalg.norm(A @ x - b)`` is close to
    ``np.linalg.norm(A @ x_sketched - b)`` with high probability.

    References
    ----------
    .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank
           approximation and regression in input sparsity time. In STOC,
           2013.
    .. [2] David P. Woodruff. Sketching as a tool for numerical linear
           algebra. In Foundations and Trends in Theoretical Computer
           Science, 2014.
    """
    # S has a single ±1 per column, so S @ A touches every stored entry
    # of A exactly once — hence the A.nnz-proportional running time.
    sketch_op = cwt_matrix(sketch_size, input_matrix.shape[0], seed)
    return sketch_op.dot(input_matrix)
```
|
Vidkun or Vidkunn is a given name. Notable people with the name include:
Vidkun Quisling (1887–1945), Norwegian military officer and politician
Vidkunn Hveding (1921–2001), Norwegian politician
Vidkunn Nitter Schreiner, editor of Bergens Tidende, 1942–1945
|
Albert Miller may refer to:
Bert Miller (footballer) (Albert Bertrand W. Miller, 1880–1953), English footballer
Allie Miller (Albert Crist Miller, 1886–?), American football player and coach
Alan Mills (musician) (1912–1977), born Albert Miller, Canadian singer
Albert Roger Miller, known as Roger Milla (born 1952), Cameroonian footballer
Albert Miller (athlete) (born 1957), Fijian decathlete and hurdler
Albert C. Miller (1898–1979), American attorney
See also
Al Miller (disambiguation), several people
Bert Miller (disambiguation), several people
Albert Miller Lea (1808–1891), American engineer, soldier and topographer
|
```turing
.\" $OpenBSD: 1.t,v 1.5 2003/06/02 20:06:15 millert Exp $
.\" $NetBSD: 1.t,v 1.3 1996/04/05 01:45:44 cgd Exp $
.\"
.\" The Regents of the University of California. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. Neither the name of the University nor the names of its contributors
.\" may be used to endorse or promote products derived from this software
.\" without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" @(#)1.t 8.1 (Berkeley) 6/5/93
.\"
.ds RH Introduction
.NH
Introduction
.PP
This document reflects the use of
.I fsck_ffs
with the 4.2BSD and 4.3BSD file system organization. This
is a revision of the
original paper written by
T. J. Kowalski.
.PP
When a UNIX
operating system is brought up, a consistency
check of the file systems should always be performed.
This precautionary measure helps to ensure
a reliable environment for file storage on disk.
If an inconsistency is discovered,
corrective action must be taken.
.I Fsck_ffs
runs in two modes.
Normally it is run non-interactively by the system after
a normal boot.
When running in this mode,
it will only make changes to the file system that are known
to always be correct.
If an unexpected inconsistency is found
.I fsck_ffs
will exit with a non-zero exit status,
leaving the system running single-user.
Typically the operator then runs
.I fsck_ffs
interactively.
When running in this mode,
each problem is listed followed by a suggested corrective action.
The operator must decide whether or not the suggested correction
should be made.
.PP
The purpose of this memo is to dispel the
mystique surrounding
file system inconsistencies.
It first describes the updating of the file system
(the calm before the storm) and
then describes file system corruption (the storm).
Finally,
the set of deterministic corrective actions
used by
.I fsck_ffs
(the Coast Guard
to the rescue) is presented.
.ds RH Overview of the File System
```
|
"Wild Child" is a single by Irish singer-songwriter Enya. It was released on 19 March 2001 as the second and final single from her fifth studio album, A Day Without Rain (2000).
Release
In Germany, Japan, and Korea, the single was published only on Compact Disc; in the United Kingdom, it was also published on cassette. The B-side "Midnight Blue" was later reworked and included as the title track on Enya's 2008 studio album, And Winter Came ….
Live performances
Enya performed the song at the 2001 Japan Gold Disc Award, after receiving an award for Best International Pop Albums of the Year for A Day Without Rain.
Covers and remixes
Eurodance music duo CJ Crew recorded an uptempo dance mix of the song, which appeared on the compilation album Dancemania Speed 10 (2002).
Track listings
Maxi-CD single
"Wild Child" – 3:33
"Midnight Blue" – 2:04
"Song of the Sandman (Lullaby)" – 3:40
Cassette single
"Wild Child" – 3:33
"Isobella" – 4:27
Charts
Release history
References
External links
2000 songs
2001 singles
Enya songs
Songs with lyrics by Roma Ryan
Songs with music by Enya
Warner Music Group singles
|
```go
// Unless explicitly stated otherwise all files in this repository are licensed
// This product includes software developed at Datadog (path_to_url
//go:build test
// Package senderhelper provides a set of fx options for providing a mock
// sender for the demultiplexer.
package senderhelper
import (
"go.uber.org/fx"
"github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer"
"github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl"
"github.com/DataDog/datadog-agent/comp/core"
"github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder"
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
)
// Opts is a set of options for providing a demux with a mock sender.
// It bundles the mock forwarder/demultiplexer modules and the core mock
// bundle, provides a single pre-configured MockSender, and decorates the
// mock demultiplexer so that mock sender is its default sender.
// We can remove this if the Sender is ever exposed as a component.
var Opts = fx.Options(
	defaultforwarder.MockModule(),
	demultiplexerimpl.MockModule(),
	core.MockBundle(),
	// One MockSender instance is exposed under both its concrete type and
	// the sender.Sender interface; SetupAcceptAll makes it tolerate any
	// method call so tests need no per-call expectations.
	fx.Provide(func() (*mocksender.MockSender, sender.Sender) {
		mockSender := mocksender.NewMockSender("mock-sender")
		mockSender.SetupAcceptAll()
		return mockSender, mockSender
	}),
	// Wire the provided mock sender into the mock demultiplexer as its
	// default sender before it is handed out as the Component.
	fx.Decorate(func(demux demultiplexer.Mock, s sender.Sender) demultiplexer.Component {
		demux.SetDefaultSender(s)
		return demux
	}),
)
```
|
Visitation is a 1503 woodcut by the German Renaissance artist Albrecht Dürer, from his series on the Life of the Virgin. It depicts the Visitation, an episode in the Gospel of Luke, when Mary, heavily pregnant, travels to see her much older cousin Elisabeth, who is now also late with child.
The women embrace at the house of Elisabeth's husband Zacharias, who is shown standing at the doorway to the left of the woodcut. Both Zacharias and his wife are old; and he is struck into silence by the fact of his long barren wife having finally conceived a child.
The highly detailed landscape shown in the background is likely inspired by the artist's two journeys through the Alps during 1494–95.
See also
Joachim and Anne Meeting at the Golden Gate (Dürer), another in the series.
List of engravings by Albrecht Dürer
List of woodcuts by Albrecht Dürer
Notes
Sources
Nürnberg, Verlag Hans Carl. Dürer in Dublin: Engravings and woodcuts of Albrecht Dürer. Chester Beatty Library, 1983
Prints by Albrecht Dürer
Prints based on the Bible
Prints including the Virgin Mary
16th-century prints
Woodcuts
Catholic engraving
|
```javascript
/*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* specific language governing permissions and limitations
*/
/**
 * This is the parent controller for all kinds of Delivery Service forms - edit,
 * creation, request, etc.
 *
 * @param {import("../../../api/DeliveryServiceService").DeliveryService} deliveryService
 * @param {import("../../../api/DeliveryServiceService").DeliveryService | undefined} dsCurrent
 * @param {unknown} origin
 * @param {unknown[]} topologies
 * @param {string} type
 * @param {{name: string}[]} types
 * @param {import("angular").IScope & Record<PropertyKey, any>} $scope
 * @param {import("../../../service/utils/FormUtils")} formUtils
 * @param {import("../../../service/utils/TenantUtils")} tenantUtils
 * @param {import("../../../service/utils/DeliveryServiceUtils")} deliveryServiceUtils
 * @param {import("../../../service/utils/LocationUtils")} locationUtils
 * @param {import("../../../api/DeliveryServiceService")} deliveryServiceService
 * @param {import("../../../api/CDNService")} cdnService
 * @param {import("../../../api/ProfileService")} profileService
 * @param {import("../../../api/TenantService")} tenantService
 * @param {import("../../../models/PropertiesModel")} propertiesModel
 * @param {import("../../../models/UserModel")} userModel
 * @param {import("../../../api/ServerCapabilityService")} serverCapabilityService
 * @param {import("../../../api/ServiceCategoryService")} serviceCategoryService
 */
var FormDeliveryServiceController = function(deliveryService, dsCurrent, origin, topologies, type, types, $scope, formUtils, tenantUtils, deliveryServiceUtils, locationUtils, deliveryServiceService, cdnService, profileService, tenantService, propertiesModel, userModel, serverCapabilityService, serviceCategoryService) {
	/**
	 * This is used to cache TLS version settings when the checkbox is toggled.
	 * @type null | [string, ...string[]]
	 */
	let cachedTLSVersions = null;

	$scope.exposeInactive = !!(propertiesModel.properties.deliveryServices?.exposeInactive);

	$scope.showSensitive = false;

	const knownVersions = new Set(["1.0", "1.1", "1.2", "1.3"]);

	/**
	 * Checks if a TLS version is unknown.
	 * @param {string} v
	 */
	$scope.tlsVersionUnknown = v => v && !knownVersions.has(v);

	const insecureVersions = new Set(["1.0", "1.1"]);

	/**
	 * Checks if a TLS version is known to be insecure.
	 * @param {string} v
	 */
	$scope.tlsVersionInsecure = v => v && insecureVersions.has(v);

	/**
	 * This toggles whether TLS versions are restricted for the Delivery
	 * Service.
	 *
	 * It uses cachedTLSVersions to cache TLS version restrictions, so that the
	 * DS is always ready to submit without manipulation, but the UI "remembers"
	 * the TLS versions that existed on toggling restrictions off.
	 *
	 * This is called when the checkbox's 'change' event fires - that event is
	 * not handled here.
	 */
	function toggleTLSRestrict() {
		if ($scope.restrictTLS) {
			// Restrictions turned on: restore the remembered versions, or
			// start with a single blank entry so the form widget renders.
			if (cachedTLSVersions instanceof Array && cachedTLSVersions.length > 0) {
				deliveryService.tlsVersions = cachedTLSVersions;
			} else {
				deliveryService.tlsVersions = [""];
			}
			cachedTLSVersions = null;
			return;
		}
		// Restrictions turned off: remember any versions so they can be
		// restored, then clear them on the DS itself.
		if (deliveryService.tlsVersions instanceof Array && deliveryService.tlsVersions.length > 0) {
			cachedTLSVersions = deliveryService.tlsVersions;
		} else {
			cachedTLSVersions = null;
		}
		deliveryService.tlsVersions = null;
	}
	$scope.toggleTLSRestrict = toggleTLSRestrict;

	/**
	 * Checks whether a Delivery Service uses Geo-Limit country codes
	 * (geoLimit 1 = CZF only, 2 = CZF plus country codes).
	 *
	 * @param {import("../../../api/DeliveryServiceService").DeliveryService | undefined} ds
	 * @returns {boolean}
	 */
	$scope.hasGeoLimitCountries = function(ds) {
		return ds !== undefined && (ds.geoLimit === 1 || ds.geoLimit === 2);
	};

	$scope.navigateToPath = (path, unsavedChanges) => locationUtils.navigateToPath(path, unsavedChanges);

	/**
	 * Populates ds.geoLimitCountriesRaw (the comma-separated form field)
	 * from the ds.geoLimitCountries array.
	 *
	 * @param {import("../../../api/DeliveryServiceService").DeliveryService | undefined} ds
	 */
	$scope.loadGeoLimitCountriesRaw = function (ds) {
		// `ds` may legitimately be undefined - e.g. dsCurrent when not
		// viewing a DSR diff - in which case there is nothing to load.
		if (!ds) {
			return;
		}
		if ($scope.hasGeoLimitCountries(ds)) {
			ds.geoLimitCountriesRaw = (ds.geoLimitCountries ?? []).join(",");
		} else {
			ds.geoLimitCountriesRaw = "";
		}
	};

	/**
	 * Populates the ds.geoLimitCountries array from the comma-separated
	 * ds.geoLimitCountriesRaw form field.
	 *
	 * @param {import("../../../api/DeliveryServiceService").DeliveryService | undefined} ds
	 */
	$scope.loadGeoLimitCountries = function (ds) {
		if (!ds) {
			return;
		}
		if ($scope.hasGeoLimitCountries(ds)) {
			ds.geoLimitCountries = ds.geoLimitCountriesRaw.split(",");
		} else {
			ds.geoLimitCountriesRaw = "";
			ds.geoLimitCountries = [];
		}
	};

	/**
	 * Removes a TLS version at the given index.
	 * @param {number} index
	 */
	$scope.removeTLSVersion = function(index) {
		deliveryService.tlsVersions?.splice(index, 1);
	};

	/**
	 * Adds a TLS version at the given index.
	 * @param {number} index
	 */
	$scope.addTLSVersion = function(index) {
		deliveryService.tlsVersions?.splice(index+1, 0, "");
	};

	/** Compare Arrays
	 *
	 * @template T extends number[] | boolean[] | bigint[] | string[]
	 *
	 * @param {T} a
	 * @param {T} b
	 * @returns `false` if the arrays are equal, `true` otherwise.
	 */
	function arrayCompare (a, b) {
		if (a === b) return false;
		if (a.length !== b.length) return true;
		for (let i = 0; i < a.length; i++) {
			if (a[i] !== b[i]) return true;
		}
		return false;
	}
	$scope.arrayCompare = arrayCompare;

	/**
	 * This function is called when capability is updated on a DSR.
	 * It rebuilds deliveryService.requiredCapabilities from the checked
	 * entries of $scope.selectedCapabilities.
	 */
	function capabilityChange() {
		const cap = [];
		for (const [key, value] of Object.entries($scope.selectedCapabilities)) {
			if (value) {
				cap.push(key);
			}
		}
		deliveryService.requiredCapabilities = cap;
	}
	$scope.capabilityChange = capabilityChange;

	/**
	 * This function is called on 'change' events for any and all TLS Version
	 * inputs, and sets validity states of duplicates.
	 *
	 * This can't use a normal validator because it depends on a value checking
	 * against a list containing itself. AngularJS sets values that fail
	 * validation to `undefined`, so if there's a set of TLS versions
	 * `["1.3", "1.3"]`, then the validator will set one of them to `undefined`.
	 * Now the set is `["1.3", undefined]`, so there are no more duplicates, so
	 * the set is marked as valid.
	 */
	function validateTLS() {
		if (!$scope.generalConfig || !($scope.deliveryService.tlsVersions instanceof Array)) {
			return;
		}
		// First pass: clear the "duplicates" error everywhere and count
		// occurrences of each version.
		const verMap = new Map();
		for (let i = 0; i < $scope.deliveryService.tlsVersions.length; ++i) {
			const propName = `tlsVersion${i+1}`;
			if (propName in $scope.generalConfig) {
				$scope.generalConfig[propName].$setValidity("duplicates", true);
			}
			const ver = $scope.deliveryService.tlsVersions[i];
			if (ver === undefined) {
				continue;
			}
			const current = verMap.get(ver);
			if (current) {
				current.count++;
				current.indices.push(i);
			} else {
				verMap.set(ver, {
					count: 1,
					indices: [i]
				});
			}
		}
		// Second pass: mark every input whose version occurred more than once.
		for (const index of Array.from(verMap).filter(v=>v[1].count>1).flatMap(v=>v[1].indices)) {
			const propName = `tlsVersion${index+1}`;
			if (propName in $scope.generalConfig) {
				$scope.generalConfig[propName].$setValidity("duplicates", false);
			}
		}
	}
	$scope.validateTLS = validateTLS;

	/**
	 * For HTTP-routed Delivery Services, populates
	 * $scope.steeringTargetsFor with the steering DSs targeting this one.
	 * @returns {Promise<void>}
	 */
	async function getSteeringTargets() {
		if(type.indexOf("HTTP") > -1) {
			const configs = await deliveryServiceService.getSteering();
			const dsTargets = deliveryServiceUtils.getSteeringTargetsForDS([deliveryService.xmlId], configs);
			$scope.steeringTargetsFor = Array.from(dsTargets[deliveryService.xmlId]);
		}
	}

	/**
	 * Updates the CDNs on the $scope.
	 * @returns {Promise<void>}
	 */
	async function getCDNs() {
		$scope.cdns = await cdnService.getCDNs();
	}

	/**
	 * Updates the Profiles on the $scope.
	 * @returns {Promise<void>}
	 */
	async function getProfiles() {
		/** @type {{type: string}[]} */
		const result = await profileService.getProfiles({ orderby: "name" });
		// Only Delivery Service Profiles are assignable here.
		$scope.profiles = result.filter(p => p.type === "DS_PROFILE");
	}

	/**
	 * Updates the Tenants on the $scope.
	 * @returns {Promise<void>}
	 */
	async function getTenants() {
		const tenants = await tenantService.getTenants();
		const tenant = tenants.find(t => t.id === userModel.user.tenantId);
		$scope.tenants = tenantUtils.hierarchySort(tenantUtils.groupTenantsByParent(tenants), tenant?.parentId, []);
		tenantUtils.addLevels($scope.tenants);
	}

	$scope.selectedCapabilities = {};

	/**
	 * Updates the server Capabilities on the $scope.
	 * @returns {Promise<void>}
	 */
	async function getRequiredCapabilities() {
		$scope.requiredCapabilities = await serverCapabilityService.getServerCapabilities();
		$scope.selectedCapabilities = Object.fromEntries($scope.requiredCapabilities.map(dsc => [dsc.name, $scope.deliveryService.requiredCapabilities.includes(dsc.name)]));
	}

	/**
	 * Updates the Service Categories on the $scope.
	 * @returns {Promise<void>}
	 */
	async function getServiceCategories() {
		$scope.serviceCategories = await serviceCategoryService.getServiceCategories({dsId: deliveryService.id });
	}

	/**
	 * Formats the 'dsCurrent' active flag into a human-readable string. Returns
	 * an empty string if dsCurrent isn't defined.
	 *
	 * @returns {string}
	 */
	function formatCurrentActive() {
		if (!dsCurrent) {
			return "";
		}
		let {active} = dsCurrent;
		// When inactive DSs aren't exposed, collapse every non-ACTIVE state
		// to INACTIVE for display.
		if (!propertiesModel.properties.deliveryServices?.exposeInactive && active !== "ACTIVE") {
			active = "INACTIVE";
		}
		return active.split(" ").map(w => w[0].toUpperCase() + w.substring(1).toLowerCase()).join(" ");
	}
	$scope.formatCurrentActive = formatCurrentActive;

	$scope.deliveryService = deliveryService;

	$scope.showGeneralConfig = true;

	$scope.showCacheConfig = true;

	$scope.showRoutingConfig = true;

	$scope.dsCurrent = dsCurrent; // this ds is used primarily for showing the diff between a ds request and the current DS

	$scope.origin = Array.isArray(origin) ? origin[0] : origin;

	$scope.topologies = topologies;

	$scope.showChartsButton = !!(propertiesModel.properties.deliveryServices?.charts?.customLink?.show);

	$scope.openCharts = ds => deliveryServiceUtils.openCharts(ds);

	$scope.dsRequestsEnabled = !!(propertiesModel.properties.dsRequests?.enabled);

	/**
	 * Gods have mercy.
	 *
	 * @param {import("../../../api/DeliveryServiceService").DeliveryService} ds
	 * @returns {string | undefined} An absolutely unsafe direct HTML segment.
	 */
	$scope.edgeFQDNs = function(ds) {
		return ds.exampleURLs?.join("<br/>");
	};

	// DSR status constants.
	$scope.DRAFT = 0;
	$scope.SUBMITTED = 1;
	$scope.REJECTED = 2;
	$scope.PENDING = 3;
	$scope.COMPLETE = 4;

	// these may be overriden in a child class. i.e. FormEditDeliveryServiceController
	$scope.saveable = () => true;
	$scope.deletable = () => true;

	// Only offer Types from the same routing category as the current Type.
	$scope.types = types.filter(currentType => {
		let category;
		if (type.includes("ANY_MAP")) {
			category = "ANY_MAP";
		} else if (type.includes("DNS")) {
			category = "DNS";
		} else if (type.includes("HTTP")) {
			category = "HTTP";
		} else if (type.includes("STEERING")) {
			category = "STEERING";
		} else {
			throw new Error(`unrecognized type: '${type}'`);
		}
		return currentType.name.includes(category);
	});

	$scope.clientSteeringType = types.find(t => t.name === "CLIENT_STEERING");

	/**
	 * Checks if a given Delivery Service uses the "Client Steering" flavor of
	 * Steering-based routing.
	 *
	 * Note the side effect: for non-client-steering DSs this blanks out
	 * `trResponseHeaders`, which only applies to client steering.
	 *
	 * @param {import("../../../api/DeliveryServiceService").DeliveryService} ds The Delivery Service in question.
	 * @returns {boolean} `true` if `ds` uses Client Steering.
	 */
	$scope.isClientSteering = function(ds) {
		if (ds.typeId == $scope.clientSteeringType.id) {
			return true;
		} else {
			ds.trResponseHeaders = "";
			return false;
		}
	};

	$scope.signingAlgos = [
		{ value: null, label: 'None' },
		{ value: 'url_sig', label: 'URL Signature Keys' },
		{ value: 'uri_signing', label: 'URI Signing Keys' }
	];

	$scope.protocols = [
		{ value: 0, label: 'HTTP' },
		{ value: 1, label: 'HTTPS' },
		{ value: 2, label: 'HTTP AND HTTPS' },
		{ value: 3, label: 'HTTP TO HTTPS' }
	];

	$scope.qStrings = [
		{ value: 0, label: 'Use query parameter strings in cache key and pass in upstream requests' },
		{ value: 1, label: 'Do not use query parameter strings in cache key, but do pass in upstream requests' },
		{ value: 2, label: 'Neither use query parameter strings in cache key, nor pass in upstream requests' }
	];

	$scope.geoLimits = [
		{ value: 0, label: 'None' },
		{ value: 1, label: 'Coverage Zone File only' },
		{ value: 2, label: 'Coverage Zone File and Country Code(s)' }
	];

	$scope.geoProviders = [
		{ value: 0, label: 'Maxmind' },
		{ value: 1, label: 'Neustar' }
	];

	// DSCP values with their Assured Forwarding / Class Selector names.
	$scope.dscps = [
		{ value: 0, label: '0 - Best Effort' },
		{ value: 10, label: '10 - AF11' },
		{ value: 12, label: '12 - AF12' },
		{ value: 14, label: '14 - AF13' },
		{ value: 18, label: '18 - AF21' },
		{ value: 20, label: '20 - AF22' },
		{ value: 22, label: '22 - AF23' },
		{ value: 26, label: '26 - AF31' },
		{ value: 28, label: '28 - AF32' },
		{ value: 30, label: '30 - AF33' },
		{ value: 34, label: '34 - AF41' },
		{ value: 36, label: '36 - AF42' },
		{ value: 37, label: '37 - ' },
		{ value: 38, label: '38 - AF43' },
		{ value: 8, label: '8 - CS1' },
		{ value: 16, label: '16 - CS2' },
		{ value: 24, label: '24 - CS3' },
		{ value: 32, label: '32 - CS4' },
		{ value: 40, label: '40 - CS5' },
		{ value: 48, label: '48 - CS6' },
		{ value: 56, label: '56 - CS7' }
	];

	$scope.rrhs = [
		{ value: 0, label: "Don't cache Range Requests" },
		{ value: 1, label: "Use the background_fetch plugin" },
		{ value: 2, label: "Use the cache_range_requests plugin" },
		{ value: 3, label: "Use the slice plugin" }
	];

	$scope.msoAlgos = [
		{ value: 0, label: "0 - Consistent Hash" },
		{ value: 1, label: "1 - Primary/Backup" },
		{ value: 2, label: "2 - Strict Round Robin" },
		{ value: 3, label: "3 - IP-based Round Robin" },
		{ value: 4, label: "4 - Latch on Failover" }
	];

	/**
	 * Handles changes to the set signing algorithm used by the Delivery Service
	 * by updating the legacy 'signed' property accordingly.
	 *
	 * @param {null|string} signingAlgorithm
	 */
	$scope.changeSigningAlgorithm = function(signingAlgorithm) {
		if (signingAlgorithm === null) {
			deliveryService.signed = false;
		} else {
			deliveryService.signed = true;
		}
	};

	/**
	 * Encodes the given regular expression into $scope.encodedRegex.
	 * @param {string} consistentHashRegex
	 */
	$scope.encodeRegex = function(consistentHashRegex) {
		if (consistentHashRegex !== undefined) {
			$scope.encodedRegex = encodeURIComponent(consistentHashRegex);
		} else {
			$scope.encodedRegex = "";
		}
	};

	/**
	 * Adds a blank consistent hashing query string parameter to the Delivery
	 * Service.
	 */
	$scope.addQueryParam = () => $scope.deliveryService.consistentHashQueryParams.push("");

	/**
	 * Removes a consistent hashing query string parameter from the Delivery
	 * Service at the given index.
	 *
	 * @param {number} index
	 */
	$scope.removeQueryParam = function(index) {
		if ($scope.deliveryService.consistentHashQueryParams.length > 1) {
			$scope.deliveryService.consistentHashQueryParams.splice(index, 1);
		} else {
			// if only one query param is left, don't remove the item from the array. instead, just blank it out
			// so the dynamic form widget will still be visible. empty strings get stripped out on save anyhow.
			$scope.deliveryService.consistentHashQueryParams[index] = "";
		}
		$scope.deliveryServiceForm.$pristine = false; // this enables the 'update' button in the ds form
	};

	$scope.hasError = input => formUtils.hasError(input);

	/**
	 * Checks if a TLS Version has a specific error.
	 *
	 * @param {number} index The index of the TLS Version to check into the
	 * form's Delivery Service's `tlsVersions` array.
	 * @param {string} property The name of the error to check.
	 * @returns {boolean} Whether or not the indicated TLS Version has the given
	 * error.
	 */
	function tlsVersionHasPropertyError(index, property) {
		if (!$scope.generalConfig) {
			return false;
		}
		const propName = `tlsVersion${index+1}`;
		if (!(propName in $scope.generalConfig)) {
			return false;
		}
		return formUtils.hasPropertyError($scope.generalConfig[propName], property);
	}
	$scope.tlsVersionHasPropertyError = tlsVersionHasPropertyError;

	this.$onInit = function() {
		$scope.loadGeoLimitCountriesRaw(deliveryService);
		// dsCurrent is optional; the loader is a no-op when it's undefined.
		$scope.loadGeoLimitCountriesRaw(dsCurrent);
	};

	/**
	 * Checks if a TLS Version has any error.
	 *
	 * @param {number} index The index of the TLS Version to check into the
	 * form's Delivery Service's `tlsVersions` array.
	 * @returns {boolean} Whether or not the indicated TLS Version has an error.
	 */
	function tlsVersionHasError(index) {
		if (!$scope.generalConfig) {
			return false;
		}
		const propName = `tlsVersion${index+1}`;
		if (!(propName in $scope.generalConfig)) {
			return false;
		}
		return formUtils.hasError($scope.generalConfig[propName]);
	}
	$scope.tlsVersionHasError = tlsVersionHasError;

	$scope.hasPropertyError = (input, property) => formUtils.hasPropertyError(input, property);

	$scope.rangeRequestSelected = function() {
		// rangeSliceBlockSize only applies to the slice plugin (value 3).
		if ($scope.deliveryService.rangeRequestHandling != 3) {
			$scope.deliveryService.rangeSliceBlockSize = null;
		}
	};

	// Kick off the asynchronous loads that populate the form's drop-downs.
	getCDNs();
	getProfiles();
	getTenants();
	getRequiredCapabilities();
	getServiceCategories();
	getSteeringTargets();

	if (!deliveryService.consistentHashQueryParams || deliveryService.consistentHashQueryParams.length < 1) {
		// add an empty one so the dynamic form widget is visible. empty strings get stripped out on save anyhow.
		$scope.deliveryService.consistentHashQueryParams = [ "" ];
	}

	if (deliveryService.lastUpdated) {
		// TS checkers hate him for this one weird trick:
		// @ts-ignore
		deliveryService.lastUpdated = new Date(deliveryService.lastUpdated.replace("+00", "Z"));
		// ... the right way to do this is with an interceptor, but nobody
		// wants to put in that kinda work on a legacy product.
	}

	if (!$scope.exposeInactive && deliveryService.active === "INACTIVE") {
		deliveryService.active = "PRIMED";
	}
};
// Explicit DI annotation so AngularJS injection survives minification; the
// order here must match the constructor's parameter list exactly.
FormDeliveryServiceController.$inject = ["deliveryService", "dsCurrent", "origin", "topologies", "type", "types", "$scope", "formUtils", "tenantUtils", "deliveryServiceUtils", "locationUtils", "deliveryServiceService", "cdnService", "profileService", "tenantService", "propertiesModel", "userModel", "serverCapabilityService", "serviceCategoryService"];
module.exports = FormDeliveryServiceController;
```
|
```go
/*
path_to_url
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package v5
import (
"net/http"
"reflect"
"testing"
"github.com/apache/trafficcontrol/v8/lib/go-tc"
"github.com/apache/trafficcontrol/v8/lib/go-util"
"github.com/apache/trafficcontrol/v8/lib/go-util/assert"
"github.com/apache/trafficcontrol/v8/traffic_ops/testing/api/utils"
"github.com/apache/trafficcontrol/v8/traffic_ops/toclientlib"
client "github.com/apache/trafficcontrol/v8/traffic_ops/v5-client"
)
// TestProfilesImport exercises the Profile import endpoint through the v5
// client: a valid GLOBAL profile import must return 200 OK and echo back the
// submitted fields, while a profile name containing a space must be rejected
// with 400 Bad Request.
func TestProfilesImport(t *testing.T) {
	// WithObjs seeds (and later tears down) the fixtures this test depends on.
	WithObjs(t, []TCObj{CDNs, Types, Parameters, Profiles, ProfileParameters}, func() {
		// Table of request cases, keyed first by HTTP method, then by case name.
		methodTests := utils.TestCase[client.Session, client.RequestOptions, tc.ProfileImportRequest]{
			"POST": {
				"OK when VALID request": {
					ClientSession: TOSession,
					RequestBody: tc.ProfileImportRequest{
						Profile: tc.ProfileExportImportNullable{
							Name:        util.Ptr("GLOBAL"),
							Description: util.Ptr("Global Traffic Ops profile"),
							CDNName:     util.Ptr("cdn1"),
							Type:        util.Ptr("UNK_PROFILE"),
						},
						Parameters: []tc.ProfileExportImportParameterNullable{
							{
								ConfigFile: util.Ptr("global"),
								Name:       util.Ptr("tm.instance_name"),
								Value:      util.Ptr("Traffic Ops CDN"),
							},
							{
								ConfigFile: util.Ptr("global"),
								Name:       util.Ptr("tm.toolname"),
								Value:      util.Ptr("Traffic Ops"),
							},
						},
					},
					// Expect success and that the response mirrors the imported fields.
					Expectations: utils.CkRequest(utils.NoError(), utils.HasStatus(http.StatusOK),
						validateProfilesImport(map[string]interface{}{"Name": "GLOBAL", "CDNName": "cdn1",
							"Description": "Global Traffic Ops profile", "Type": "UNK_PROFILE"})),
				},
				"BAD REQUEST when SPACE in PROFILE NAME": {
					ClientSession: TOSession,
					RequestBody: tc.ProfileImportRequest{
						Profile: tc.ProfileExportImportNullable{
							Name:        util.Ptr("GLOBAL SPACES"),
							Description: util.Ptr("Global Traffic Ops profile"),
							CDNName:     util.Ptr("cdn1"),
							Type:        util.Ptr("UNK_PROFILE"),
						},
						Parameters: []tc.ProfileExportImportParameterNullable{
							{
								ConfigFile: util.Ptr("global"),
								Name:       util.Ptr("tm.instance_name"),
								Value:      util.Ptr("Traffic Ops CDN"),
							},
						},
					},
					Expectations: utils.CkRequest(utils.HasError(), utils.HasStatus(http.StatusBadRequest)),
				},
			},
		}

		// Run every case under a subtest named by method and case.
		for method, testCases := range methodTests {
			t.Run(method, func(t *testing.T) {
				for name, testCase := range testCases {
					switch method {
					case "POST":
						t.Run(name, func(t *testing.T) {
							resp, reqInf, err := testCase.ClientSession.ImportProfile(testCase.RequestBody, testCase.RequestOpts)
							// Each expectation inspects request info, response body, alerts, and error.
							for _, check := range testCase.Expectations {
								check(t, reqInf, resp.Response, resp.Alerts, err)
							}
						})
					}
				}
			})
		}
	})
}
// validateProfilesImport returns an expectation check that compares selected
// fields of the profile-import response against expectedResp.
//
// Keys of expectedResp are struct field names on tc.ProfileExportImportNullable;
// values are the expected string contents. Fields are read via reflection,
// dereferencing pointer fields first.
func validateProfilesImport(expectedResp map[string]interface{}) utils.CkReqFunc {
	return func(t *testing.T, _ toclientlib.ReqInf, resp interface{}, _ tc.Alerts, _ error) {
		assert.RequireNotNil(t, resp, "Expected Profiles Export response to not be nil.")
		// NOTE(review): this type assertion panics rather than failing the test
		// if the response has a different concrete type — confirm that is intended.
		profileImportResp := resp.(tc.ProfileImportResponseObj)
		profileImport := profileImportResp.ProfileExportImportNullable
		for field, expected := range expectedResp {
			// NOTE(review): reflect.Indirect of a nil pointer field yields the zero
			// Value, whose .String() is "<invalid Value>" — so the RequireNotNil
			// below can never fire on this (always non-nil) string; the Equal check
			// is what actually catches missing fields.
			fieldValue := reflect.Indirect(reflect.ValueOf(profileImport).FieldByName(field)).String()
			assert.RequireNotNil(t, fieldValue, "Expected %s to not be nil.", field)
			assert.Equal(t, expected, fieldValue, "Expected %s to be %v, but got %s", field, expected, fieldValue)
		}
	}
}
```
|
```html
<html lang="en">
<head>
<title>Flags - GNU Compiler Collection (GCC) Internals</title>
<meta http-equiv="Content-Type" content="text/html">
<meta name="description" content="GNU Compiler Collection (GCC) Internals">
<meta name="generator" content="makeinfo 4.8">
<link title="Top" rel="start" href="index.html#Top">
<link rel="up" href="RTL.html#RTL" title="RTL">
<link rel="prev" href="Special-Accessors.html#Special-Accessors" title="Special Accessors">
<link rel="next" href="Machine-Modes.html#Machine-Modes" title="Machine Modes">
<link href="path_to_url" rel="generator-home" title="Texinfo Homepage">
<!--
Permission is granted to copy, distribute and/or modify this document
any later version published by the Free Software Foundation; with the
Invariant Sections being ``Funding Free Software'', the Front-Cover
Texts being (a) (see below), and with the Back-Cover Texts being (b)
(see below). A copy of the license is included in the section entitled
(a) The FSF's Front-Cover Text is:
A GNU Manual
(b) The FSF's Back-Cover Text is:
You have freedom to copy and modify this GNU Manual, like GNU
software. Copies published by the Free Software Foundation raise
funds for GNU development.-->
<meta http-equiv="Content-Style-Type" content="text/css">
<style type="text/css"><!--
pre.display { font-family:inherit }
pre.format { font-family:inherit }
pre.smalldisplay { font-family:inherit; font-size:smaller }
pre.smallformat { font-family:inherit; font-size:smaller }
pre.smallexample { font-size:smaller }
pre.smalllisp { font-size:smaller }
span.sc { font-variant:small-caps }
span.roman { font-family:serif; font-weight:normal; }
span.sansserif { font-family:sans-serif; font-weight:normal; }
--></style>
</head>
<body>
<div class="node">
<p>
<a name="Flags"></a>
Next: <a rel="next" accesskey="n" href="Machine-Modes.html#Machine-Modes">Machine Modes</a>,
Previous: <a rel="previous" accesskey="p" href="Special-Accessors.html#Special-Accessors">Special Accessors</a>,
Up: <a rel="up" accesskey="u" href="RTL.html#RTL">RTL</a>
<hr>
</div>
<h3 class="section">13.5 Flags in an RTL Expression</h3>
<p><a name="index-flags-in-RTL-expression-2600"></a>
RTL expressions contain several flags (one-bit bit-fields)
that are used in certain types of expression. Most often they
are accessed with the following macros, which expand into lvalues.
<a name="index-CONSTANT_005fPOOL_005fADDRESS_005fP-2601"></a>
<a name=your_sha256_hash2fu_007d-2602"></a>
<a name=your_sha256_hashbol_005fref_007d-2603"></a>
<dl><dt><code>CONSTANT_POOL_ADDRESS_P (</code><var>x</var><code>)</code><dd>Nonzero in a <code>symbol_ref</code> if it refers to part of the current
function's constant pool. For most targets these addresses are in a
<code>.rodata</code> section entirely separate from the function, but for
some targets the addresses are close to the beginning of the function.
In either case GCC assumes these addresses can be addressed directly,
perhaps with the help of base registers.
Stored in the <code>unchanging</code> field and printed as `<samp><span class="samp">/u</span></samp>'.
<p><a name="index-RTL_005fCONST_005fCALL_005fP-2604"></a><a name=your_sha256_hashfu_007d-2605"></a><a name=your_sha256_hashl_005finsn_007d-2606"></a><br><dt><code>RTL_CONST_CALL_P (</code><var>x</var><code>)</code><dd>In a <code>call_insn</code> indicates that the insn represents a call to a
const function. Stored in the <code>unchanging</code> field and printed as
`<samp><span class="samp">/u</span></samp>'.
<p><a name="index-RTL_005fPURE_005fCALL_005fP-2607"></a><a name=your_sha256_hashfi_007d-2608"></a><a name=your_sha256_hashbcall_005finsn_007d-2609"></a><br><dt><code>RTL_PURE_CALL_P (</code><var>x</var><code>)</code><dd>In a <code>call_insn</code> indicates that the insn represents a call to a
pure function. Stored in the <code>return_val</code> field and printed as
`<samp><span class="samp">/i</span></samp>'.
<p><a name="index-RTL_005fCONST_005fOR_005fPURE_005fCALL_005fP-2610"></a><a name=your_sha256_hashfu_007d-or-_0040samp_007b_002fi_007d-2611"></a><br><dt><code>RTL_CONST_OR_PURE_CALL_P (</code><var>x</var><code>)</code><dd>In a <code>call_insn</code>, true if <code>RTL_CONST_CALL_P</code> or
<code>RTL_PURE_CALL_P</code> is true.
<p><a name=your_sha256_hash612"></a><a name=your_sha256_hashfc_007d-2613"></a><a name=your_sha256_hashinsn_007d-2614"></a><br><dt><code>RTL_LOOPING_CONST_OR_PURE_CALL_P (</code><var>x</var><code>)</code><dd>In a <code>call_insn</code> indicates that the insn represents a possibly
infinite looping call to a const or pure function. Stored in the
<code>call</code> field and printed as `<samp><span class="samp">/c</span></samp>'. Only true if one of
<code>RTL_CONST_CALL_P</code> or <code>RTL_PURE_CALL_P</code> is true.
<p><a name="index-INSN_005fANNULLED_005fBRANCH_005fP-2615"></a><a name=your_sha256_hashfu_007d-2616"></a><a name=your_sha256_hashfu_007d-2617"></a><a name=your_sha256_hash618"></a><a name=your_sha256_hashyour_sha256_hashode_007binsn_007d-2619"></a><br><dt><code>INSN_ANNULLED_BRANCH_P (</code><var>x</var><code>)</code><dd>In a <code>jump_insn</code>, <code>call_insn</code>, or <code>insn</code> indicates
that the branch is an annulling one. See the discussion under
<code>sequence</code> below. Stored in the <code>unchanging</code> field and
printed as `<samp><span class="samp">/u</span></samp>'.
<p><a name="index-INSN_005fDELETED_005fP-2620"></a><a name=your_sha256_hash621"></a><a name=your_sha256_hashfv_007d-2622"></a><a name=your_sha256_hashfv_007d-2623"></a><a name=your_sha256_hash2fv_007d-2624"></a><a name=your_sha256_hashp_007b_002fv_007d-2625"></a><a name=your_sha256_hashd-2626"></a><a name=your_sha256_hash627"></a><a name=your_sha256_hashyour_sha256_hashyour_sha256_hashyour_sha256_hashr_007d_002c-and-_0040code_007bnote_007d-2628"></a><br><dt><code>INSN_DELETED_P (</code><var>x</var><code>)</code><dd>In an <code>insn</code>, <code>call_insn</code>, <code>jump_insn</code>, <code>code_label</code>,
<code>jump_table_data</code>, <code>barrier</code>, or <code>note</code>,
nonzero if the insn has been deleted. Stored in the
<code>volatil</code> field and printed as `<samp><span class="samp">/v</span></samp>'.
<p><a name="index-INSN_005fFROM_005fTARGET_005fP-2629"></a><a name=your_sha256_hash630"></a><a name=your_sha256_hashfs_007d-2631"></a><a name=your_sha256_hashfs_007d-2632"></a><a name=your_sha256_hashyour_sha256_hashbcall_005finsn_007d-2633"></a><br><dt><code>INSN_FROM_TARGET_P (</code><var>x</var><code>)</code><dd>In an <code>insn</code> or <code>jump_insn</code> or <code>call_insn</code> in a delay
slot of a branch, indicates that the insn
is from the target of the branch. If the branch insn has
<code>INSN_ANNULLED_BRANCH_P</code> set, this insn will only be executed if
the branch is taken. For annulled branches with
<code>INSN_FROM_TARGET_P</code> clear, the insn will be executed only if the
branch is not taken. When <code>INSN_ANNULLED_BRANCH_P</code> is not set,
this insn will always be executed. Stored in the <code>in_struct</code>
field and printed as `<samp><span class="samp">/s</span></samp>'.
<p><a name="index-LABEL_005fPRESERVE_005fP-2634"></a><a name=your_sha256_hash2fi_007d-2635"></a><a name=your_sha256_hash636"></a><a name=your_sha256_hashcode_005flabel_007d-and-_0040code_007bnote_007d-2637"></a><br><dt><code>LABEL_PRESERVE_P (</code><var>x</var><code>)</code><dd>In a <code>code_label</code> or <code>note</code>, indicates that the label is referenced by
code or data not visible to the RTL of a given function.
Labels referenced by a non-local goto will have this bit set. Stored
in the <code>in_struct</code> field and printed as `<samp><span class="samp">/s</span></samp>'.
<p><a name="index-LABEL_005fREF_005fNONLOCAL_005fP-2638"></a><a name=your_sha256_hashfv_007d-2639"></a><a name=your_sha256_hashfv_007d-2640"></a><a name=your_sha256_hash005fref_007d-and-_0040code_007breg_005flabel_007d-2641"></a><br><dt><code>LABEL_REF_NONLOCAL_P (</code><var>x</var><code>)</code><dd>In <code>label_ref</code> and <code>reg_label</code> expressions, nonzero if this is
a reference to a non-local label.
Stored in the <code>volatil</code> field and printed as `<samp><span class="samp">/v</span></samp>'.
<p><a name="index-MEM_005fKEEP_005fALIAS_005fSET_005fP-2642"></a><a name=your_sha256_hash43"></a><a name=your_sha256_hash2644"></a><br><dt><code>MEM_KEEP_ALIAS_SET_P (</code><var>x</var><code>)</code><dd>In <code>mem</code> expressions, 1 if we should keep the alias set for this
mem unchanged when we access a component. Set to 1, for example, when we
are already in a non-addressable component of an aggregate.
Stored in the <code>jump</code> field and printed as `<samp><span class="samp">/j</span></samp>'.
<p><a name="index-MEM_005fVOLATILE_005fP-2645"></a><a name=your_sha256_hash46"></a><a name=your_sha256_hashfv_007d-2647"></a><a name=your_sha256_hash002fv_007d-2648"></a><a name=your_sha256_hashyour_sha256_hash07basm_005finput_007d-2649"></a><br><dt><code>MEM_VOLATILE_P (</code><var>x</var><code>)</code><dd>In <code>mem</code>, <code>asm_operands</code>, and <code>asm_input</code> expressions,
nonzero for volatile memory references.
Stored in the <code>volatil</code> field and printed as `<samp><span class="samp">/v</span></samp>'.
<p><a name="index-MEM_005fNOTRAP_005fP-2650"></a><a name=your_sha256_hash51"></a><a name=your_sha256_hash2652"></a><br><dt><code>MEM_NOTRAP_P (</code><var>x</var><code>)</code><dd>In <code>mem</code>, nonzero for memory references that will not trap.
Stored in the <code>call</code> field and printed as `<samp><span class="samp">/c</span></samp>'.
<p><a name="index-MEM_005fPOINTER-2653"></a><a name=your_sha256_hash54"></a><a name=your_sha256_hash007bmem_007d-2655"></a><br><dt><code>MEM_POINTER (</code><var>x</var><code>)</code><dd>Nonzero in a <code>mem</code> if the memory reference holds a pointer.
Stored in the <code>frame_related</code> field and printed as `<samp><span class="samp">/f</span></samp>'.
<p><a name="index-REG_005fFUNCTION_005fVALUE_005fP-2656"></a><a name=your_sha256_hash57"></a><a name=your_sha256_hashbreg_007d-2658"></a><br><dt><code>REG_FUNCTION_VALUE_P (</code><var>x</var><code>)</code><dd>Nonzero in a <code>reg</code> if it is the place in which this function's
value is going to be returned. (This happens only in a hard
register.) Stored in the <code>return_val</code> field and printed as
`<samp><span class="samp">/i</span></samp>'.
<p><a name="index-REG_005fPOINTER-2659"></a><a name=your_sha256_hash60"></a><a name=your_sha256_hash007breg_007d-2661"></a><br><dt><code>REG_POINTER (</code><var>x</var><code>)</code><dd>Nonzero in a <code>reg</code> if the register holds a pointer. Stored in the
<code>frame_related</code> field and printed as `<samp><span class="samp">/f</span></samp>'.
<p><a name="index-REG_005fUSERVAR_005fP-2662"></a><a name=your_sha256_hash63"></a><a name=your_sha256_hash7d-2664"></a><br><dt><code>REG_USERVAR_P (</code><var>x</var><code>)</code><dd>In a <code>reg</code>, nonzero if it corresponds to a variable present in
the user's source code. Zero for temporaries generated internally by
the compiler. Stored in the <code>volatil</code> field and printed as
`<samp><span class="samp">/v</span></samp>'.
<p>The same hard register may be used also for collecting the values of
functions called by this one, but <code>REG_FUNCTION_VALUE_P</code> is zero
in this kind of use.
<p><a name="index-RTX_005fFRAME_005fRELATED_005fP-2665"></a><a name=your_sha256_hash666"></a><a name=your_sha256_hashff_007d-2667"></a><a name=your_sha256_hashff_007d-2668"></a><a name=your_sha256_hashd-2669"></a><a name=your_sha256_hash70"></a><a name=your_sha256_hashyour_sha256_hashyour_sha256_hashnd-_0040code_007bset_007d-2671"></a><br><dt><code>RTX_FRAME_RELATED_P (</code><var>x</var><code>)</code><dd>Nonzero in an <code>insn</code>, <code>call_insn</code>, <code>jump_insn</code>,
<code>barrier</code>, or <code>set</code> which is part of a function prologue
and sets the stack pointer, sets the frame pointer, or saves a register.
This flag should also be set on an instruction that sets up a temporary
register to use in place of the frame pointer.
Stored in the <code>frame_related</code> field and printed as `<samp><span class="samp">/f</span></samp>'.
<p>In particular, on RISC targets where there are limits on the sizes of
immediate constants, it is sometimes impossible to reach the register
save area directly from the stack pointer. In that case, a temporary
register is used that is near enough to the register save area, and the
Canonical Frame Address, i.e., DWARF2's logical frame pointer, register
must (temporarily) be changed to be this temporary register. So, the
instruction that sets this temporary register must be marked as
<code>RTX_FRAME_RELATED_P</code>.
<p>If the marked instruction is overly complex (defined in terms of what
<code>dwarf2out_frame_debug_expr</code> can handle), you will also have to
create a <code>REG_FRAME_RELATED_EXPR</code> note and attach it to the
instruction. This note should contain a simple expression of the
computation performed by this instruction, i.e., one that
<code>dwarf2out_frame_debug_expr</code> can handle.
<p>This flag is required for exception handling support on targets with RTL
prologues.
<p><a name="index-MEM_005fREADONLY_005fP-2672"></a><a name=your_sha256_hash73"></a><a name=your_sha256_hash_007d-2674"></a><br><dt><code>MEM_READONLY_P (</code><var>x</var><code>)</code><dd>Nonzero in a <code>mem</code>, if the memory is statically allocated and read-only.
<p>Read-only in this context means never modified during the lifetime of the
program, not necessarily in ROM or in write-disabled pages. A common
example of the latter is a shared library's global offset table. This
table is initialized by the runtime loader, so the memory is technically
writable, but after control is transferred from the runtime loader to the
application, this memory will never be subsequently modified.
<p>Stored in the <code>unchanging</code> field and printed as `<samp><span class="samp">/u</span></samp>'.
<p><a name="index-SCHED_005fGROUP_005fP-2675"></a><a name=your_sha256_hash676"></a><a name=your_sha256_hashfs_007d-2677"></a><a name=your_sha256_hashfs_007d-2678"></a><a name=your_sha256_hashp_007b_002fs_007d-2679"></a><a name=your_sha256_hashyour_sha256_hashyour_sha256_hash007d-2680"></a><br><dt><code>SCHED_GROUP_P (</code><var>x</var><code>)</code><dd>During instruction scheduling, in an <code>insn</code>, <code>call_insn</code>,
<code>jump_insn</code> or <code>jump_table_data</code>, indicates that the
previous insn must be scheduled together with this insn. This is used to
ensure that certain groups of instructions will not be split up by the
instruction scheduling pass, for example, <code>use</code> insns before
a <code>call_insn</code> may not be separated from the <code>call_insn</code>.
Stored in the <code>in_struct</code> field and printed as `<samp><span class="samp">/s</span></samp>'.
<p><a name="index-SET_005fIS_005fRETURN_005fP-2681"></a><a name=your_sha256_hash682"></a><a name=your_sha256_hash-2683"></a><br><dt><code>SET_IS_RETURN_P (</code><var>x</var><code>)</code><dd>For a <code>set</code>, nonzero if it is for a return.
Stored in the <code>jump</code> field and printed as `<samp><span class="samp">/j</span></samp>'.
<p><a name="index-SIBLING_005fCALL_005fP-2684"></a><a name=your_sha256_hashfj_007d-2685"></a><a name=your_sha256_hashinsn_007d-2686"></a><br><dt><code>SIBLING_CALL_P (</code><var>x</var><code>)</code><dd>For a <code>call_insn</code>, nonzero if the insn is a sibling call.
Stored in the <code>jump</code> field and printed as `<samp><span class="samp">/j</span></samp>'.
<p><a name="index-STRING_005fPOOL_005fADDRESS_005fP-2687"></a><a name=your_sha256_hash2ff_007d-2688"></a><a name=your_sha256_hash007bsymbol_005fref_007d-2689"></a><br><dt><code>STRING_POOL_ADDRESS_P (</code><var>x</var><code>)</code><dd>For a <code>symbol_ref</code> expression, nonzero if it addresses this function's
string constant pool.
Stored in the <code>frame_related</code> field and printed as `<samp><span class="samp">/f</span></samp>'.
<p><a name="index-SUBREG_005fPROMOTED_005fUNSIGNED_005fP-2690"></a><a name=your_sha256_hash-and-_0040samp_007b_002fv_007d-2691"></a><a name=your_sha256_hashreg_007d-2692"></a><a name=your_sha256_hash_007d-2693"></a><br><dt><code>SUBREG_PROMOTED_UNSIGNED_P (</code><var>x</var><code>)</code><dd>Returns a value greater than zero for a <code>subreg</code> that has
<code>SUBREG_PROMOTED_VAR_P</code> nonzero if the object being referenced is kept
zero-extended, zero if it is kept sign-extended, and less than zero if it is
extended some other way via the <code>ptr_extend</code> instruction.
Stored in the <code>unchanging</code>
field and <code>volatil</code> field, printed as `<samp><span class="samp">/u</span></samp>' and `<samp><span class="samp">/v</span></samp>'.
This macro may only be used to get the value; it may not be used to change
the value. Use <code>SUBREG_PROMOTED_UNSIGNED_SET</code> to change the value.
<p><a name="index-SUBREG_005fPROMOTED_005fUNSIGNED_005fSET-2694"></a><a name=your_sha256_hash-2695"></a><a name=your_sha256_hashreg_007d-2696"></a><a name=your_sha256_hash_007d-2697"></a><br><dt><code>SUBREG_PROMOTED_UNSIGNED_SET (</code><var>x</var><code>)</code><dd>Set the <code>unchanging</code> and <code>volatil</code> fields in a <code>subreg</code>
to reflect zero, sign, or other extension. If <code>volatil</code> is
zero, then <code>unchanging</code> as nonzero means zero extension and as
zero means sign extension. If <code>volatil</code> is nonzero then some
other type of extension was done via the <code>ptr_extend</code> instruction.
<p><a name="index-SUBREG_005fPROMOTED_005fVAR_005fP-2698"></a><a name=your_sha256_hash-2699"></a><a name=your_sha256_hashsubreg_007d-2700"></a><br><dt><code>SUBREG_PROMOTED_VAR_P (</code><var>x</var><code>)</code><dd>Nonzero in a <code>subreg</code> if it was made when accessing an object that
was promoted to a wider mode in accord with the <code>PROMOTED_MODE</code> machine
description macro (see <a href="Storage-Layout.html#Storage-Layout">Storage Layout</a>). In this case, the mode of
the <code>subreg</code> is the declared mode of the object and the mode of
<code>SUBREG_REG</code> is the mode of the register that holds the object.
Promoted variables are always either sign- or zero-extended to the wider
mode on every assignment. Stored in the <code>in_struct</code> field and
printed as `<samp><span class="samp">/s</span></samp>'.
<p><a name="index-SYMBOL_005fREF_005fUSED-2701"></a><a name=your_sha256_hash5fref_007d-2702"></a><br><dt><code>SYMBOL_REF_USED (</code><var>x</var><code>)</code><dd>In a <code>symbol_ref</code>, indicates that <var>x</var> has been used. This is
normally only used to ensure that <var>x</var> is only declared external
once. Stored in the <code>used</code> field.
<p><a name="index-SYMBOL_005fREF_005fWEAK-2703"></a><a name=your_sha256_hash2fi_007d-2704"></a><a name=your_sha256_hashbsymbol_005fref_007d-2705"></a><br><dt><code>SYMBOL_REF_WEAK (</code><var>x</var><code>)</code><dd>In a <code>symbol_ref</code>, indicates that <var>x</var> has been declared weak.
Stored in the <code>return_val</code> field and printed as `<samp><span class="samp">/i</span></samp>'.
<p><a name="index-SYMBOL_005fREF_005fFLAG-2706"></a><a name=your_sha256_hash2fv_007d-2707"></a><a name=your_sha256_hash_005fref_007d-2708"></a><br><dt><code>SYMBOL_REF_FLAG (</code><var>x</var><code>)</code><dd>In a <code>symbol_ref</code>, this is used as a flag for machine-specific purposes.
Stored in the <code>volatil</code> field and printed as `<samp><span class="samp">/v</span></samp>'.
<p>Most uses of <code>SYMBOL_REF_FLAG</code> are historic and may be subsumed
by <code>SYMBOL_REF_FLAGS</code>. Certainly use of <code>SYMBOL_REF_FLAGS</code>
is mandatory if the target requires more than one bit of storage.
<p><a name="index-PREFETCH_005fSCHEDULE_005fBARRIER_005fP-2709"></a><a name=your_sha256_hash7d-2710"></a><a name=your_sha256_hashtch_007d-2711"></a><br><dt><code>PREFETCH_SCHEDULE_BARRIER_P (</code><var>x</var><code>)</code><dd>In a <code>prefetch</code>, indicates that the prefetch is a scheduling barrier.
No other INSNs will be moved over it.
Stored in the <code>volatil</code> field and printed as `<samp><span class="samp">/v</span></samp>'.
</dl>
<p>These are the fields to which the above macros refer:
<a name="index-call-2712"></a>
<a name="index-g_t_0040samp_007b_002fc_007d-in-RTL-dump-2713"></a>
<dl><dt><code>call</code><dd>In a <code>mem</code>, 1 means that the memory reference will not trap.
<p>In a <code>call</code>, 1 means that this pure or const call may possibly
infinite loop.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/c</span></samp>'.
<p><a name="index-frame_005frelated-2714"></a><a name="index-g_t_0040samp_007b_002ff_007d-in-RTL-dump-2715"></a><br><dt><code>frame_related</code><dd>In an <code>insn</code> or <code>set</code> expression, 1 means that it is part of
a function prologue and sets the stack pointer, sets the frame pointer,
saves a register, or sets up a temporary register to use in place of the
frame pointer.
<p>In <code>reg</code> expressions, 1 means that the register holds a pointer.
<p>In <code>mem</code> expressions, 1 means that the memory reference holds a pointer.
<p>In <code>symbol_ref</code> expressions, 1 means that the reference addresses
this function's string constant pool.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/f</span></samp>'.
<p><a name="index-in_005fstruct-2716"></a><a name="index-g_t_0040samp_007b_002fs_007d-in-RTL-dump-2717"></a><br><dt><code>in_struct</code><dd>In <code>reg</code> expressions, it is 1 if the register has its entire life
contained within the test expression of some loop.
<p>In <code>subreg</code> expressions, 1 means that the <code>subreg</code> is accessing
an object that has had its mode promoted from a wider mode.
<p>In <code>label_ref</code> expressions, 1 means that the referenced label is
outside the innermost loop containing the insn in which the <code>label_ref</code>
was found.
<p>In <code>code_label</code> expressions, it is 1 if the label may never be deleted.
This is used for labels which are the target of non-local gotos. Such a
label that would have been deleted is replaced with a <code>note</code> of type
<code>NOTE_INSN_DELETED_LABEL</code>.
<p>In an <code>insn</code> during dead-code elimination, 1 means that the insn is
dead code.
<p>In an <code>insn</code> or <code>jump_insn</code> during reorg for an insn in the
delay slot of a branch,
1 means that this insn is from the target of the branch.
<p>In an <code>insn</code> during instruction scheduling, 1 means that this insn
must be scheduled as part of a group together with the previous insn.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/s</span></samp>'.
<p><a name="index-return_005fval-2718"></a><a name="index-g_t_0040samp_007b_002fi_007d-in-RTL-dump-2719"></a><br><dt><code>return_val</code><dd>In <code>reg</code> expressions, 1 means the register contains
the value to be returned by the current function. On
machines that pass parameters in registers, the same register number
may be used for parameters as well, but this flag is not set on such
uses.
<p>In <code>symbol_ref</code> expressions, 1 means the referenced symbol is weak.
<p>In <code>call</code> expressions, 1 means the call is pure.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/i</span></samp>'.
<p><a name="index-jump-2720"></a><a name="index-g_t_0040samp_007b_002fj_007d-in-RTL-dump-2721"></a><br><dt><code>jump</code><dd>In a <code>mem</code> expression, 1 means we should keep the alias set for this
mem unchanged when we access a component.
<p>In a <code>set</code>, 1 means it is for a return.
<p>In a <code>call_insn</code>, 1 means it is a sibling call.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/j</span></samp>'.
<p><a name="index-unchanging-2722"></a><a name="index-g_t_0040samp_007b_002fu_007d-in-RTL-dump-2723"></a><br><dt><code>unchanging</code><dd>In <code>reg</code> and <code>mem</code> expressions, 1 means
that the value of the expression never changes.
<p>In <code>subreg</code> expressions, it is 1 if the <code>subreg</code> references an
unsigned object whose mode has been promoted to a wider mode.
<p>In an <code>insn</code> or <code>jump_insn</code> in the delay slot of a branch
instruction, 1 means an annulling branch should be used.
<p>In a <code>symbol_ref</code> expression, 1 means that this symbol addresses
something in the per-function constant pool.
<p>In a <code>call_insn</code> 1 means that this instruction is a call to a const
function.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/u</span></samp>'.
<p><a name="index-used-2724"></a><br><dt><code>used</code><dd>This flag is used directly (without an access macro) at the end of RTL
generation for a function, to count the number of times an expression
appears in insns. Expressions that appear more than once are copied,
according to the rules for shared structure (see <a href="Sharing.html#Sharing">Sharing</a>).
<p>For a <code>reg</code>, it is used directly (without an access macro) by the
leaf register renumbering code to ensure that each register is only
renumbered once.
<p>In a <code>symbol_ref</code>, it indicates that an external declaration for
the symbol has already been written.
<p><a name="index-volatil-2725"></a><a name="index-g_t_0040samp_007b_002fv_007d-in-RTL-dump-2726"></a><br><dt><code>volatil</code><dd><a name="index-volatile-memory-references-2727"></a>In a <code>mem</code>, <code>asm_operands</code>, or <code>asm_input</code>
expression, it is 1 if the memory
reference is volatile. Volatile memory references may not be deleted,
reordered or combined.
<p>In a <code>symbol_ref</code> expression, it is used for machine-specific
purposes.
<p>In a <code>reg</code> expression, it is 1 if the value is a user-level variable.
0 indicates an internal compiler temporary.
<p>In an <code>insn</code>, 1 means the insn has been deleted.
<p>In <code>label_ref</code> and <code>reg_label</code> expressions, 1 means a reference
to a non-local label.
<p>In <code>prefetch</code> expressions, 1 means that the containing insn is a
scheduling barrier.
<p>In an RTL dump, this flag is represented as `<samp><span class="samp">/v</span></samp>'.
</dl>
</body></html>
```
|
```kotlin
package expo.modules.kotlin.types
import com.facebook.react.bridge.Dynamic
import com.facebook.react.bridge.DynamicFromObject
import com.facebook.react.bridge.ReadableMap
import expo.modules.kotlin.exception.CollectionElementCastException
import expo.modules.kotlin.exception.exceptionDecorator
import expo.modules.kotlin.jni.ExpectedType
import expo.modules.kotlin.recycle
import kotlin.reflect.KType
/**
 * Type converter that produces a Kotlin [Map] from either a JS object
 * ([Dynamic]/[ReadableMap]) or an already-converted [Map].
 *
 * Map keys must be Strings (JS object keys are always strings); values are
 * converted recursively with the converter obtained for the declared value type.
 */
class MapTypeConverter(
  converterProvider: TypeConverterProvider,
  private val mapType: KType
) : DynamicAwareTypeConverters<Map<*, *>>(mapType.isMarkedNullable) {
  init {
    // Only String keys are supported, because JS object keys are strings.
    require(mapType.arguments.first().type?.classifier == String::class) {
      "The map key type should be String, but received ${mapType.arguments.first()}."
    }
  }

  // Converter for the declared value type (the second generic argument).
  private val valueConverter = converterProvider.obtainTypeConverter(
    requireNotNull(mapType.arguments.getOrNull(1)?.type) {
      // Bug fix: arguments[1] is the VALUE type (the key type is checked above),
      // so the failure message must not claim the key type is missing.
      "The map type should contain the value type."
    }
  )

  override fun convertFromDynamic(value: Dynamic): Map<*, *> {
    val jsMap = value.asMap()
    return convertFromReadableMap(jsMap)
  }

  override fun convertFromAny(value: Any): Map<*, *> {
    return if (valueConverter.isTrivial()) {
      // A trivial value converter is an identity conversion; skip the per-entry pass.
      value as Map<*, *>
    } else {
      (value as Map<*, *>).mapValues { (_, v) ->
        exceptionDecorator({ cause ->
          // Wrap conversion failures with the element's position within the map type.
          CollectionElementCastException(
            mapType,
            mapType.arguments[1].type!!,
            v!!::class,
            cause
          )
        }) {
          valueConverter.convert(v)
        }
      }
    }
  }

  /** Converts each entry of a JS [ReadableMap], recycling the temporary [Dynamic] wrappers. */
  private fun convertFromReadableMap(jsMap: ReadableMap): Map<*, *> {
    val result = mutableMapOf<String, Any?>()
    jsMap.entryIterator.forEach { (key, value) ->
      DynamicFromObject(value).recycle {
        exceptionDecorator({ cause ->
          CollectionElementCastException(mapType, mapType.arguments[1].type!!, type, cause)
        }) {
          result[key] = valueConverter.convert(this)
        }
      }
    }
    return result
  }

  override fun getCppRequiredTypes(): ExpectedType = ExpectedType.forMap(
    valueConverter.getCppRequiredTypes()
  )

  override fun isTrivial() = valueConverter.isTrivial()
}
```
|
```go
package decoder
import (
"bytes"
"encoding/json"
"io"
"strconv"
"unsafe"
"github.com/goccy/go-json/internal/errors"
)
const (
	// initBufSize is the initial allocation size, in bytes, of a Stream's
	// read buffer; readBuf doubles it whenever a read fills the buffer.
	initBufSize = 512
)
// Stream holds the incremental decoding state over an io.Reader.
//
// The buffer always keeps a NUL (0x00) byte as an end-of-data sentinel so
// scanning code can detect when it must refill from the reader (see read).
type Stream struct {
	buf                   []byte    // working buffer; valid data is followed by a NUL sentinel
	bufSize               int64     // capacity target; doubled by readBuf after a full read
	length                int64     // number of meaningful bytes currently in buf
	r                     io.Reader // underlying source of JSON bytes
	offset                int64     // bytes already discarded from buf (see reset)
	cursor                int64     // current read position within buf
	filledBuffer          bool      // last read filled its whole window; grow next time
	allRead               bool      // underlying reader has returned io.EOF
	UseNumber             bool      // Token returns numbers as json.Number instead of float64
	DisallowUnknownFields bool      // reject unknown struct fields while decoding
	Option                *Option
}
// NewStream returns a Stream that decodes JSON read from r, starting with
// the default initial buffer size and default options.
func NewStream(r io.Reader) *Stream {
	initial := make([]byte, initBufSize)
	return &Stream{
		r:       r,
		buf:     initial,
		bufSize: int64(len(initial)),
		Option:  new(Option),
	}
}
// TotalOffset reports the absolute number of input bytes consumed so far,
// i.e. the cursor position relative to the start of the whole input.
func (s *Stream) TotalOffset() int64 {
	return s.totalOffset()
}
// Buffered returns a reader over the not-yet-consumed bytes currently held
// in the stream's buffer, stopping at the first NUL sentinel if present.
func (s *Stream) Buffered() io.Reader {
	remaining := s.buf[s.cursor:]
	for idx, b := range remaining {
		if b == nul {
			return bytes.NewReader(remaining[:idx])
		}
	}
	return bytes.NewReader(remaining)
}
// PrepareForDecode positions the cursor on the next value to decode:
// whitespace is skipped, a single ',' or ':' separator is consumed, and the
// buffer is refilled when the NUL sentinel is reached. It returns io.EOF
// only when no further bytes can be read.
func (s *Stream) PrepareForDecode() error {
	for {
		c := s.char()
		if c == ' ' || c == '\t' || c == '\r' || c == '\n' {
			s.cursor++
			continue
		}
		if c == ',' || c == ':' {
			s.cursor++
			return nil
		}
		if c == nul {
			if !s.read() {
				return io.EOF
			}
			continue
		}
		// Any other byte is the start of the next value; leave it in place.
		return nil
	}
}
// totalOffset is the cursor position relative to the start of the input,
// accounting for bytes already discarded from the buffer by reset.
func (s *Stream) totalOffset() int64 {
	return s.offset + s.cursor
}
// char returns the byte under the cursor without advancing. A NUL result
// means the buffered data is exhausted and read() should be attempted.
func (s *Stream) char() byte {
	return s.buf[s.cursor]
}
// equalChar reports whether the byte under the cursor equals c, refilling
// the buffer first if the cursor sits on the NUL sentinel.
func (s *Stream) equalChar(c byte) bool {
	if s.buf[s.cursor] == nul {
		// Sentinel hit: pull more input, then re-read the current byte.
		s.read()
		return s.buf[s.cursor] == c
	}
	return s.buf[s.cursor] == c
}
// stat returns the current buffer, the cursor, and a raw pointer to the
// buffer's backing array for pointer-arithmetic scanning via char(p, cursor).
func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) {
	return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data
}
// bufptr returns a raw pointer to the buffer's backing array. Callers must
// refresh it after any read(), since the buffer may have been reallocated.
func (s *Stream) bufptr() unsafe.Pointer {
	return (*sliceHeader)(unsafe.Pointer(&s.buf)).data
}
// statForRetry is like stat but first rewinds the cursor by one byte,
// because callers advance the cursor at the top of their scan loop and need
// to re-examine the current byte after a refill.
func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) {
	s.cursor-- // for retry ( because caller progress cursor position in each loop )
	return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data
}
// Reset discards the consumed prefix of the buffer and re-syncs bufSize
// with the (now shorter) buffer so subsequent growth math stays correct.
func (s *Stream) Reset() {
	s.reset()
	s.bufSize = int64(len(s.buf))
}
// More reports whether another element remains in the current array or
// object: whitespace is skipped, '}' or ']' means the container has ended,
// and the buffer is refilled when the NUL sentinel is reached.
func (s *Stream) More() bool {
	for {
		switch c := s.char(); c {
		case ' ', '\n', '\r', '\t':
			s.cursor++
		case '}', ']':
			return false
		case nul:
			if !s.read() {
				return false
			}
		default:
			// Any other byte starts the next element.
			return true
		}
	}
}
// Token consumes and returns the next JSON token: a json.Delim for one of
// '{' '}' '[' ']', a float64 (or json.Number when UseNumber is set) for a
// number, a string, a bool, or nil for JSON null. Whitespace, commas, and
// colons are skipped. It returns io.EOF once the input is exhausted.
func (s *Stream) Token() (interface{}, error) {
	for {
		c := s.char()
		switch c {
		case ' ', '\n', '\r', '\t':
			s.cursor++
		case '{', '[', ']', '}':
			s.cursor++
			return json.Delim(c), nil
		case ',', ':':
			s.cursor++
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			bytes := floatBytes(s)
			// Zero-copy []byte -> string view over the number's bytes.
			str := *(*string)(unsafe.Pointer(&bytes))
			if s.UseNumber {
				return json.Number(str), nil
			}
			f64, err := strconv.ParseFloat(str, 64)
			if err != nil {
				return nil, err
			}
			return f64, nil
		case '"':
			bytes, err := stringBytes(s)
			if err != nil {
				return nil, err
			}
			return string(bytes), nil
		case 't':
			if err := trueBytes(s); err != nil {
				return nil, err
			}
			return true, nil
		case 'f':
			if err := falseBytes(s); err != nil {
				return nil, err
			}
			return false, nil
		case 'n':
			if err := nullBytes(s); err != nil {
				return nil, err
			}
			return nil, nil
		case nul:
			// Sentinel: try to refill; if nothing is left, report EOF below.
			if s.read() {
				continue
			}
			goto END
		default:
			return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset())
		}
	}
END:
	return nil, io.EOF
}
// reset drops everything before the cursor, folding the consumed byte
// count into offset so that totalOffset() remains stable.
func (s *Stream) reset() {
	s.offset += s.cursor
	s.buf = s.buf[s.cursor:]
	s.length -= s.cursor
	s.cursor = 0
}
// readBuf returns the writable tail of the buffer for the next read.
// If the previous read filled the buffer completely, the buffer is
// doubled first. Trailing nul sentinel bytes left over from the
// previous fill are excluded from s.length so it covers real data only.
func (s *Stream) readBuf() []byte {
	if s.filledBuffer {
		s.bufSize *= 2
		remainBuf := s.buf
		s.buf = make([]byte, s.bufSize)
		copy(s.buf, remainBuf)
	}
	remainLen := s.length - s.cursor
	remainNotNulCharNum := int64(0)
	// Count the bytes after the cursor up to the first nul sentinel.
	for i := int64(0); i < remainLen; i++ {
		if s.buf[s.cursor+i] == nul {
			break
		}
		remainNotNulCharNum++
	}
	s.length = s.cursor + remainNotNulCharNum
	return s.buf[s.cursor+remainNotNulCharNum:]
}
// read pulls the next chunk from the underlying reader into the buffer
// and reports whether more data became available. The final byte of the
// writable region is reserved as a nul end-of-buffer sentinel. io.EOF
// latches allRead so later calls return false immediately.
func (s *Stream) read() bool {
	if s.allRead {
		return false
	}
	buf := s.readBuf()
	last := len(buf) - 1
	buf[last] = nul // keep a nul sentinel at the very end of the buffer
	n, err := s.r.Read(buf[:last])
	s.length += int64(n)
	if n == last {
		// The reader filled everything offered; grow the buffer next time.
		s.filledBuffer = true
	} else {
		s.filledBuffer = false
	}
	if err == io.EOF {
		s.allRead = true
	} else if err != nil {
		// NOTE(review): bytes read before a non-EOF error remain buffered,
		// but the error itself is not surfaced to the caller here.
		return false
	}
	return true
}
// skipWhiteSpace advances the cursor past JSON whitespace, refilling the
// buffer whenever the nul sentinel is hit, and returns the first
// non-whitespace character (or nul at end of input).
func (s *Stream) skipWhiteSpace() byte {
	p := s.bufptr()
LOOP:
	c := char(p, s.cursor)
	switch c {
	case ' ', '\n', '\t', '\r':
		s.cursor++
		goto LOOP
	case nul:
		if s.read() {
			// The buffer may have been reallocated; refresh the pointer.
			p = s.bufptr()
			goto LOOP
		}
	}
	return c
}
// skipObject skips the remainder of a JSON object whose opening '{' has
// already been consumed. depth is the current nesting level, checked
// against maxDecodeNestingDepth. On success the cursor is left just past
// the matching '}'.
func (s *Stream) skipObject(depth int64) error {
	braceCount := 1
	_, cursor, p := s.stat()
	for {
		switch char(p, cursor) {
		case '{':
			braceCount++
			depth++
			if depth > maxDecodeNestingDepth {
				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
			}
		case '}':
			braceCount--
			depth--
			if braceCount == 0 {
				// Matching close brace: position past it and stop.
				s.cursor = cursor + 1
				return nil
			}
		case '[':
			depth++
			if depth > maxDecodeNestingDepth {
				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
			}
		case ']':
			depth--
		case '"':
			// Scan to the closing quote, honoring backslash escapes and
			// refilling the buffer when the nul sentinel is reached.
			for {
				cursor++
				switch char(p, cursor) {
				case '\\':
					cursor++
					if char(p, cursor) == nul {
						s.cursor = cursor
						if s.read() {
							_, cursor, p = s.stat()
							continue
						}
						return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
					}
				case '"':
					goto SWITCH_OUT
				case nul:
					s.cursor = cursor
					if s.read() {
						// statForRetry backs the cursor up so the loop's
						// cursor++ re-reads the refilled byte.
						_, cursor, p = s.statForRetry()
						continue
					}
					return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
				}
			}
		case nul:
			s.cursor = cursor
			if s.read() {
				_, cursor, p = s.stat()
				continue
			}
			return errors.ErrUnexpectedEndOfJSON("object of object", cursor)
		}
	SWITCH_OUT:
		cursor++
	}
}
// skipArray skips the remainder of a JSON array whose opening '[' has
// already been consumed. depth is the current nesting level, checked
// against maxDecodeNestingDepth. On success the cursor is left just past
// the matching ']'.
func (s *Stream) skipArray(depth int64) error {
	bracketCount := 1
	_, cursor, p := s.stat()
	for {
		switch char(p, cursor) {
		case '[':
			bracketCount++
			depth++
			if depth > maxDecodeNestingDepth {
				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
			}
		case ']':
			bracketCount--
			depth--
			if bracketCount == 0 {
				// Matching close bracket: position past it and stop.
				s.cursor = cursor + 1
				return nil
			}
		case '{':
			depth++
			if depth > maxDecodeNestingDepth {
				return errors.ErrExceededMaxDepth(s.char(), s.cursor)
			}
		case '}':
			depth--
		case '"':
			// Scan to the closing quote, honoring backslash escapes and
			// refilling the buffer when the nul sentinel is reached.
			for {
				cursor++
				switch char(p, cursor) {
				case '\\':
					cursor++
					if char(p, cursor) == nul {
						s.cursor = cursor
						if s.read() {
							_, cursor, p = s.stat()
							continue
						}
						return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
					}
				case '"':
					goto SWITCH_OUT
				case nul:
					s.cursor = cursor
					if s.read() {
						// statForRetry backs the cursor up so the loop's
						// cursor++ re-reads the refilled byte.
						_, cursor, p = s.statForRetry()
						continue
					}
					return errors.ErrUnexpectedEndOfJSON("string of object", cursor)
				}
			}
		case nul:
			s.cursor = cursor
			if s.read() {
				_, cursor, p = s.stat()
				continue
			}
			return errors.ErrUnexpectedEndOfJSON("array of object", cursor)
		}
	SWITCH_OUT:
		cursor++
	}
}
// skipValue skips exactly one JSON value of any kind (object, array,
// string, number, or literal), delegating nested containers to
// skipObject/skipArray with an incremented depth. The cursor ends just
// past the skipped value.
func (s *Stream) skipValue(depth int64) error {
	_, cursor, p := s.stat()
	for {
		switch char(p, cursor) {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case nul:
			s.cursor = cursor
			if s.read() {
				_, cursor, p = s.stat()
				continue
			}
			return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset())
		case '{':
			s.cursor = cursor + 1
			return s.skipObject(depth + 1)
		case '[':
			s.cursor = cursor + 1
			return s.skipArray(depth + 1)
		case '"':
			// Scan the string to its closing quote, handling escapes and
			// buffer refills along the way.
			for {
				cursor++
				switch char(p, cursor) {
				case '\\':
					cursor++
					if char(p, cursor) == nul {
						s.cursor = cursor
						if s.read() {
							_, cursor, p = s.stat()
							continue
						}
						return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset())
					}
				case '"':
					s.cursor = cursor + 1
					return nil
				case nul:
					s.cursor = cursor
					if s.read() {
						// statForRetry backs the cursor up so cursor++
						// re-reads the refilled byte.
						_, cursor, p = s.statForRetry()
						continue
					}
					return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset())
				}
			}
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			// Consume number characters; a number ending at EOF is valid.
			for {
				cursor++
				c := char(p, cursor)
				if floatTable[c] {
					continue
				} else if c == nul {
					if s.read() {
						_, cursor, p = s.stat()
						continue
					}
				}
				s.cursor = cursor
				return nil
			}
		case 't':
			s.cursor = cursor
			if err := trueBytes(s); err != nil {
				return err
			}
			return nil
		case 'f':
			s.cursor = cursor
			if err := falseBytes(s); err != nil {
				return err
			}
			return nil
		case 'n':
			s.cursor = cursor
			if err := nullBytes(s); err != nil {
				return err
			}
			return nil
		}
		cursor++
	}
}
// nullBytes consumes the literal "null". The cursor is on 'n' on entry
// and one past the final 'l' on success; a mismatch on the nul sentinel
// triggers a buffer refill via retryReadNull.
func nullBytes(s *Stream) error {
	for _, expected := range []byte{'u', 'l', 'l'} {
		s.cursor++
		if s.char() != expected {
			if err := retryReadNull(s); err != nil {
				return err
			}
		}
	}
	s.cursor++
	return nil
}
// retryReadNull refills the buffer when the mismatch was only the nul
// end-of-buffer sentinel; any other character is a genuine parse error.
func retryReadNull(s *Stream) error {
	if s.char() != nul || !s.read() {
		return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset())
	}
	return nil
}
// trueBytes consumes the literal "true". The cursor is on 't' on entry
// and one past the final 'e' on success; a mismatch on the nul sentinel
// triggers a buffer refill via retryReadTrue.
func trueBytes(s *Stream) error {
	for _, expected := range []byte{'r', 'u', 'e'} {
		s.cursor++
		if s.char() != expected {
			if err := retryReadTrue(s); err != nil {
				return err
			}
		}
	}
	s.cursor++
	return nil
}
// retryReadTrue refills the buffer when the mismatch was only the nul
// end-of-buffer sentinel; any other character is a genuine parse error.
func retryReadTrue(s *Stream) error {
	if s.char() != nul || !s.read() {
		return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset())
	}
	return nil
}
// falseBytes consumes the literal "false". The cursor is on 'f' on entry
// and one past the final 'e' on success; a mismatch on the nul sentinel
// triggers a buffer refill via retryReadFalse.
func falseBytes(s *Stream) error {
	for _, expected := range []byte{'a', 'l', 's', 'e'} {
		s.cursor++
		if s.char() != expected {
			if err := retryReadFalse(s); err != nil {
				return err
			}
		}
	}
	s.cursor++
	return nil
}
// retryReadFalse refills the buffer when the mismatch was only the nul
// end-of-buffer sentinel; any other character is a genuine parse error.
func retryReadFalse(s *Stream) error {
	if s.char() != nul || !s.read() {
		return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset())
	}
	return nil
}
```
|
Honey Creek Friends' Meetinghouse is an historic building located in New Providence, Iowa, United States. It was listed on the National Register of Historic Places in 1980.
History
The Religious Society of Friends, who were originally from Yadkin County, North Carolina, organized the Honey Creek Monthly Meeting in 1852. They were a party of 44 composed of members of the Reece family ranging in ages from 8 weeks to over 70 years. It took two months for the party to reach their destination of Salem, Iowa. They moved on from there to establish the church in New Providence, Iowa. Not one railroad was crossed during the trek. Their first meeting house was a log structure that was built in 1854 and was replaced in 1859 after it had been destroyed by a fire. Various additions were made to the building and served the community until 1916 when they constructed this building. The meetinghouse was constructed by Ernie Moon, a local builder, and church members. Services are no longer held in the building (Reopened February 20, 2022, for weekly 'Unprogrammed' gatherings), which is now owned by the Honey Creek Preservation Group. They maintain the building, sponsor events and maintain records and other information from the church.
Architecture
The meetinghouse is constructed of clay tiles with a brick veneer on the exterior. The building's main gable is oriented on a north-south axis, with a smaller front gable that sits asymmetrically on the east side. It contains the main entrance into the building. Behind the entrance is a short square bell tower with a crenellated parapet and pairs of elliptical arched openings on each side of the bell chamber. There is a secondary entrance on the north side of the building.
Large segmental arched windows light the interior on the north, south and east sides of the building. All of the other window openings on the building are rectilinear. The main entrance consists of two paneled doors. They open into a vestibule and stair hall, as the meeting room sits a few steps above the entrance. The meeting room occupies most of the main level of the building.
References
Religious organizations established in 1852
Churches completed in 1916
20th-century Quaker meeting houses
Bungalow architecture in Iowa
American Craftsman architecture in Iowa
Churches in Hardin County, Iowa
Quaker meeting houses in Iowa
Churches on the National Register of Historic Places in Iowa
1852 establishments in Iowa
National Register of Historic Places in Hardin County, Iowa
|
Donna Rowland Barrett (born June 5, 1969, in Shelbyville, Tennessee) is a former Republican State Representative in the Tennessee General Assembly for the 34th House District in Rutherford County, Tennessee. She served as the State Representative from that area from 2000 to 2010.
Biography
She attended Middle Tennessee State University. She is married to Ronnie Barrett, CEO of Barrett Firearms.
Public service
In 1998, Barrett unsuccessfully ran against incumbent Representative Mary Ann Eckles for the 49th District House Seat, receiving 48% of the vote. In 2006, she ran a successful campaign against Mary Ann Eckles. She served in the Tennessee House of Representatives from 2000 to 2010. She was a member of the Finance, Ways and Means Committee, the Children and Family Affairs Committee, and the Consumer and Employee Affairs Committee. She also served on the powerful Fiscal Review Committee, overseeing government spending for the state. In 2005 she was awarded the "Taxpayer Hero Award" by Tennessee Tax Revolt, Inc.
In April 2010 she was one of five persons added to the governing body of the Tennessee Center for Policy Research (TCPR).
References
External links
Donna Rowland's Profile on the Tennessee General Assembly Information Page
Donna Rowland's Campaign/Constituent Website
1969 births
Living people
People from Shelbyville, Tennessee
People from Rutherford County, Tennessee
Republican Party members of the Tennessee House of Representatives
Women state legislators in Tennessee
21st-century American women
|
Wayde Compton (born 1972) is a Canadian writer. He was born in Vancouver, British Columbia.
Compton has published books of poetry, essays, and fiction, and he edited the first comprehensive anthology of black writing from British Columbia. He co-founded Commodore Books with David Chariandy and Karina Vernon in 2006, the first black-oriented press in Western Canada. He also co-founded the Hogan's Alley Memorial Project in 2002, a grassroots organization that promotes the history of Vancouver's black community. Compton teaches in the faculty of Creative Writing at Douglas College.
In 1996 he penned the semi-autobiographical poem "Declaration of the Halfrican Nation".
Bibliography
Anthologies
Bluesprint: Black British Columbian Literature and Orature (2001)
The Revolving City: 51 Poems and the Stories Behind Them (with Renee Sarojini Saklikar) (2015)
Fiction
The Outer Harbour: Stories (2014)
Graphic fiction
The Blue Road: A Fable of Migration (illustrated by April dela Noche Milne) (2019)
Non-fiction
After Canaan: Essays on Race, Writing, and Region (2010)
Poetry
49th Parallel Psalm (1999)
Performance Bond (2004)
See also
Canadian literature
Canadian poetry
List of Canadian poets
List of Canadian writers
References
External links
Hogan's Alley Memorial Project blog
1972 births
20th-century Canadian male writers
20th-century Canadian poets
20th-century Canadian short story writers
21st-century Canadian male writers
21st-century Canadian poets
21st-century Canadian short story writers
Black Canadian writers
Canadian male non-fiction writers
Canadian male poets
Canadian male short story writers
21st-century Canadian non-fiction writers
Living people
Writers from British Columbia
Writers from Vancouver
|
The Pittsburgh-Des Moines Steel Company (originally the Des Moines Bridge and Iron Company), and often referred to as Pitt-Des Moines Steel or PDM was an American steel fabrication company. It operated from 1892 until approximately 2002 when its assets were sold to other companies, including Chicago Bridge & Iron Company. The company began as a builder of steel water tanks and bridges. It also later fabricated the "forked" columns for the World Trade Center in the 1960s, and was the steel fabricator and erector for the Gateway Arch in St. Louis. A number of its works are listed on the National Register of Historic Places.
History
The company was founded in 1892 by two graduates of Iowa State College, William H. Jackson and Berkeley M. Moss. The partners initially contracted to have their steel tanks fabricated by Keystone Bridge Company of Pittsburgh, but soon took on a third partner, Edward W. Crellin, who was operating a small fabricating shop in Des Moines, Iowa. It was at this point that the Des Moines Bridge and Iron Company was formed. The company would ship steel stock from Pittsburgh for the manufacture of a range of engineered products including water towers, bridges, water works and electric plants. Moss left the company around 1905, after a new fabricating plant had been opened in Warren, Pennsylvania in 1900.
In 1916, the name of the company was changed to Pittsburgh-Des Moines Steel Company, and a new headquarters was opened in Pittsburgh. The partnership remained until 1956, when the company was incorporated. It later became Pittsburgh-Des Moines Corporation in 1980, which was later shortened to Pitt-Des Moines, Inc. in 1985. It had also had registered "PDM" as a trademark as early as 1930.
In July 1993, the original site and fabrication works in Des Moines, Iowa (by then called the Des Moines Heavy Bridge Division) was damaged beyond salvage due to flooding from the Raccoon River, causing the site to be permanently closed, and later sold.
In 2001, the company was acquired by the Chicago Bridge & Iron Company. The Warren plant was closed in early 2009 by CB&I. Also in 2001, the company's steel distribution unit was acquired by Reliance Steel & Aluminum Co.
In 2016, PDM relocated its headquarters to the city of Elk Grove, California, where it remains today.
Works
Works include (with variations in attribution so noted):
Beaver Creek Bridge, 180th St. between B and C Aves. over Beaver Cr., Schleswig, Iowa (Des Moines Steel Co.), NRHP-listed
Black River Bridge (Carrizo), Indian Rt. 9 over Black River, Carrizo, Arizona (Pittsburg-Des Moines Steel Co.), NRHP-listed
Black River Bridge (Pocahontas), US 67, over the Black River, Pocahontas, Arkansas (Pittsburgh-Des Moines Steel Co.), NRHP-listed
Buck Grove Bridge, Buck Creek Ave. over Buck Cr., Buck Grove, Iowa (Des Moines Steel Co.), NRHP-listed
Burden Water Tower, US Hwy. 160 W of Maple St., Burden, Kansas (Des Moines Bridge & Iron Co.), 1911.
Cotter Water Tower, NE of jct. of NE US 62B and State St., Cotter, Arkansas (Pittsburgh Des Moines Steel Co.), NRHP-listed
Cotton Plant Water Tower, jct. of N. Main & N. Vine Sts., Cotton Plant, Arkansas (Pittsburgh Des Moines Steel Co.), NRHP-listed
De Valls Bluff Waterworks, jct. of Hazel and Rumbaugh Sts., De Valls Bluff, Arkansas (Pittsburgh Des Moines Steel Co.), NRHP-listed
East Soldier River Bridge, 120th St. over East Soldier R., Charter Oak, Iowa (Des Moines Steel Co.), NRHP-listed
Elevated Metal Water Tank, Crosby, West side First Ave. E., bet. First and Second Sts. N., Crosby, Minnesota (Des Moines Bridge & Iron Co.), NRHP-listed
Elevated Metal Water Tank, Deerwood, 211 Maple St., Deerwood, Minnesota (Des Moines Bridge & Iron Co.), NRHP-listed
Forsyth Water Pumping Station, 3rd Ave. at the Yellowstone River, Forsyth, Montana (Des Moines Bridge Building Co.), NRHP-listed
Gateway Arch (1963-1965), St. Louis, Missouri (Pittsburgh-Des Moines was the steel fabricator and erector), designated as a National Historic Landmark
Hampton Waterworks, Hunt St., W of Lee St., Hampton, Arkansas (Pittsburg-Des Moines Steel Co.), NRHP-listed
Jefferson Street Viaduct, Jefferson St. over the Des Moines River, Ottumwa, Iowa, NRHP-listed (design plans for a steel viaduct)
Mineral Springs Waterworks, S. of W. Runnels and S. Hall intersection, Mineral Springs, Arkansas (Pittsburgh Des Moines Steel Company), NRHP-listed
Missisquoi River Bridge, VT 105-A over the Missisquoi R., Richford, Vermont (Pittsburgh—Des Moines Steel Co.), NRHP-listed
Monroe Water Tower, 16th Ave. and 20th St., Monroe, Wisconsin (Des Moines Bridge and Iron Co.), NRHP-listed
Neillsville Standpipe, 325 E. 4th St., Neillsville, Wisconsin (Pittsburgh-Des Moines Steel Co.), NRHP-listed
Nishnabotna River Bridge, T Ave. over Nishnabotna R., Manilla, Iowa (Des Moines Steel Co.), NRHP-listed
State Highway 29 Bridge at the Colorado River, TX 29 at the Llano Cnty. line, Buchanan Dam, Texas (Pittsburgh-Des Moines Steel Co.), NRHP-listed
State Highway 9 Bridge at the Llano River, US 87, 10 mi. S of TX 29, Mason, Texas (Pittsburgh-Des Moines Steel Co.), NRHP-listed
US 83 Bridge at the Salt Fork of the Red River, US 83, near Wellington, Texas (Pittsburgh-DesMoines Steel Co.; Texas Highway Department), NRHP-listed
Waldo Water Tower, E. Main St. W of the N. Skimmer and E. Main intersection, Waldo, Arkansas (Pittsburgh Des Moines Steel Company), NRHP-listed
Yellow Smoke Park Bridge, pedestrian path over unnamed stream, Denison, Iowa (Des Moines Steel Co.), NRHP-listed
References
Bridge companies
Steel companies of the United States
Construction and civil engineering companies established in 1892
American companies established in 1892
1892 establishments in Iowa
Construction and civil engineering companies of the United States
|
```sqlpl
-- Regression test: IN operator over LowCardinality columns with constant
-- tuples, scalar subqueries, and self-selects, plus IN over
-- Array(LowCardinality(Nullable(String))).
set allow_suspicious_low_cardinality_types = 1;
drop table if exists lc_00688;
create table lc_00688 (str LowCardinality(String), val LowCardinality(UInt8)) engine = MergeTree order by tuple();
insert into lc_00688 values ('a', 1), ('b', 2);
-- IN with literal sets
select str, str in ('a', 'd') from lc_00688;
select val, val in (1, 3) from lc_00688;
-- IN with subqueries producing ordinary (full) columns
select str, str in (select arrayJoin(['a', 'd'])) from lc_00688;
select val, val in (select arrayJoin([1, 3])) from lc_00688;
-- IN with subqueries producing LowCardinality columns
select str, str in (select str from lc_00688) from lc_00688;
select val, val in (select val from lc_00688) from lc_00688;
drop table if exists lc_00688;
drop table if exists ary_lc_null;
-- IN where the compared column is an array of LowCardinality(Nullable)
CREATE TABLE ary_lc_null (i int, v Array(LowCardinality(Nullable(String)))) ENGINE = MergeTree() ORDER BY i ;
INSERT INTO ary_lc_null VALUES (1, ['1']);
SELECT v FROM ary_lc_null WHERE v IN (SELECT v FROM ary_lc_null);
drop table if exists ary_lc_null;
```
|
```java
/*
*
*
* path_to_url
*
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
*/
package com.haulmont.cuba.web.widgets.client.addons.dragdroplayouts.ui.accordion;
import com.vaadin.client.ComponentConnector;
import com.vaadin.client.ui.dd.VAcceptCallback;
import com.vaadin.client.ui.dd.VDragEvent;
import com.haulmont.cuba.web.widgets.client.addons.dragdroplayouts.ui.VDDAbstractDropHandler;
/**
 * Drop handler for a drag-and-drop enabled accordion widget. Every event
 * first clears any previous visual emphasis and refreshes the drag detail
 * map on the layout, then runs the layout's corresponding post-hook.
 */
public class VDDAccordionDropHandler
        extends VDDAbstractDropHandler<VDDAccordion> {

    public VDDAccordionDropHandler(ComponentConnector connector) {
        super(connector);
    }

    @Override
    protected void dragAccepted(VDragEvent drag) {
        // An accepted drag is handled exactly like a drag-over event.
        dragOver(drag);
    }

    @Override
    public boolean drop(VDragEvent drag) {
        final VDDAccordion layout = getLayout();
        layout.deEmphasis();
        layout.updateDragDetails(drag);
        // Only report success if both the layout hook and the superclass accept.
        return layout.postDropHook(drag) && super.drop(drag);
    }

    @Override
    public void dragOver(VDragEvent drag) {
        final VDDAccordion layout = getLayout();
        layout.deEmphasis();
        layout.updateDragDetails(drag);
        layout.postOverHook(drag);
        // Ask the framework whether this position accepts the drop and
        // re-apply the visual emphasis if it does.
        validate(new VAcceptCallback() {
            public void accepted(VDragEvent event) {
                getLayout().emphasis(event.getElementOver(), event);
            }
        }, drag);
    }

    @Override
    public void dragEnter(VDragEvent drag) {
        super.dragEnter(drag);
        final VDDAccordion layout = getLayout();
        layout.updateDragDetails(drag);
        layout.postEnterHook(drag);
    }

    @Override
    public void dragLeave(VDragEvent drag) {
        final VDDAccordion layout = getLayout();
        layout.deEmphasis();
        layout.updateDragDetails(drag);
        layout.postLeaveHook(drag);
    }
}
```
|
```c++
/*
* POJ 2724: Purifying Machine
* *12
*
* *1
*/
#include <cstdio>
#include <cstring>
using namespace std;
bool odd[1030];     // odd[i]: true iff i has an odd number of set bits (popcount parity)
bool infect[1030];  // infect[i]: true iff code i appears in (or is expanded from) the input
bool vis[1030];     // per-augmenting-path visited marks for Dfs
int rec[1030];      // rec[v]: code currently matched to v in the matching, or -1
int n, m;           // n: code length in bits; m: number of input templates
// Try to find an augmenting path from code u. Neighbors of u are the
// infected codes differing from u in exactly one bit (Hamming distance 1).
// Returns true and updates the matching in rec[] on success.
bool Dfs(int u) {
    for (int bit = 0; bit < n; ++bit) {
        const int v = u ^ (1 << bit);
        if (!infect[v] || vis[v]) {
            continue;
        }
        vis[v] = true;
        // v is free, or its current partner can be re-matched elsewhere.
        if (rec[v] == -1 || Dfs(rec[v])) {
            rec[v] = u;
            return true;
        }
    }
    return false;
}
// Hungarian algorithm: start one augmenting search from every infected
// code with odd bit-parity (the graph is bipartite by popcount parity)
// and return the size of the maximum matching found.
int Hungary() {
    memset(rec, -1, sizeof(rec));
    int matched = 0;
    for (int code = (1 << n) - 1; code >= 0; --code) {
        if (!infect[code] || !odd[code]) {
            continue;
        }
        memset(vis, 0, sizeof(vis));
        if (Dfs(code)) {
            ++matched;
        }
    }
    return matched;
}
int main() {
    // Precompute bit-parity for every 10-bit code.
    for (int i = 0; i < 1024; ++i) {
        int j = i, cnt = 0;
        while (j) {
            if (j & 1) ++cnt;
            j >>= 1;
        }
        if (cnt & 1) odd[i] = 1;
    }
    while (scanf("%d%d", &n, &m) != EOF && n) {
        memset(infect, 0, sizeof(infect));
        for (int i = 0; i < m; ++i) {
            char op[12];
            scanf("%s", op);
            // Parse the template left-to-right into integer t;
            // s remembers the position of a '*' wildcard, if any.
            int s = -1, t = 0;
            for (int j = 0; j < n; ++j) {
                t <<= 1;
                if (op[j] == '1') {
                    t |= 1;
                } else if (op[j] == '*') {
                    s = j;
                }
            }
            infect[t] = 1;
            if (s != -1) {
                // A '*' matches both 0 and 1: also mark the code with
                // that bit set (bit j maps to 1 << (n - 1 - j)).
                infect[1 << (n - 1 - s) | t] = 1;
            }
        }
        // Each matched pair (two codes at Hamming distance 1) can be
        // purified by one '*' template, so the answer is the number of
        // infected codes minus the maximum matching.
        int ans = 0;
        for (int i = (1 << n) - 1; i >= 0; --i) {
            if (infect[i]) ++ans;
        }
        ans -= Hungary();
        printf("%d\n", ans);
    }
    return 0;
}
```
|
This page provides links to lists of amusement parks by region (below), and alphabetically beginning with the name of the park (right).
By region
Africa
America
Asia
Oceania
Europe
Amusement parks
|
```c
/* packet-kismet.c
* Routines for kismet packet dissection
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
*
* Copied from packet-pop.c
*
* This program is free software; you can redistribute it and/or
* as published by the Free Software Foundation; either version 2
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <stdlib.h>
#include <epan/packet.h>
#include <epan/to_str.h>
#include <epan/strutil.h>
#include <epan/prefs.h>
static int proto_kismet = -1;
static int hf_kismet_response = -1;
static int hf_kismet_request = -1;
static int hf_kismet_version = -1;
static int hf_kismet_start_time = -1;
static int hf_kismet_server_name = -1;
static int hf_kismet_build_revision = -1;
static int hf_kismet_unknown_field = -1;
static int hf_kismet_extended_version_string = -1;
static int hf_kismet_time = -1;
static gint ett_kismet = -1;
static gint ett_kismet_reqresp = -1;
#define TCP_PORT_KISMET 2501
static guint global_kismet_tcp_port = TCP_PORT_KISMET;
static gboolean response_is_continuation(const guchar * data);
void proto_reg_handoff_kismet(void);
void proto_register_kismet(void);
/*
 * Dissect one Kismet client/server message. The protocol is line-based
 * ASCII: requests flow to the server port, responses start with '*' or
 * '!', and anything else is treated as a continuation. Returns 0 if the
 * data does not look like Kismet (so another dissector can try), else
 * the number of bytes consumed.
 */
static int
dissect_kismet(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void * data _U_)
{
	gboolean is_request;
	gboolean is_continuation;
	proto_tree *kismet_tree=NULL, *reqresp_tree=NULL;
	proto_item *ti;
	proto_item *tmp_item;
	gint offset = 0;
	const guchar *line;
	gint next_offset;
	int linelen;
	int tokenlen;
	int i;
	const guchar *next_token;
	/*
	 * Find the end of the first line.
	 *
	 * Note that "tvb_find_line_end()" will return a value that is
	 * not longer than what's in the buffer, so the "tvb_get_ptr()"
	 * call won't throw an exception.
	 */
	linelen = tvb_find_line_end(tvb, offset, -1, &next_offset, FALSE);
	line = tvb_get_ptr(tvb, offset, linelen);
	/*
	 * Check if it is an ASCII based protocol with reasonable length
	 * packets, if not return, and try another dissector.
	 */
	if (linelen < 8) {
		/*
		 * Packet is too short
		 */
		return 0;
	} else {
		for (i = 0; i < 8; ++i) {
			/*
			 * Packet contains non-ASCII data
			 */
			if (line[i] < 32 || line[i] > 128)
				return 0;
		}
	}
	/*
	 * If it is Kismet traffic set COL_PROTOCOL.
	 */
	col_set_str(pinfo->cinfo, COL_PROTOCOL, "kismet");
	/*
	 * Check if it is request, reply or continuation.
	 * A packet addressed to the configured server port is a request.
	 */
	if (pinfo->match_uint == pinfo->destport) {
		is_request = TRUE;
		is_continuation = FALSE;
	} else {
		is_request = FALSE;
		is_continuation = response_is_continuation (line);
	}
	/*
	 * Put the first line from the buffer into the summary
	 * if it's a kismet request or reply (but leave out the
	 * line terminator).
	 * Otherwise, just call it a continuation.
	 */
	if (is_continuation)
		col_set_str(pinfo->cinfo, COL_INFO, "Continuation");
	else
		col_add_fstr(pinfo->cinfo, COL_INFO, "%s: %s",
			is_request ? "Request" : "Response",
			format_text(line, linelen));
	if (tree) {
		ti = proto_tree_add_item(tree, proto_kismet, tvb, offset, -1, ENC_NA);
		kismet_tree = proto_item_add_subtree(ti, ett_kismet);
	}
	if (is_continuation) {
		/*
		 * Put the whole packet into the tree as data.
		 */
		call_data_dissector(tvb, pinfo, kismet_tree);
		return tvb_captured_length(tvb);
	}
	/* Mark the packet direction with a generated boolean field. */
	if (is_request) {
		tmp_item = proto_tree_add_boolean(kismet_tree,
			hf_kismet_request, tvb, 0, 0, TRUE);
	} else {
		tmp_item = proto_tree_add_boolean(kismet_tree,
			hf_kismet_response, tvb, 0, 0, TRUE);
	}
	PROTO_ITEM_SET_GENERATED (tmp_item);
	/* Walk the message line by line, tokenizing each line. */
	while (tvb_offset_exists(tvb, offset)) {
		/*
		 * Find the end of the line.
		 */
		linelen = tvb_find_line_end(tvb, offset, -1, &next_offset, FALSE);
		if (linelen) {
			/*
			 * Put this line.
			 */
			reqresp_tree = proto_tree_add_subtree(kismet_tree, tvb, offset,
				next_offset - offset, ett_kismet_reqresp, NULL,
				tvb_format_text(tvb, offset,
					next_offset - offset - 1));
			tokenlen = get_token_len(line, line + linelen, &next_token);
			if (tokenlen != 0) {
				guint8 *reqresp;
				reqresp = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, tokenlen, ENC_ASCII);
				if (is_request) {
					/*
					 * No request dissection
					 */
				} else {
					/*
					 * *KISMET: {Version} {Start time} \001{Server name}\001 {Build Revision}
					 * two fields left undocumented: {???} {?ExtendedVersion?}
					 */
					if (!strncmp(reqresp, "*KISMET", 7)) {
						/* Each field: step past the previous token, then
						 * measure and add the next one. */
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						proto_tree_add_string(reqresp_tree, hf_kismet_version, tvb, offset,
							tokenlen, format_text(line, tokenlen));
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						proto_tree_add_string(reqresp_tree, hf_kismet_start_time, tvb, offset,
							tokenlen, format_text(line, tokenlen));
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						/* Server name is wrapped in \001 bytes; strip them. */
						proto_tree_add_string(reqresp_tree, hf_kismet_server_name, tvb, offset,
							tokenlen, format_text(line + 1, tokenlen - 2));
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						proto_tree_add_string(reqresp_tree, hf_kismet_build_revision, tvb, offset,
							tokenlen, format_text(line, tokenlen));
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						proto_tree_add_string(reqresp_tree, hf_kismet_unknown_field, tvb, offset,
							tokenlen, format_text(line, tokenlen));
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						proto_tree_add_string(reqresp_tree, hf_kismet_extended_version_string, tvb, offset,
							tokenlen, format_text(line, tokenlen));
					}
					/*
					 * *TIME: {Time}
					 */
					if (!strncmp(reqresp, "*TIME", 5)) {
						nstime_t t;
						char *ptr;
						offset += (gint) (next_token - line);
						linelen -= (int) (next_token - line);
						line = next_token;
						tokenlen = get_token_len(line, line + linelen, &next_token);
						/* Convert form ascii to nstime */
						t.secs = atoi(format_text (line, tokenlen));
						t.nsecs = 0;
						/*
						 * Format ascii representation of time
						 */
						ptr = abs_time_secs_to_str(wmem_packet_scope(), t.secs, ABSOLUTE_TIME_LOCAL, TRUE);
						proto_tree_add_time_format_value(reqresp_tree, hf_kismet_time, tvb, offset, tokenlen, &t, "%s", ptr);
					}
				}
				/*offset += (gint) (next_token - line);
				linelen -= (int) (next_token - line);*/
				line = next_token;
			}
		}
		offset = next_offset;
	}
	return tvb_captured_length(tvb);
}
/*
 * A response line is a continuation unless it begins with '*' (sentence
 * header) or '!' (command acknowledgement).
 */
static gboolean
response_is_continuation(const guchar * data)
{
	return (data[0] != '*' && data[0] != '!') ? TRUE : FALSE;
}
/*
 * Register the Kismet protocol, its header fields, subtree indices, and
 * the user-configurable TCP port preference.
 */
void
proto_register_kismet(void)
{
	/* Field definitions: direction flags plus the *KISMET / *TIME
	 * sentence components. */
	static hf_register_info hf[] = {
		{&hf_kismet_response,
		{"Response", "kismet.response", FT_BOOLEAN, BASE_NONE,
		NULL, 0x0, "TRUE if kismet response", HFILL}},
		{&hf_kismet_request,
		{"Request", "kismet.request", FT_BOOLEAN, BASE_NONE,
		NULL, 0x0, "TRUE if kismet request", HFILL}},
		{&hf_kismet_version,
		{"Version", "kismet.version", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_start_time,
		{"Start time", "kismet.start_time", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_server_name,
		{"Server name", "kismet.server_name", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_build_revision,
		{"Build revision", "kismet.build_revision", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_unknown_field,
		{"Unknown field", "kismet.unknown_field", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_extended_version_string,
		{"Extended version string", "kismet.extended_version_string", FT_STRING, BASE_NONE,
		NULL, 0x0, NULL, HFILL}},
		{&hf_kismet_time,
		{"Time", "kismet.time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL,
		NULL, 0x0, NULL, HFILL}},
	};
	static gint *ett[] = {
		&ett_kismet,
		&ett_kismet_reqresp,
	};
	module_t *kismet_module;
	proto_kismet = proto_register_protocol("Kismet Client/Server Protocol", "Kismet", "kismet");
	proto_register_field_array(proto_kismet, hf, array_length (hf));
	proto_register_subtree_array(ett, array_length (ett));
	/* Register our configuration options for Kismet, particularly our port */
	kismet_module = prefs_register_protocol(proto_kismet, proto_reg_handoff_kismet);
	prefs_register_uint_preference(kismet_module, "tcp.port",
				"Kismet Server TCP Port",
				"Set the port for Kismet Client/Server messages (if other"
				" than the default of 2501)", 10,
				&global_kismet_tcp_port);
}
/*
 * Handoff registration. Also serves as the preference-change callback:
 * on re-entry it removes the dissector from the previously registered
 * TCP port before registering on the (possibly new) configured port.
 */
void
proto_reg_handoff_kismet(void)
{
	static gboolean kismet_prefs_initialized = FALSE;
	static dissector_handle_t kismet_handle;
	static guint tcp_port;
	if (!kismet_prefs_initialized) {
		kismet_handle = create_dissector_handle(dissect_kismet, proto_kismet);
		kismet_prefs_initialized = TRUE;
	} else {
		/* Preference changed: unregister from the old port first. */
		dissector_delete_uint("tcp.port", tcp_port, kismet_handle);
	}
	/* Set our port number for future use */
	tcp_port = global_kismet_tcp_port;
	dissector_add_uint("tcp.port", global_kismet_tcp_port, kismet_handle);
}
/*
* Editor modelines - path_to_url
*
* Local variables:
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*
* vi: set shiftwidth=8 tabstop=8 noexpandtab:
* :indentSize=8:tabSize=8:noTabs=false:
*/
```
|
The Empire World Towers were two proposed supertall skyscrapers to be built in Miami, Florida in the United States. The complex was to consist of the Empire World Tower I and the Empire World Tower II. If completed, both towers would stand at 1,022 feet (312 m), with 93 stories each. They would surpass the Panorama Tower and become the city and state's tallest buildings, since the approved One Bayfront Plaza had been reduced to a height of 1,010 ft. The towers were not built, but in 2023 the supertall Waldorf Astoria Miami began construction on the same block at 300 Biscayne Boulevard.
History
If approved, constructed, and completed in time, the towers would have surpassed Queensland 1 as the world's tallest all-residential buildings until the completion of the Chicago Spire, though they would ultimately have fallen well short of the Central Park Tower.
Height
The Empire World Towers were originally proposed to rise 1,200 feet (366 m) and 106 stories.
In December 2007, the height of the towers was decreased to 1,022 feet (312 m). It is probable that the height decrease was brought on due to the concerns raised by the Federal Aviation Administration about the proposed buildings' heights.
See also
List of tallest buildings in Miami
Waldorf Astoria Miami
References
Empire World Condo Tower - The Skyscraper Center Council on Tall Buildings and Urban Habitat
External links
Entry on Emporis.com
Entry on Skyscraperpage.com
Residential skyscrapers in Miami
Proposed skyscrapers in the United States
Twin towers
|
A Costa Rican passport () is an identity document issued to Costa Rican citizens to travel outside Costa Rica. Currently, it is valid for 6 years (10 years before 2006). It is issued to people born on Costa Rican soil (who are citizens by default), and to children of Costa Rican citizens born abroad, who are reported to the nearest Costa Rican consulate (whose birth, immediately after such report, is recorded in the civil registry). Children born overseas to a Costa Rican citizen are Costa Rican by birth, not by naturalisation, as stated in the Constitution of Costa Rica.
As of 1 October 2019, Costa Rican citizens had visa-free or visa on arrival access to 150 countries and territories, ranking the Costa Rican passport 27th overall and first among Central American countries, in terms of travel freedom according to the Henley Passport Index.
In 2017, the Costa Rican government confirmed plans to begin issuing biometric passports by 2020. Due to the COVID-19 Pandemic the government of Costa Rica postponed the plans until 2022.
As of early 2022 the Costa Rican government began issuing the new biometric passports. The new passports showcase four important elements of the country: Biodiversity, Renewable Energy, Education and Peace, and Talent.
Appearance
It is dark blue on the outside, with letters and the Costa Rican coat of arms in gilded-looking letters.
Gallery of historic images
See also
List of passports
Visa requirements for Costa Rican citizens
External links
Images of a 1980 Costa Rican passport from www.passportland.com
References
Government of Costa Rica
Passports by country
|
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- BPMN 2.0 process definition used for testing Flowable's send-event
     service task: after the start event, "sendEventTask" synchronously
     publishes a "simpleTest" event on the system channel, mapping four
     process variables into the outgoing event payload, then the process
     ends. -->
<definitions id="definitions"
xmlns="path_to_url"
xmlns:flowable="path_to_url"
targetNamespace="Examples"
xmlns:tns="Examples">
<process id="sendTestEventProcess">
<startEvent id="start" />
<sequenceFlow sourceRef="start" targetRef="sendEventTask" />
<!-- send-event task: each eventInParameter copies a process variable
     (source expression) into a field (target) of the emitted event. -->
<serviceTask id="sendEventTask" flowable:type="send-event">
<extensionElements>
<flowable:eventType>simpleTest</flowable:eventType>
<flowable:sendSynchronously>true</flowable:sendSynchronously>
<flowable:systemChannel/>
<flowable:eventInParameter source="${customerId}" target="customer" />
<flowable:eventInParameter source="${customerName}" target="name" />
<flowable:eventInParameter source="${eventKey}" target="eventKey" />
<flowable:eventInParameter source="${action}" target="action" />
</extensionElements>
</serviceTask>
<sequenceFlow sourceRef="sendEventTask" targetRef="end" />
<endEvent id="end" />
</process>
</definitions>
```
|
In computer graphics, cube mapping is a method of environment mapping that uses the six faces of a cube as the map shape. The environment is projected onto the sides of a cube and stored as six square textures, or unfolded into six regions of a single texture.
The cube map is generated by first rendering the scene six times from a viewpoint, with the views defined by a 90 degree view frustum representing each cube face. Or if the environment is first considered to be projected onto a sphere, then each face of the cube is its gnomonic projection.
In the majority of cases, cube mapping is preferred over the older method of sphere mapping because it eliminates many of the problems that are inherent in sphere mapping such as image distortion, viewpoint dependency, and computational inefficiency. Also, cube mapping provides a much larger capacity to support real-time rendering of reflections relative to sphere mapping because the combination of inefficiency and viewpoint dependency severely limits the ability of sphere mapping to be applied when there is a consistently changing viewpoint.
Variants of cube mapping are also commonly used in 360 video projection.
History
Cube mapping was first proposed in 1986 by Ned Greene in his paper “Environment Mapping and Other Applications of World Projections”, ten years after environment mapping was first put forward by Jim Blinn and Martin Newell. However, hardware limitations on the ability to access six texture images simultaneously made it infeasible to implement cube mapping without further technological developments. This problem was remedied in 1999 with the release of the Nvidia GeForce 256. Nvidia touted cube mapping in hardware as “a breakthrough image quality feature of GeForce 256 that ... will allow developers to create accurate, real-time reflections. Accelerated in hardware, cube environment mapping will free up the creativity of developers to use reflections and specular lighting effects to create interesting, immersive environments.” Today, cube mapping is still used in a variety of graphical applications as a favored method of environment mapping.
Advantages
Cube mapping is preferred over other methods of environment mapping because of its relative simplicity. Also, cube mapping produces results that are similar to those obtained by ray tracing, but is much more computationally efficient – the moderate reduction in quality is compensated for by large gains in efficiency.
Predating cube mapping, sphere mapping has many inherent flaws that made it impractical for most applications. Sphere mapping is view-dependent, meaning that a different texture is necessary for each viewpoint. Therefore, in applications where the viewpoint is mobile, it would be necessary to dynamically generate a new sphere mapping for each new viewpoint (or, to pre-generate a mapping for every viewpoint). Also, a texture mapped onto a sphere's surface must be stretched and compressed, and warping and distortion (particularly along the edge of the sphere) are a direct consequence of this. Although these image flaws can be reduced using certain tricks and techniques like “pre-stretching”, this just adds another layer of complexity to sphere mapping.
Paraboloid mapping provides some improvement on the limitations of sphere mapping, however it requires two rendering passes in addition to special image warping operations and more involved computation.
Conversely, cube mapping requires only a single render pass, and due to its simple nature, is very easy for developers to comprehend and generate. Also, cube mapping uses the entire resolution of the texture image, compared to sphere and paraboloid mappings, which also allows it to use lower resolution images to achieve the same quality. Although handling the seams of the cube map is a problem, algorithms have been developed to handle seam behavior and result in a seamless reflection.
Disadvantages
If a new object or new lighting is introduced into scene or if some object that is reflected in it is moving or changing in some manner, then the reflection changes and the cube map must be re-rendered. When the cube map is affixed to an object that moves through the scene then the cube map must also be re-rendered from that new position.
Applications
Stable specular highlights
Computer-aided design (CAD) programs use specular highlights as visual cues to convey a sense of surface curvature when rendering 3D objects. However, many CAD programs exhibit problems in sampling specular highlights because the specular lighting computations are only performed at the vertices of the mesh used to represent the object, and interpolation is used to estimate lighting across the surface of the object. Problems occur when the mesh vertices are not dense enough, resulting in insufficient sampling of the specular lighting. This in turn results in highlights with brightness proportionate to the distance from mesh vertices, ultimately compromising the visual cues that indicate curvature. Unfortunately, this problem cannot be solved simply by creating a denser mesh, as this can greatly reduce the efficiency of object rendering.
Cube maps provide a fairly straightforward and efficient solution to rendering stable specular highlights. Multiple specular highlights can be encoded into a cube map texture, which can then be accessed by interpolating across the surface's reflection vector to supply coordinates. Relative to computing lighting at individual vertices, this method provides cleaner results that more accurately represent curvature. Another advantage to this method is that it scales well, as additional specular highlights can be encoded into the texture at no increase in the cost of rendering. However, this approach is limited in that the light sources must be either distant or infinite lights, although fortunately this is usually the case in CAD programs.
Skyboxes
Perhaps the most advanced application of cube mapping is to create pre-rendered panoramic sky images which are then rendered by the graphical engine as faces of a cube at practically infinite distance with the view point located in the center of the cube. The perspective projection of the cube faces done by the graphics engine undoes the effects of projecting the environment to create the cube map, so that the observer experiences an illusion of being surrounded by the scene which was used to generate the skybox. This technique has found a widespread use in video games since it allows designers to add complex (albeit not explorable) environments to a game at almost no performance cost.
Skylight illumination
Cube maps can be useful for modelling outdoor illumination accurately. Simply modelling sunlight as a single infinite light oversimplifies outdoor illumination and results in unrealistic lighting. Although plenty of light does come from the sun, the scattering of rays in the atmosphere causes the whole sky to act as a light source (often referred to as skylight illumination). However, by using a cube map the diffuse contribution from skylight illumination can be captured. Unlike environment maps where the reflection vector is used, this method accesses the cube map based on the surface normal vector to provide a fast approximation of the diffuse illumination from the skylight. The one downside to this method is that computing cube maps to properly represent a skylight is very complex; one recent process is computing the spherical harmonic basis that best represents the low frequency diffuse illumination from the cube map. However, a considerable amount of research has been done to effectively model skylight illumination.
Dynamic reflection
Basic environment mapping uses a static cube map - although the object can be moved and distorted, the reflected environment stays consistent. However, a cube map texture can be consistently updated to represent a dynamically changing environment (for example, trees swaying in the wind). A simple yet costly way to generate dynamic reflections, involves building the cube maps at runtime for every frame. Although this is far less efficient than static mapping because of additional rendering steps, it can still be performed at interactive rates.
Unfortunately, this technique does not scale well when multiple reflective objects are present. A unique dynamic environment map is usually required for each reflective object. Also, further complications are added if reflective objects can reflect each other - dynamic cube maps can be recursively generated approximating the effects normally generated using raytracing.
Global illumination
An algorithm for global illumination computation at interactive rates using a cube-map data structure, was presented at ICCVG 2002.
Projection textures
Another application which found widespread use in video games is projective texture mapping. It relies on cube maps to project images of an environment onto the surrounding scene; for example, a point light source is tied to a cube map which is a panoramic image shot from inside a lantern cage or a window frame through which the light is filtering. This enables a game developer to achieve realistic lighting without having to complicate the scene geometry or resort to expensive real-time shadow volume computations.
Memory addressing
A cube texture indexes six texture maps from 0 to 5 in order Positive X, Negative X, Positive Y, Negative Y, Positive Z, Negative Z. The images are stored with the origin at the lower left of the image. The Positive X and Y faces must reverse the Z coordinate and the Negative Z face must negate the X coordinate. If given the face index and the texture coordinates (u, v), the non-normalized vector (x, y, z) can be computed by the function:
/*
 * Map cube-face texture coordinates to a direction vector.
 *
 * index  - cube face, 0..5 = +X, -X, +Y, -Y, +Z, -Z
 * u, v   - face texture coordinates in [0, 1]
 * x/y/z  - out parameters receiving the non-normalized direction
 *
 * The face's major axis is pinned at +/-1 and the other two components
 * are the centered coordinates uc, vc in [-1, 1]. An out-of-range index
 * previously left the outputs uninitialized; it now yields (0, 0, 0).
 */
void convert_cube_uv_to_xyz(int index, float u, float v, float *x, float *y, float *z)
{
	// convert range 0 to 1 to -1 to 1
	float uc = 2.0f * u - 1.0f;
	float vc = 2.0f * v - 1.0f;
	switch (index)
	{
	case 0: *x = 1.0f; *y = vc; *z = -uc; break;	// POSITIVE X
	case 1: *x = -1.0f; *y = vc; *z = uc; break;	// NEGATIVE X
	case 2: *x = uc; *y = 1.0f; *z = -vc; break;	// POSITIVE Y
	case 3: *x = uc; *y = -1.0f; *z = vc; break;	// NEGATIVE Y
	case 4: *x = uc; *y = vc; *z = 1.0f; break;	// POSITIVE Z
	case 5: *x = -uc; *y = vc; *z = -1.0f; break;	// NEGATIVE Z
	default: *x = 0.0f; *y = 0.0f; *z = 0.0f; break;	// invalid face: well-defined output
	}
}
Likewise, a vector (x, y, z) can be converted to the face index and texture coordinates (u, v) with the function:
/*
 * Inverse of convert_cube_uv_to_xyz: given a direction vector, select
 * the cube face its dominant axis points at and return that face index
 * together with the (u, v) texture coordinates on it.
 *
 * Face numbering: 0..5 = +X, -X, +Y, -Y, +Z, -Z. The six face tests
 * are deliberately independent (not else-if): on an exact tie between
 * axes every matching branch runs and the last one wins, which is the
 * tie-breaking behavior callers of this reference layout rely on.
 */
void convert_xyz_to_cube_uv(float x, float y, float z, int *index, float *u, float *v)
{
	float ax = fabs(x);
	float ay = fabs(y);
	float az = fabs(z);

	int xPos = x > 0 ? 1 : 0;
	int yPos = y > 0 ? 1 : 0;
	int zPos = z > 0 ? 1 : 0;

	float major, cu, cv;

	/* +X face: u runs +z -> -z, v runs -y -> +y */
	if (xPos && ax >= ay && ax >= az) {
		major = ax;
		cu = -z;
		cv = y;
		*index = 0;
	}
	/* -X face: u runs -z -> +z, v runs -y -> +y */
	if (!xPos && ax >= ay && ax >= az) {
		major = ax;
		cu = z;
		cv = y;
		*index = 1;
	}
	/* +Y face: u runs -x -> +x, v runs +z -> -z */
	if (yPos && ay >= ax && ay >= az) {
		major = ay;
		cu = x;
		cv = -z;
		*index = 2;
	}
	/* -Y face: u runs -x -> +x, v runs -z -> +z */
	if (!yPos && ay >= ax && ay >= az) {
		major = ay;
		cu = x;
		cv = z;
		*index = 3;
	}
	/* +Z face: u runs -x -> +x, v runs -y -> +y */
	if (zPos && az >= ax && az >= ay) {
		major = az;
		cu = x;
		cv = y;
		*index = 4;
	}
	/* -Z face: u runs +x -> -x, v runs -y -> +y */
	if (!zPos && az >= ax && az >= ay) {
		major = az;
		cu = -x;
		cv = y;
		*index = 5;
	}

	/* Re-center the [-1, 1] face coordinates into [0, 1]. */
	*u = 0.5f * (cu / major + 1.0f);
	*v = 0.5f * (cv / major + 1.0f);
}
References
See also
Quadrilateralized spherical cube
Spherical image projection
Texture mapping
|
Bombay: Our City (Hamara Sahar) is a 1985 Indian documentary film directed by Anand Patwardhan. The film is based on the daily battle for survival of the 4 million slum dwellers of Bombay who make up half the city's population. The film was produced by Ramesh Asher and Sanjiv Shah. Anand Patwardhan graduated with a B.A. in English Literature from Bombay University in 1970, won a scholarship to get another B.A. in sociology from Brandeis University in 1972 and earned a master's degree in communications from McGill University in 1982. The film was released on 7 June 1985.
Awards
National Film Award for Best Non-Feature Film, 1985
Special Jury Award, Cinema du reel, France, 1986
Filmfare Award for Best Documentary, 1986
References
External links
Bombay: Our City in Patwardhan's website
Indian documentary films
Documentary films about cities
1985 films
|
```smalltalk
using System.Collections.Generic;
namespace Ombi.Settings.Settings.Models
{
/// <summary>
/// Persisted authentication options: whether password-less sign-in is
/// allowed, the password complexity rules, and the Plex OAuth /
/// header-based SSO toggles.
/// </summary>
public class AuthenticationSettings : Settings
{
/// <summary>Allow users to sign in without supplying a password.</summary>
public bool AllowNoPassword { get; set; }
// Password Options
/// <summary>Require at least one digit in passwords.</summary>
public bool RequireDigit { get; set; }
/// <summary>Minimum password length.</summary>
public int RequiredLength { get; set; }
/// <summary>Require at least one lowercase letter in passwords.</summary>
public bool RequireLowercase { get; set; }
/// <summary>Require at least one non-alphanumeric character in passwords.</summary>
public bool RequireNonAlphanumeric { get; set; }
/// <summary>Require at least one uppercase letter in passwords.</summary>
public bool RequireUppercase { get; set; }
public bool EnableOAuth { get; set; } // Plex OAuth
public bool EnableHeaderAuth { get; set; } // Header SSO
/// <summary>Name of the request header carrying the SSO user identity.</summary>
public string HeaderAuthVariable { get; set; } // Header SSO
/// <summary>Auto-create a local user when the SSO header names an unknown user.</summary>
public bool HeaderAuthCreateUser { get; set; } // Header SSO
}
}
```
|
Anthene sylvanus, the common indigo ciliate blue, is a butterfly in the family Lycaenidae. It is found in Senegal, Guinea-Bissau, Guinea, Sierra Leone, Burkina Faso, Liberia, Ivory Coast, Ghana, Togo, Benin, Nigeria, Cameroon, Gabon, the Republic of the Congo, the Central African Republic, the Democratic Republic of the Congo, Uganda and Tanzania. The habitat consists of forests and dense Guinea savanna.
Subspecies
Anthene sylvanus sylvanus (Senegal, Guinea-Bissau, Guinea, Sierra Leone, Burkina Faso, Liberia, Ivory Coast, Ghana, Togo, Benin, Nigeria: south and the Cross River loop, Cameroon, Gabon, Congo, Central African Republic, Democratic Republic of the Congo: Mongala, Uele, Ituri, Kivu, Tshopo, Kinshasa, Cataractes, Sankuru, Mamiema, Lualaba, Tanganika and Shaba)
Anthene sylvanus albicans (Grünberg, 1910) (southern Uganda, north-western Tanzania)
Anthene sylvanus niveus Stempffer, 1954 (Democratic Republic of the Congo: Equator)
References
Butterflies described in 1773
Anthene
Butterflies of Africa
Taxa named by Dru Drury
|
Below is a complete list of justices who have served on the Supreme Court of Appeals of West Virginia since 1863.
Court created by the 1863 West Virginia State Constitution
Court created by the 1872 West Virginia State Constitution
* appointed ----- † died in office ----- ° resigned/retired ----- ^ elected to an unexpired term
'''cac
Succession by seat
See also
Supreme Court of Appeals of West Virginia
J
West Virginia
|
```smalltalk
//////////////////////////////////////////////////////////////////////////
//PrintText.cs - script file from 'Script importing' tutorial
using System;
using System.Text;
using Scripting;
//css_import print;
/// <summary>
/// Entry point for the printtext script: prints the first argument on
/// the system default printer (with print preview), or shows usage when
/// no argument / a help flag is supplied.
/// </summary>
class Script
{
	static string usage = "Usage: csc printtext <text> ...\nThis script will print (with print preview) specified text on the system default printer.\n";

	// True when the single argument is one of the recognised help flags.
	static bool IsHelpToken(string arg)
	{
		return arg == "?" || arg == "/?" || arg == "-?" || arg.ToLower() == "help";
	}

	static public void Main(string[] args)
	{
		bool showUsage = args.Length == 0 || (args.Length == 1 && IsHelpToken(args[0]));
		if (showUsage)
		{
			Console.WriteLine(usage);
			return;
		}

		SimplePrinting printer = new SimplePrinting();
		printer.Print(args[0], true);
	}
}
```
|
A Voice in the Dark may refer to:
"A Voice in the Dark" (song), a 2010 single by Blind Guardian
A Voice in the Dark, a track on the 2008 album Forever Magic by Fancy
A Voice in the Dark (comic), an ongoing horror/thriller comic book series by Larime Taylor
A Voice in the Dark (film), an American 1921 silent mystery film directed by Frank Lloyd
See also
Voices in the Dark, a compilation DVD set for Babylon 5: The Lost Tales
Voices in the Dark, a Broadway play by John Pielmeier
|
```java
package com.example.rds;
// snippet-start:[rds.java2.modify_instance.main]
// snippet-start:[rds.java2.modify_instance.import]
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.awssdk.services.rds.model.ModifyDbInstanceRequest;
import software.amazon.awssdk.services.rds.model.ModifyDbInstanceResponse;
import software.amazon.awssdk.services.rds.model.RdsException;
// snippet-end:[rds.java2.modify_instance.import]
/**
* Before running this Java V2 code example, set up your development
* environment, including your credentials.
*
* For more information, see the following documentation topic:
*
* path_to_url
*/
public class ModifyDBInstance {
    public static void main(String[] args) {
        // The second argument is the new master user password; the old usage
        // line wrongly advertised a <dbSnapshotIdentifier> (the Where section
        // below already described the password).
        final String usage = """

                Usage:
                    <dbInstanceIdentifier> <masterUserPassword>\s

                Where:
                    dbInstanceIdentifier - The database instance identifier.\s
                    masterUserPassword - The updated password that corresponds to the master user name.\s
                """;

        if (args.length != 2) {
            System.out.println(usage);
            System.exit(1);
        }

        String dbInstanceIdentifier = args[0];
        String masterUserPassword = args[1];
        Region region = Region.US_WEST_2;
        RdsClient rdsClient = RdsClient.builder()
                .region(region)
                .build();

        updateIntance(rdsClient, dbInstanceIdentifier, masterUserPassword);
        rdsClient.close();
    }

    /**
     * Modifies the DB instance: sets a new master user password and makes the
     * instance publicly accessible (demo settings). Exits the JVM on an RDS
     * service error.
     *
     * @param rdsClient the RDS service client
     * @param dbInstanceIdentifier the DB instance to modify
     * @param masterUserPassword the new master user password
     */
    // NOTE(review): keeps the historical "updateIntance" spelling because the
    // method is public and a rename would break existing callers.
    public static void updateIntance(RdsClient rdsClient, String dbInstanceIdentifier, String masterUserPassword) {
        try {
            // For a demo - modify the DB instance by modifying the master password.
            ModifyDbInstanceRequest modifyDbInstanceRequest = ModifyDbInstanceRequest.builder()
                    .dbInstanceIdentifier(dbInstanceIdentifier)
                    .publiclyAccessible(true)
                    .masterUserPassword(masterUserPassword)
                    .build();

            ModifyDbInstanceResponse instanceResponse = rdsClient.modifyDBInstance(modifyDbInstanceRequest);
            System.out.print("The ARN of the modified database is: " + instanceResponse.dbInstance().dbInstanceArn());
        } catch (RdsException e) {
            System.out.println(e.getLocalizedMessage());
            System.exit(1);
        }
    }
}
// snippet-end:[rds.java2.modify_instance.main]
```
|
```objective-c
/*
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* path_to_url and read it before using this file.
*
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*-
* All rights reserved.
*
* This software was developed by Robert Watson for the TrustedBSD Project.
*
* This software was developed for the FreeBSD Project in part by Network
* Associates Laboratories, the Security Research Division of Network
* Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
* as part of the DARPA CHATS research program.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/sys/sys/_label.h,v 1.4 2003/05/08 19:49:42 rwatson Exp $
*/
#ifndef _SECURITY_LABEL_H_
#define _SECURITY_LABEL_H_
/*
* XXXMAC: This shouldn't be exported to userland, but is because of ucred.h
* and various other messes.
*/
/*
 * MAC_MAX_SLOTS bounds how many MAC policy modules can attach
 * per-label state at the same time; embedded configurations keep the
 * per-label footprint smaller.
 */
#if CONFIG_EMBEDDED
#define MAC_MAX_SLOTS 3
#else
#define MAC_MAX_SLOTS 7
#endif
#define MAC_FLAG_INITIALIZED 0x0000001 /* Is initialized for use. */
/*
 * A MAC label: a flags word (MAC_FLAG_*) plus one slot of opaque
 * per-policy data for each possible policy module. Each slot holds
 * either a pointer or a long, at the owning policy's discretion.
 */
struct label {
int l_flags;
union {
void *l_ptr;
long l_long;
} l_perpolicy[MAC_MAX_SLOTS];
};
```
|
```go
/*
path_to_url
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
// with apply.
// NOTE: this file is generated code (see the "DO NOT EDIT" marker above);
// only comment text is touched here.
type RoleRefApplyConfiguration struct {
APIGroup *string `json:"apiGroup,omitempty"`
Kind *string `json:"kind,omitempty"`
Name *string `json:"name,omitempty"`
}
// RoleRef constructs a declarative configuration of the RoleRef type for use with
// apply.
func RoleRef() *RoleRefApplyConfiguration {
return &RoleRefApplyConfiguration{}
}
// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIGroup field is set to the value of the last call.
func (b *RoleRefApplyConfiguration) WithAPIGroup(value string) *RoleRefApplyConfiguration {
b.APIGroup = &value
return b
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleRefApplyConfiguration) WithKind(value string) *RoleRefApplyConfiguration {
b.Kind = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleRefApplyConfiguration) WithName(value string) *RoleRefApplyConfiguration {
b.Name = &value
return b
}
```
|
Penshurst Mosque is a mosque in the southern Sydney suburb of Penshurst, in the St George area. The mosque is supervised by the Australian Islamic Society of Bosnia and Herzegovina Inc.
The Penshurst Mosque was acquired in 1986 by the Bosnian community at 445-447 Forest Road, Penshurst. For over 30 years the mosque has been serving the needs of the Muslim community with a long history of co-existence and tolerance.
History
The earliest beginnings of Penshurst Mosque go back to the establishment of the Australian Bosnian Islamic Society which was formed in the late 1970s by Bosnian immigrants.
See also
Islam in Australia
List of mosques in Oceania
Mosques in Sydney
Mosques completed in 1983
1983 establishments in Australia
Penshurst, New South Wales
|
```forth
*> \brief \b SGBEQUB
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* path_to_url
*
*> \htmlonly
*> Download SGBEQUB + dependencies
*> <a href="path_to_url">
*> [TGZ]</a>
*> <a href="path_to_url">
*> [ZIP]</a>
*> <a href="path_to_url">
*> [TXT]</a>
*> \endhtmlonly
*
* Definition:
* ===========
*
* SUBROUTINE SGBEQUB( M, N, KL, KU, AB, LDAB, R, C, ROWCND, COLCND,
* AMAX, INFO )
*
* .. Scalar Arguments ..
* INTEGER INFO, KL, KU, LDAB, M, N
* REAL AMAX, COLCND, ROWCND
* ..
* .. Array Arguments ..
* REAL AB( LDAB, * ), C( * ), R( * )
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> SGBEQUB computes row and column scalings intended to equilibrate an
*> M-by-N matrix A and reduce its condition number. R returns the row
*> scale factors and C the column scale factors, chosen to try to make
*> the largest element in each row and column of the matrix B with
*> elements B(i,j)=R(i)*A(i,j)*C(j) have an absolute value of at most
*> the radix.
*>
*> R(i) and C(j) are restricted to be a power of the radix between
*> SMLNUM = smallest safe number and BIGNUM = largest safe number. Use
*> of these scaling factors is not guaranteed to reduce the condition
*> number of A but works well in practice.
*>
*> This routine differs from SGEEQU by restricting the scaling factors
*> to a power of the radix. Barring over- and underflow, scaling by
*> these factors introduces no additional rounding errors. However, the
*> scaled entries' magnitudes are no longer approximately 1 but lie
*> between sqrt(radix) and 1/sqrt(radix).
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] M
*> \verbatim
*> M is INTEGER
*> The number of rows of the matrix A. M >= 0.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> The number of columns of the matrix A. N >= 0.
*> \endverbatim
*>
*> \param[in] KL
*> \verbatim
*> KL is INTEGER
*> The number of subdiagonals within the band of A. KL >= 0.
*> \endverbatim
*>
*> \param[in] KU
*> \verbatim
*> KU is INTEGER
*> The number of superdiagonals within the band of A. KU >= 0.
*> \endverbatim
*>
*> \param[in] AB
*> \verbatim
*> AB is REAL array, dimension (LDAB,N)
*> On entry, the matrix A in band storage, in rows 1 to KL+KU+1.
*> The j-th column of A is stored in the j-th column of the
*> array AB as follows:
*> AB(KU+1+i-j,j) = A(i,j) for max(1,j-KU)<=i<=min(N,j+kl)
*> \endverbatim
*>
*> \param[in] LDAB
*> \verbatim
*> LDAB is INTEGER
*> The leading dimension of the array A. LDAB >= max(1,M).
*> \endverbatim
*>
*> \param[out] R
*> \verbatim
*> R is REAL array, dimension (M)
*> If INFO = 0 or INFO > M, R contains the row scale factors
*> for A.
*> \endverbatim
*>
*> \param[out] C
*> \verbatim
*> C is REAL array, dimension (N)
*> If INFO = 0, C contains the column scale factors for A.
*> \endverbatim
*>
*> \param[out] ROWCND
*> \verbatim
*> ROWCND is REAL
*> If INFO = 0 or INFO > M, ROWCND contains the ratio of the
*> smallest R(i) to the largest R(i). If ROWCND >= 0.1 and
*> AMAX is neither too large nor too small, it is not worth
*> scaling by R.
*> \endverbatim
*>
*> \param[out] COLCND
*> \verbatim
*> COLCND is REAL
*> If INFO = 0, COLCND contains the ratio of the smallest
*> C(i) to the largest C(i). If COLCND >= 0.1, it is not
*> worth scaling by C.
*> \endverbatim
*>
*> \param[out] AMAX
*> \verbatim
*> AMAX is REAL
*> Absolute value of largest matrix element. If AMAX is very
*> close to overflow or very close to underflow, the matrix
*> should be scaled.
*> \endverbatim
*>
*> \param[out] INFO
*> \verbatim
*> INFO is INTEGER
*> = 0: successful exit
*> < 0: if INFO = -i, the i-th argument had an illegal value
*> > 0: if INFO = i, and i is
*> <= M: the i-th row of A is exactly zero
*> > M: the (i-M)-th column of A is exactly zero
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \ingroup gbequb
*
* =====================================================================
*     SGBEQUB computes row and column scalings intended to equilibrate
*     an M-by-N band matrix A and reduce its condition number.  Unlike
*     SGBEQU, the factors are constrained to powers of the machine
*     radix, so applying them introduces no rounding error.
      SUBROUTINE SGBEQUB( M, N, KL, KU, AB, LDAB, R, C, ROWCND,
     $                    COLCND,
     $                    AMAX, INFO )
*
*  -- LAPACK computational routine --
*  -- LAPACK is a software package provided by Univ. of Tennessee, --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*
*     .. Scalar Arguments ..
      INTEGER            INFO, KL, KU, LDAB, M, N
      REAL               AMAX, COLCND, ROWCND
*     ..
*     .. Array Arguments ..
      REAL               AB( LDAB, * ), C( * ), R( * )
*     ..
*
*  =====================================================================
*
*     .. Parameters ..
      REAL               ONE, ZERO
      PARAMETER          ( ONE = 1.0E+0, ZERO = 0.0E+0 )
*     ..
*     .. Local Scalars ..
      INTEGER            I, J, KD
      REAL               BIGNUM, RCMAX, RCMIN, SMLNUM, RADIX, LOGRDX
*     ..
*     .. External Functions ..
      REAL               SLAMCH
      EXTERNAL           SLAMCH
*     ..
*     .. External Subroutines ..
      EXTERNAL           XERBLA
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS, MAX, MIN, LOG
*     ..
*     .. Executable Statements ..
*
*     Test the input parameters.
*
      INFO = 0
      IF( M.LT.0 ) THEN
         INFO = -1
      ELSE IF( N.LT.0 ) THEN
         INFO = -2
      ELSE IF( KL.LT.0 ) THEN
         INFO = -3
      ELSE IF( KU.LT.0 ) THEN
         INFO = -4
      ELSE IF( LDAB.LT.KL+KU+1 ) THEN
         INFO = -6
      END IF
      IF( INFO.NE.0 ) THEN
         CALL XERBLA( 'SGBEQUB', -INFO )
         RETURN
      END IF
*
*     Quick return if possible.
*
      IF( M.EQ.0 .OR. N.EQ.0 ) THEN
         ROWCND = ONE
         COLCND = ONE
         AMAX = ZERO
         RETURN
      END IF
*
*     Get machine constants.  Assume SMLNUM is a power of the radix.
*
      SMLNUM = SLAMCH( 'S' )
      BIGNUM = ONE / SMLNUM
      RADIX = SLAMCH( 'B' )
      LOGRDX = LOG(RADIX)
*
*     Compute row scale factors.
*
      DO 10 I = 1, M
         R( I ) = ZERO
   10 CONTINUE
*
*     Find the maximum element in each row.
*
      KD = KU + 1
      DO 30 J = 1, N
         DO 20 I = MAX( J-KU, 1 ), MIN( J+KL, M )
            R( I ) = MAX( R( I ), ABS( AB( KD+I-J, J ) ) )
   20    CONTINUE
   30 CONTINUE
*
*     Round each row maximum down to a power of the radix, so scaling
*     by 1/R(I) only changes exponents (exact in radix arithmetic).
*
      DO I = 1, M
         IF( R( I ).GT.ZERO ) THEN
            R( I ) = RADIX**INT( LOG( R( I ) ) / LOGRDX )
         END IF
      END DO
*
*     Find the maximum and minimum scale factors.
*
      RCMIN = BIGNUM
      RCMAX = ZERO
      DO 40 I = 1, M
         RCMAX = MAX( RCMAX, R( I ) )
         RCMIN = MIN( RCMIN, R( I ) )
   40 CONTINUE
      AMAX = RCMAX
*
      IF( RCMIN.EQ.ZERO ) THEN
*
*        Find the first zero scale factor and return an error code.
*
         DO 50 I = 1, M
            IF( R( I ).EQ.ZERO ) THEN
               INFO = I
               RETURN
            END IF
   50    CONTINUE
      ELSE
*
*        Invert the scale factors (clamped to the representable range).
*
         DO 60 I = 1, M
            R( I ) = ONE / MIN( MAX( R( I ), SMLNUM ), BIGNUM )
   60    CONTINUE
*
*        Compute ROWCND = min(R(I)) / max(R(I)).
*
         ROWCND = MAX( RCMIN, SMLNUM ) / MIN( RCMAX, BIGNUM )
      END IF
*
*     Compute column scale factors.
*
      DO 70 J = 1, N
         C( J ) = ZERO
   70 CONTINUE
*
*     Find the maximum element in each column,
*     assuming the row scaling computed above.
*
      DO 90 J = 1, N
         DO 80 I = MAX( J-KU, 1 ), MIN( J+KL, M )
            C( J ) = MAX( C( J ), ABS( AB( KD+I-J, J ) )*R( I ) )
   80    CONTINUE
*        Round the column maximum down to a power of the radix as well.
         IF( C( J ).GT.ZERO ) THEN
            C( J ) = RADIX**INT( LOG( C( J ) ) / LOGRDX )
         END IF
   90 CONTINUE
*
*     Find the maximum and minimum scale factors.
*
      RCMIN = BIGNUM
      RCMAX = ZERO
      DO 100 J = 1, N
         RCMIN = MIN( RCMIN, C( J ) )
         RCMAX = MAX( RCMAX, C( J ) )
  100 CONTINUE
*
      IF( RCMIN.EQ.ZERO ) THEN
*
*        Find the first zero scale factor and return an error code.
*
         DO 110 J = 1, N
            IF( C( J ).EQ.ZERO ) THEN
               INFO = M + J
               RETURN
            END IF
  110    CONTINUE
      ELSE
*
*        Invert the scale factors (clamped to the representable range).
*
         DO 120 J = 1, N
            C( J ) = ONE / MIN( MAX( C( J ), SMLNUM ), BIGNUM )
  120    CONTINUE
*
*        Compute COLCND = min(C(J)) / max(C(J)).
*
         COLCND = MAX( RCMIN, SMLNUM ) / MIN( RCMAX, BIGNUM )
      END IF
*
      RETURN
*
*     End of SGBEQUB
*
      END
```
|
Flat Rock is an unincorporated community in Caldwell County, Kentucky, United States.
Flat Rock was named for the natural outcrop which surrounded the settlement.
References
Unincorporated communities in Caldwell County, Kentucky
Unincorporated communities in Kentucky
|
```xml
<?xml version="1.0" encoding="UTF-8"?>
<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="11762" systemVersion="16A323" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" colorMatched="YES">
<device id="retina4_7" orientation="portrait">
<adaptation id="fullscreen"/>
</device>
<dependencies>
<plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="11757"/>
<capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/>
</dependencies>
<objects>
<placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner"/>
<placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
<tableViewCell clipsSubviews="YES" contentMode="scaleToFill" selectionStyle="none" indentationWidth="10" rowHeight="91" id="cED-sB-eaw" customClass="ImageExampleTableViewCell" customModule="Spruce_Example" customModuleProvider="target">
<rect key="frame" x="0.0" y="0.0" width="661" height="91"/>
<autoresizingMask key="autoresizingMask"/>
<tableViewCellContentView key="contentView" opaque="NO" multipleTouchEnabled="YES" contentMode="center" tableViewCell="cED-sB-eaw" id="0hn-P0-Vd1">
<rect key="frame" x="0.0" y="0.0" width="661" height="91"/>
<autoresizingMask key="autoresizingMask"/>
<subviews>
<view contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="DvL-Se-L2d">
<rect key="frame" x="10" y="5" width="641" height="81"/>
<subviews>
<view clipsSubviews="YES" alpha="0.60000002384185791" contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="M9B-Kq-yXt" userLabel="Image">
<rect key="frame" x="20" y="11" width="60" height="60"/>
<color key="backgroundColor" red="0.2549158037" green="0.71301728490000005" blue="0.5825116038" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
<constraints>
<constraint firstAttribute="width" constant="60" id="gN6-jK-kQo"/>
<constraint firstAttribute="height" constant="60" id="qzE-kh-ROu"/>
</constraints>
<userDefinedRuntimeAttributes>
<userDefinedRuntimeAttribute type="number" keyPath="layer.cornerRadius">
<integer key="value" value="3"/>
</userDefinedRuntimeAttribute>
</userDefinedRuntimeAttributes>
</view>
<view clipsSubviews="YES" alpha="0.59999999999999998" contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="bUr-bf-0u1" userLabel="Title">
<rect key="frame" x="100" y="25" width="140" height="10"/>
<color key="backgroundColor" red="0.7396678635832572" green="0.7396678635832572" blue="0.7396678635832572" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
<constraints>
<constraint firstAttribute="height" constant="10" id="1VL-O6-sE6"/>
<constraint firstAttribute="width" constant="140" id="xap-fl-zBo"/>
</constraints>
<userDefinedRuntimeAttributes>
<userDefinedRuntimeAttribute type="number" keyPath="layer.cornerRadius">
<integer key="value" value="3"/>
</userDefinedRuntimeAttribute>
</userDefinedRuntimeAttributes>
</view>
<view clipsSubviews="YES" contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="85X-Tf-Rrg" userLabel="Top Description">
<rect key="frame" x="100" y="45" width="521" height="10"/>
<color key="backgroundColor" red="0.92941176469999998" green="0.92941176469999998" blue="0.92941176469999998" alpha="1" colorSpace="calibratedRGB"/>
<constraints>
<constraint firstAttribute="height" constant="10" id="Iex-dl-gKO"/>
</constraints>
<userDefinedRuntimeAttributes>
<userDefinedRuntimeAttribute type="number" keyPath="layer.cornerRadius">
<integer key="value" value="3"/>
</userDefinedRuntimeAttribute>
</userDefinedRuntimeAttributes>
</view>
<view clipsSubviews="YES" contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="tii-Oo-XR1" userLabel="Bottom Description">
<rect key="frame" x="100" y="65" width="461" height="10"/>
<color key="backgroundColor" red="0.92941176469999998" green="0.92941176469999998" blue="0.92941176469999998" alpha="1" colorSpace="calibratedRGB"/>
<constraints>
<constraint firstAttribute="height" constant="10" id="0Oe-in-CbF"/>
</constraints>
<userDefinedRuntimeAttributes>
<userDefinedRuntimeAttribute type="number" keyPath="layer.cornerRadius">
<integer key="value" value="3"/>
</userDefinedRuntimeAttribute>
</userDefinedRuntimeAttributes>
</view>
</subviews>
<color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
<constraints>
<constraint firstItem="85X-Tf-Rrg" firstAttribute="leading" secondItem="M9B-Kq-yXt" secondAttribute="trailing" constant="20" id="EvH-c4-LHL"/>
<constraint firstItem="tii-Oo-XR1" firstAttribute="top" secondItem="85X-Tf-Rrg" secondAttribute="bottom" constant="10" id="PNM-cd-QsR"/>
<constraint firstItem="bUr-bf-0u1" firstAttribute="top" secondItem="DvL-Se-L2d" secondAttribute="top" constant="25" id="XdX-xL-4Xw"/>
<constraint firstItem="bUr-bf-0u1" firstAttribute="leading" secondItem="M9B-Kq-yXt" secondAttribute="trailing" constant="20" id="ZCg-SU-uMk"/>
<constraint firstItem="85X-Tf-Rrg" firstAttribute="top" secondItem="bUr-bf-0u1" secondAttribute="bottom" constant="10" id="a6V-No-4aq"/>
<constraint firstItem="M9B-Kq-yXt" firstAttribute="centerY" secondItem="DvL-Se-L2d" secondAttribute="centerY" id="fUD-7l-8Ra"/>
<constraint firstItem="M9B-Kq-yXt" firstAttribute="leading" secondItem="DvL-Se-L2d" secondAttribute="leading" constant="20" id="gTN-uX-hlu"/>
<constraint firstAttribute="trailing" secondItem="85X-Tf-Rrg" secondAttribute="trailing" constant="20" id="tBJ-ja-gL6"/>
<constraint firstAttribute="trailing" secondItem="tii-Oo-XR1" secondAttribute="trailing" constant="80" id="uwh-v0-g0A"/>
<constraint firstItem="tii-Oo-XR1" firstAttribute="leading" secondItem="M9B-Kq-yXt" secondAttribute="trailing" constant="20" id="yJH-Ay-faK"/>
</constraints>
<userDefinedRuntimeAttributes>
<userDefinedRuntimeAttribute type="number" keyPath="layer.cornerRadius">
<integer key="value" value="2"/>
</userDefinedRuntimeAttribute>
<userDefinedRuntimeAttribute type="number" keyPath="layer.shadowRadius">
<integer key="value" value="3"/>
</userDefinedRuntimeAttribute>
<userDefinedRuntimeAttribute type="number" keyPath="layer.shadowOpacity">
<real key="value" value="0.050000000000000003"/>
</userDefinedRuntimeAttribute>
<userDefinedRuntimeAttribute type="boolean" keyPath="layer.masksToBounds" value="NO"/>
<userDefinedRuntimeAttribute type="size" keyPath="layer.shadowOffset">
<size key="value" width="0.0" height="0.0"/>
</userDefinedRuntimeAttribute>
</userDefinedRuntimeAttributes>
</view>
</subviews>
<color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="calibratedWhite"/>
<constraints>
<constraint firstAttribute="bottom" secondItem="DvL-Se-L2d" secondAttribute="bottom" constant="5" id="196-MU-B5d"/>
<constraint firstAttribute="trailing" secondItem="DvL-Se-L2d" secondAttribute="trailing" constant="10" id="5bG-oQ-a2I"/>
<constraint firstItem="DvL-Se-L2d" firstAttribute="leading" secondItem="0hn-P0-Vd1" secondAttribute="leading" constant="10" id="G10-h7-s0r"/>
<constraint firstItem="DvL-Se-L2d" firstAttribute="top" secondItem="0hn-P0-Vd1" secondAttribute="top" constant="5" id="UMs-xw-KWE"/>
</constraints>
</tableViewCellContentView>
<color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="calibratedWhite"/>
<connections>
<outlet property="iconView" destination="M9B-Kq-yXt" id="Nai-WW-9WF"/>
</connections>
<point key="canvasLocation" x="-207.5" y="-53.5"/>
</tableViewCell>
</objects>
</document>
```
|
```python
# Owner(s): ["module: dynamo"]
"""
Basic tests to assert and illustrate the behavior around the decision to use 0D
arrays in place of array scalars.
Extensive tests of this sort of functionality are in numpy_tests/core/*scalar*
Also test the isscalar function (which is deliberately a bit more lax).
"""
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
subtest,
TEST_WITH_TORCHDYNAMO,
TestCase,
xfailIfTorchDynamo,
)
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
# Shared parametrization: the integer 42 constructed four ways (an int64
# scalar, a 0D array, and asarray of each), so every test decorated with
# this runs against each construction path.
parametrize_value = parametrize(
    "value",
    [
        subtest(np.int64(42), name="int64"),
        subtest(np.array(42), name="array"),
        subtest(np.asarray(42), name="asarray"),
        subtest(np.asarray(np.int64(42)), name="asarray_int"),
    ],
)
@instantiate_parametrized_tests
class TestArrayScalars(TestCase):
    """Behavior of 0D arrays standing in for NumPy array scalars."""

    @parametrize_value
    def test_array_scalar_basic(self, value):
        # A 0D array: no axes, empty shape, exactly one element.
        assert value.ndim == 0
        assert value.shape == ()
        assert value.size == 1
        assert value.dtype == np.dtype("int64")

    @parametrize_value
    def test_conversion_to_int(self, value):
        # int() unwraps the single element into a genuine Python int,
        # while the wrapper itself is not an int.
        converted = int(value)
        assert converted == 42
        assert isinstance(converted, int)
        assert not isinstance(value, int)

    @parametrize_value
    def test_decay_to_py_scalar(self, value):
        # NumPy distinguishes array scalars from 0D arrays: for a scalar,
        # `scalar * list` behaves like `int(scalar) * list`, whereas a 0D
        # array broadcasts against np.asarray(list).  Our scalars are 0D
        # arrays, so broadcasting applies on both sides of the multiply.
        operand = [1, 2, 3]
        for result in (value * operand, operand * value):
            assert isinstance(result, np.ndarray)
            assert result.shape == (3,)
            assert_equal(result, [42, 42 * 2, 42 * 3])

    def test_scalar_comparisons(self):
        scalar = np.int64(42)
        arr = np.array(42)
        checks = (
            arr == scalar,
            arr >= scalar,
            arr <= scalar,
            scalar == 42,
            arr == 42,
        )
        assert all(checks)
# @xfailIfTorchDynamo
@instantiate_parametrized_tests
class TestIsScalar(TestCase):
    #
    # np.isscalar(...) checks that its argument is a numeric object with exactly one element.
    #
    # This differs from NumPy which also requires that shape == ().
    #
    # Entries marked xfailIfTorchDynamo are presumably the ones where this
    # laxer rule disagrees with real NumPy, which is what `np` resolves to
    # under TorchDynamo (see the conditional import at the top of the file).
    scalars = [
        subtest(42, "literal"),
        subtest(int(42.0), "int"),
        subtest(np.float32(42), "float32"),
        subtest(np.array(42), "array_0D", decorators=[xfailIfTorchDynamo]),
        subtest([42], "list", decorators=[xfailIfTorchDynamo]),
        subtest([[42]], "list-list", decorators=[xfailIfTorchDynamo]),
        subtest(np.array([42]), "array_1D", decorators=[xfailIfTorchDynamo]),
        subtest(np.array([[42]]), "array_2D", decorators=[xfailIfTorchDynamo]),
    ]
    # Non-scalar samples: types, callables, modules, containers, and
    # multi-element arrays.  math is imported here (class scope) solely to
    # supply math.sin as a callable example.
    import math
    not_scalars = [
        int,
        np.float32,
        subtest("s", decorators=[xfailIfTorchDynamo]),
        subtest("string", decorators=[xfailIfTorchDynamo]),
        (),
        [],
        math.sin,
        np,
        np.transpose,
        [1, 2],
        np.asarray([1, 2]),
        np.float32([1, 2]),
    ]

    @parametrize("value", scalars)
    def test_is_scalar(self, value):
        assert np.isscalar(value)

    @parametrize("value", not_scalars)
    def test_is_not_scalar(self, value):
        assert not np.isscalar(value)
# Entry point: dispatch to the shared torch test runner when executed directly.
if __name__ == "__main__":
    run_tests()
```
|
```objective-c
/* TREELANG Compiler definitions for interfacing to treetree.c
(compiler back end interface).
2004, 2005 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
Free Software Foundation; either version 2, or (at your option) any
later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
along with this program; if not, write to the Free Software
Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them. Help stamp out software-hoarding!
   ---------------------------------------------------------------------------
Written by Tim Josling 1999, 2000, 2001, based in part on other
parts of the GCC compiler. */
/* Parameter-list helpers.  */
tree tree_code_init_parameters (void);
tree tree_code_add_parameter (tree list, tree proto_exp, tree exp);

/* Constant, return and storage helpers.  */
tree tree_code_get_integer_value (unsigned char *chars, unsigned int length);
void tree_code_generate_return (tree type, tree exp);
void tree_ggc_storage_always_used (void *m);

/* Expression and type construction.  */
tree tree_code_get_expression (unsigned int exp_type, tree type, tree op1,
                               tree op2, tree op3, location_t loc);
tree tree_code_get_numeric_type (unsigned int size1, unsigned int sign1);

/* Function and variable creation.  */
void tree_code_create_function_initial (tree prev_saved,
                                        location_t loc);
void tree_code_create_function_wrapup (location_t loc);
tree tree_code_create_function_prototype (unsigned char* chars,
                                          unsigned int storage_class,
                                          unsigned int ret_type,
                                          struct prod_token_parm_item* parms,
                                          location_t loc);
tree tree_code_create_variable (unsigned int storage_class,
                                unsigned char* chars,
                                unsigned int length,
                                unsigned int expression_type,
                                tree init,
                                location_t loc);

/* Statement generation.  */
void tree_code_output_expression_statement (tree code,
                                            location_t loc);
void tree_code_if_start (tree exp, location_t loc);
void tree_code_if_else (location_t loc);
void tree_code_if_end (location_t loc);
tree tree_code_get_type (int type_num);

/* Language hooks called by the GCC driver.  */
void treelang_init_decl_processing (void);
void treelang_finish (void);
bool treelang_init (void);
unsigned int treelang_init_options (unsigned int, const char **);
int treelang_handle_option (size_t scode, const char *arg, int value);
void treelang_parse_file (int debug_flag);

/* Lexical variable-scope tracking.  */
void push_var_level (void);
void pop_var_level (void);
const char* get_string (const char *s, size_t l);
```
|
The Doubleday myth is the claim that the sport of baseball was invented in 1839 by future American Civil War general Abner Doubleday in Cooperstown, New York. In response to a dispute over whether baseball originated in the United States or was a variation of the British game rounders, the Mills Commission was formed in 1905 to seek out evidence. Mining engineer Abner Graves authored a letter claiming that Doubleday invented baseball. The letter was published in a newspaper and eventually used by the Mills Commission to support its finding that the game was of American origin. In 1908, it named Doubleday the creator of baseball.
The claim initially received a favorable reception from Americans, but eventually garnered criticism from various writers. Modern baseball historians generally consider the myth to be false. Graves' testimony has been critiqued in various regards, as the details of his story and his reliability as a witness have been questioned, and the Mills Commission made departures from his letter in its report. The National Baseball Hall of Fame and Museum was built in the town where Graves said the game was created, Cooperstown. The legend is well known among fans of the sport.
Background
In the late 19th century and early 20th century, a dispute arose about the origins of baseball and whether it had been invented in the United States or developed as a variation of rounders, a game played in Great Britain and Ireland. The theory that the sport was created in the U.S. was backed by Chicago Cubs president Albert Spalding and National League (NL) president Abraham G. Mills. In 1889, Mills gave a speech during a banquet to honor the Chicago team and a group of NL all-stars, both of which had participated in a world tour to promote the game. During his remarks at Delmonico's restaurant in New York City, Mills declared that baseball was strictly American, which he said was determined through "patriotism and research"; his audience of about 300 people responded by chanting "No rounders!" The rounders theory was supported by prominent sportswriter Henry Chadwick, a native of Britain who noted common factors between rounders and baseball in a 1903 article. Like baseball, rounders features nine-player teams, fields with four bases, and clubs alternately batting during a selected number of innings. In contrast to baseball, in which bags are used for bases, rounders games utilize sticks; another key difference between the games is that foul balls do not occur in rounders. Chadwick said in his piece that "There is no doubt whatever as to base ball having originated from the two-centuries-old English game of rounders." Spalding disputed Chadwick's article in the next version of his Spalding Base Ball Guide.
In 1905, Spalding called for an investigation into how the sport was invented. Chadwick supported the idea, and later in the year a commission was formed. Spalding instructed the commission to decide between the American game of "Old Cat" and rounders as baseball's predecessor. Seven men served on the commission, including Mills. Spalding chose the committee's members, picking men who supported his theory and excluding supporters of the rounders claim, such as Chadwick. The committee sought information on the beginnings of the sport from members of the public, soliciting feedback in publications. It received numerous letters, primarily from former players. Many of the details they provided pertained to early variations of baseball, but evidence supporting Spalding's theory was lacking. On April 1, 1905, the Akron Beacon Journal newspaper published an article by Spalding that asked for details on the beginnings of the game to be sent to Amateur Athletic Union president James Sullivan, who was responsible for compiling information and presenting it to the commission. Spalding called the rounders theory "pap" and wrote that he would "refuse to swallow any more of it without some substantial proof sauce with it."
Letter by Abner Graves
In response to Spalding's request for information on early baseball in the Beacon Journal, mining engineer Abner Graves of Denver, Colorado, wrote a letter to the editor stating that he had seen Abner Doubleday create a diagram of a baseball field. Doubleday (1819–1893) was a career United States Army officer who attained the rank of major general in the Union Army during the American Civil War.
According to Graves' letter, Doubleday set up the first baseball game in Cooperstown, New York, in approximately 1839. The letter, dated April 3, stated that Doubleday had invented baseball as a modified version of town ball, with four bases on the field and batters who attempted to hit tosses from a pitcher standing in a six-foot ring. According to Graves, the first game had matched players from "Otesego academy and Green's Select school". In his letter, Graves claimed that he and Doubleday were both students at Green's school. Graves' description of Doubleday's game indicated that each team had 11 players: the pitcher, a catcher, three infielders by the bases, two further infielders who covered the areas between the bases, and four outfielders. It listed the names of seven players from an early game that Graves claimed to have seen. The April 4 edition of the Beacon Journal included the first story that described Graves' Doubleday claims, with a headline that read, "Abner Doubleday Invented Base Ball".
The topic received coverage in the Sporting Life newspaper later in 1905. Spalding wrote a letter to Graves asking for evidence to back up his claim; Graves responded by sending a diagram matching the one he said Doubleday had drawn, along with a letter stating that the original had not been preserved and that most of the players at the time were no longer alive. This correspondence stated that the initial game took place between 1839 and 1841. Although Graves was unable to provide further evidence to back his claims, Spalding supported his version of events. The members of the Mills Commission received the available evidence in October 1907, and Mills wrote a report to Sullivan summarizing the findings on December 30. His report gave Doubleday credit for inventing the game of baseball and said that the sport was American in origin, listing 1839 as the year of its creation. Mills said that he understood why Doubleday would make changes to town ball, reducing the number of players in an effort to decrease the risk of injury. He noted that the number of players per team was higher than the nine in modern baseball, but explained this by indicating that he had taken part in games with 11 players per side. Additionally, Mills wrote that he thought Doubleday might have created the modern defensive putout system, which replaced the town ball method in which fielders could hit baserunners with thrown balls to record outs, even though Graves' testimony did not make this claim.
No one else on the committee sent any material to Sullivan after receiving the documentation; one member, Arthur Pue Gorman, had died. The surviving commission members were sent the letter by Mills, which was signed by each of them. Spalding later used the report's acceptance of the Doubleday myth to claim U.S. origins in his baseball history book, America's National Game. Graves' name did not appear in the book; Spalding said that the Doubleday content had come from "a circumstantial statement by a reputable gentleman", quoting Mills, and that he had "nothing to add to [the commission's] report." In his book, Spalding expressed delight that an American Army general had been found to be baseball's creator.
A reporter for The Denver Post interviewed Graves for a 1912 article, which contained a version of the Doubleday story that varied from what had been given to the Mills Commission in several respects. Graves placed the year of the first game as 1840, one year later than Mills had reported. In the interview, he said that he had played in the game, as a "Green College" student. No university of that name in Cooperstown is known to have been in existence. Graves was possibly referencing Major Duff's Classical and Military Academy, an elementary school whose pupils were nicknamed "Duff's Greens", which could have been the source for Graves' previous identification of "Green's Select" school. The college claim contradicted a previous letter in which he said he had been at Frog Hollow School, another elementary school, when baseball was created by Doubleday. The reporter did not question Graves' account, which included a statement that the 78-year-old was preparing to play in a local exhibition game. Graves again claimed to have taken part in the first game in a 1916 letter published in The Freeman's Journal.
Creation of the Hall of Fame in Cooperstown
The National Baseball Hall of Fame and Museum was built in the village that served as the location of Doubleday's alleged first game, Cooperstown. An article in the 1920 edition of the Spalding Baseball Guide supported the idea of a monument to Doubleday in Cooperstown. NL president John Heydler offered his backing for Cooperstown's efforts to purchase the grounds where Doubleday was said to have created baseball. In 1923, the village succeeded in buying the property. A baseball stadium—Doubleday Field—was erected there. Around 1934, a baseball said to be from Graves' family was found and purchased by Stephen Carlton Clark, a powerful figure in Cooperstown who created an exhibit in what became the National Baseball Museum around it. The concept of a baseball museum was supported by new NL president Ford Frick, who suggested that a Hall of Fame be created in connection with it. The Hall was subsequently built in Cooperstown. Clark's purchase came to be known as the "Doubleday ball", as the belief arose that it was used by him.
A committee from the New York State Legislature traveled to Cooperstown in 1937, and its subsequent report declared that the town was "the birthplace of baseball" and recommended a 100th anniversary celebration in 1939; events that were held included the dedication of the Hall and an all-star game. Prior to the ceremonies, the Doubleday claims were criticized by multiple parties: author Robert Henderson wrote that rounders and baseball were related, and Alexander Cartwright's son Bruce reported that his father had invented the sport. (Some sources have reported that fourteen years later, in 1953, the United States Congress formally recognized Cartwright as the inventor of modern baseball, but no documentation of such a declaration exists in the Congressional Record.) As part of Bruce Cartwright's efforts, the manager of Honolulu's Chamber of Commerce sent Hall promoter Alexander Cleland a letter that questioned Graves' account. In response, Cleland promised that a "Cartwright Day" would be included in the anniversary events at Cooperstown, which went ahead as scheduled.
Harold Seymour and Dorothy Seymour Mills wrote, "Some sports columnists pointed out the discrepancy; others got around it as gracefully as possible." The United States Postal Service marked the anniversary by releasing a commemorative stamp, which did not feature an image of Doubleday. The Hall's day honoring Cartwright was held in the summer of 1939. By this time, Cartwright was a member of the Hall; in 1938, the Centennial Committee had elected him. The honor came weeks after Clark had investigated Doubleday's role in baseball's origins in response to the Cartwright reports.
Reception
Contemporary reactions
After the release of Mills' report, which was published in the 1908 version of Spalding's Guide, the belief that Doubleday had invented baseball "gained currency among the general public" in the U.S., according to author Brian Martin. Textbooks recorded the Civil War veteran's creation of the game, as many Americans accepted the idea that it had originated in their country. By 1909, critiques of the report began to appear in the media. In the May 1909 edition of the magazine Collier's, writer William Henry Irwin offered multiple criticisms. First, he expressed the belief that, prior to both Doubleday's purported invention and the existence of rounders, Britain had a sport with the baseball name. In addition, he noted that Doubleday was in West Point, New York, in 1839. That year, he was a United States Military Academy (USMA) plebe. It is unlikely that Doubleday traveled to Cooperstown in 1839, as first-year cadets such as Doubleday were rarely given leave at the time. Also in 1909, The Sporting News' founder, Alfred Henry Spink, received a letter from sportswriter William M. Rankin, which called the Doubleday claims false, citing United States Department of War and West Point records, and said that the New York Knickerbockers had invented baseball in 1845. The articles did little to change popular sentiment at the time.
More stories critical of the Doubleday claims were published in 1939, the 100th anniversary of the supposed invention in 1839. Sportswriter Frank Menke, who believed that baseball evolved from cricket, authored the book Encyclopedia of Sports, in which he published the report from the Mills Commission and critiqued it. Among other comments, he wrote that a reference had been made to a drawing by Doubleday, which had not been known to exist. Another point he raised regarded a link between Doubleday and Mills. Despite having been around Doubleday during the Civil War and later, Mills mentioned no personal involvement in baseball by Doubleday before Graves' testimony was released. Menke's views were given publicity by New York City newspaper reporter Bob Considine. Later in 1939, Henderson wrote that the sport had been detailed in documents dating back to the mid-1830s. A story in The New York Times called Henderson's work "a regular bomb on the big baseball program" that was scheduled for Cooperstown, but suggested that the 1839 origin story had "been accepted for centennial celebration by common agreement among peace loving citizens." Skepticism of the Doubleday myth took hold by the middle of the century, though. Clark himself eventually expressed doubt that a single person had created the sport.
Modern analysis
The Doubleday story is widely discredited among modern baseball historians. The recollections of Graves have been criticized because Graves was five years old in 1839, and 71 when he first made the Doubleday claims, leading to the possibility of inaccurate memories. Author William Ryczek notes that Graves did not claim to have attended the first game in his initial letter, but stated that he learned of it having been in Cooperstown. Although Spalding referred to Doubleday and Graves as "playmates" in his submission of evidence to the Mills Commission, Doubleday was more than a decade older than Graves, turning 20 in 1839. Ryczek describes Graves as an unreliable witness. One of his other claims, which he made to reporters, was that he was a deliveryman for the Pony Express. Graves said that he had worked for the service in 1852, eight years before it was founded. Late in his life, he shot and killed his wife; he was found insane by a jury and committed to a psychiatric hospital. Graves also expressed anti-English sentiments in a letter to the Mills Commission, writing, "Just in my present mood I would rather have Uncle Sam declare war on England and clean her up rather than have one of her citizens beat us out of Base Ball."
Author Brian Martin adds that Graves' account was tweaked by the Mills report in multiple ways. Information on fielders throwing at runners was removed, which Martin considers an attempt to show similarity to the baseball being played at the time. In addition, 1839 was called the year of the game's creation by Mills when 1841 was also a possibility according to Graves, who had written that the invention occurred before or after the 1840 presidential campaign of William Henry Harrison, during the spring months.
Doubleday himself made only one mention of baseball in his letters or diaries before his 1893 death; the only time the sport appears in his papers dates from 1871, when he penned a request for equipment. One obituary of Doubleday noted that he had displayed no real interest in outdoor sports during his life. A theory expressed by historian David Block is that Graves had actually known one of Doubleday's cousins, Abner Demas or John—both Cooperstown residents—and eventually the more famous Abner was whom he remembered. While denying that the Doubleday family factored into baseball's creation, fellow historian Peter Morris noted that it is "conceivable that Graves's recollections had some slim basis in fact." Historian John Thorn said that Spalding had a connection to Doubleday: he financially supported the Theosophical Society, a group in which Doubleday served as a chapter vice president.
Author Robert Elias credits the Doubleday myth for contributing to the idea of American exceptionalism. Elias cites Doubleday's history with the U.S. military, as well as the sense that "having a homegrown sport was important for America's national identity." Historian David Block wrote that Americans had been eager to accept the Doubleday story when it came out, at a time when the U.S. was growing in influence. While calling the Doubleday legend "amusingly fraudulent", Alexander Cartwright biographer Harold Peterson said that it had "obstinate durability."
Legacy
Long after the Doubleday myth was declared false by historians, it remains an object of fascination. Tim Arango of The New York Times wrote that the story "has taken a position in the pantheon of great American myths, alongside George Washington's cherry tree, Paul Bunyan and Johnny Appleseed." It was written about in numerous publications, and became well known among baseball fans. Thorn described Doubleday as "the man who did not invent baseball but instead was invented by it." The myth has received the backing of Major League Baseball commissioner Bud Selig, who said in 2010 that "I really believe that Abner Doubleday is the 'Father of Baseball.'" At one time, the state of New York made similar statements in promotions for Cooperstown.
While Chadwick biographer Andrew Schiff noted that "there is no clear inventor of the game", further research has been done on the origins of baseball. In 2004, a document was found that dated a sport called baseball to at least 1791, almost 50 years before Doubleday's supposed invention. It was an ordinance from Pittsfield, Massachusetts, which banned baseball from being played within 80 yards of a meeting house in the city; this implies that the game already existed at the time. The theory that activities such as rounders led to modern baseball remains common among modern historians, and older bat-and-ball games have been cited as well.
An extension of the legend developed later involving the growth of baseball in Mexico. Doubleday, who was in the country as part of the Mexican–American War, was alleged to have organized games for military camps, which drew interest from Mexican spectators.
In 1996, the Auburn Astros Minor League Baseball franchise changed its name to the Auburn Doubledays to honor the purported inventor of baseball. A motel in Cooperstown is also named after Doubleday, but unlike Cartwright, Doubleday was never inducted into the Hall. Nonetheless, the Hall supported the Doubleday myth for many years. More recently, the Hall has taken a small step away from the myth; when it announced special events in conjunction with its 75th year of operation in 2013–14, it made the following statement in its official press release:
The Hall states that Cooperstown is "representative" of the location of baseball's birthplace, although Doubleday Field has a plaque and sign that repeat the myth's claims. In the Hall's museum, the Doubleday ball's modern display rejects the Doubleday myth, with writings that call it "a thriving legend that reflects Americans' desire to make the game our own."
References
Bibliography
American legends
History of baseball
Misconceptions
|
```cpp
// 2016 and later: Unicode, Inc. and others.
/*
******************************************************************************
* and others. All Rights Reserved.
******************************************************************************
*
* File PERSNCAL.H
*
* Modification History:
*
* Date Name Description
* 9/23/2003 mehran posted to icu-design
*****************************************************************************
*/
#ifndef PERSNCAL_H
#define PERSNCAL_H
#include "unicode/utypes.h"
#if !UCONFIG_NO_FORMATTING
#include "unicode/calendar.h"
U_NAMESPACE_BEGIN
/**
* <code>PersianCalendar</code> is a subclass of <code>Calendar</code>
* that implements the Persian calendar. It is used as the official
* calendar in Iran. This calendar is also known as the "Hijri Shamsi"
* calendar, since it starts at the time of Mohammed's emigration (or
* "hijra") to Medinah on Thursday, July 15, 622 AD (Julian) and is a
* solar calendar system (or "shamsi").
* <p>
* The Persian calendar is strictly solar, and thus a Persian year has twelve
* solar months. A Persian year is about 365 days long, except in leap years
* which is 366 days long.
* <p>
* The six first months of Persian Calendar are 31 days long. The next five
* months are 30 days long. The last month is 29 days long in normal years,
* and 30 days long in leap years.
*
* @see GregorianCalendar
*
* @author Mehran Mehr
* @internal
*/
class PersianCalendar : public Calendar {
public:
  //-------------------------------------------------------------------------
  // Constants...
  //-------------------------------------------------------------------------

  /**
   * Constants for the months
   * @internal
   */
  enum EMonths {
    /**
     * Constant for Farvardin, the 1st month of the Persian year.
     * @internal
     */
    FARVARDIN = 0,

    /**
     * Constant for Ordibehesht, the 2nd month of the Persian year.
     * @internal
     */
    ORDIBEHESHT = 1,

    /**
     * Constant for Khordad, the 3rd month of the Persian year.
     * @internal
     */
    KHORDAD = 2,

    /**
     * Constant for Tir, the 4th month of the Persian year.
     * @internal
     */
    TIR = 3,

    /**
     * Constant for Mordad, the 5th month of the Persian year.
     * @internal
     */
    MORDAD = 4,

    /**
     * Constant for Shahrivar, the 6th month of the Persian year.
     * @internal
     */
    SHAHRIVAR = 5,

    /**
     * Constant for Mehr, the 7th month of the Persian year.
     * @internal
     */
    MEHR = 6,

    /**
     * Constant for Aban, the 8th month of the Persian year.
     * @internal
     */
    ABAN = 7,

    /**
     * Constant for Azar, the 9th month of the Persian year.
     * @internal
     */
    AZAR = 8,

    /**
     * Constant for Dei, the 10th month of the Persian year.
     * @internal
     */
    DEI = 9,

    /**
     * Constant for Bahman, the 11th month of the Persian year.
     * @internal
     */
    BAHMAN = 10,

    /**
     * Constant for Esfand, the 12th month of the Persian year.
     * @internal
     */
    ESFAND = 11,

    /**
     * One past the last month; equals the number of months (12).
     * @internal
     */
    PERSIAN_MONTH_MAX
  };

  //-------------------------------------------------------------------------
  // Constructors...
  //-------------------------------------------------------------------------

  /**
   * Constructs a PersianCalendar based on the current time in the default time zone
   * with the given locale.
   *
   * @param aLocale  The given locale.
   * @param success  Indicates the status of PersianCalendar object construction.
   *                 Returns U_ZERO_ERROR if constructed successfully.
   * @internal
   */
  PersianCalendar(const Locale& aLocale, UErrorCode &success);

  /**
   * Copy Constructor
   * @internal
   */
  PersianCalendar(const PersianCalendar& other);

  /**
   * Destructor.
   * @internal
   */
  virtual ~PersianCalendar();

  // TODO: copy c'tor, etc

  /**
   * Polymorphic copy; caller owns the returned object.
   */
  virtual PersianCalendar* clone() const override;

private:
  /**
   * Determine whether a year is a leap year in the Persian calendar
   */
  static UBool isLeapYear(int32_t year);

  /**
   * Return the day # on which the given year starts.  Days are counted
   * from the Hijri epoch, origin 0.
   */
  int32_t yearStart(int32_t year, UErrorCode& status);

  /**
   * Return the day # on which the given month starts.  Days are counted
   * from the Hijri epoch, origin 0.
   *
   * @param year  The hijri shamsi year
   * @param month The hijri shamsi month, 0-based
   */
  int32_t monthStart(int32_t year, int32_t month, UErrorCode& status) const;

  //----------------------------------------------------------------------
  // Calendar framework
  //----------------------------------------------------------------------
protected:
  /**
   * @internal
   */
  virtual int32_t handleGetLimit(UCalendarDateFields field, ELimitType limitType) const override;

  /**
   * Return the length (in days) of the given month.
   *
   * @param extendedYear The hijri shamsi year
   * @param month        The hijri shamsi month, 0-based
   * @internal
   */
  virtual int32_t handleGetMonthLength(int32_t extendedYear, int32_t month, UErrorCode& status) const override;

  /**
   * Return the number of days in the given Persian year
   * @internal
   */
  virtual int32_t handleGetYearLength(int32_t extendedYear) const override;

  //-------------------------------------------------------------------------
  // Functions for converting from field values to milliseconds....
  //-------------------------------------------------------------------------

  // Return JD of start of given month/year
  /**
   * @internal
   */
  virtual int64_t handleComputeMonthStart(int32_t eyear, int32_t month, UBool useMonth, UErrorCode& status) const override;

  //-------------------------------------------------------------------------
  // Functions for converting from milliseconds to field values
  //-------------------------------------------------------------------------

  /**
   * @internal
   */
  virtual int32_t handleGetExtendedYear(UErrorCode& status) override;

  /**
   * Override Calendar to compute several fields specific to the Persian
   * calendar system.  These are:
   *
   * <ul><li>ERA
   * <li>YEAR
   * <li>MONTH
   * <li>DAY_OF_MONTH
   * <li>DAY_OF_YEAR
   * <li>EXTENDED_YEAR</ul>
   *
   * The DAY_OF_WEEK and DOW_LOCAL fields are already set when this
   * method is called. The getGregorianXxx() methods return Gregorian
   * calendar equivalents for the given Julian day.
   * @internal
   */
  virtual void handleComputeFields(int32_t julianDay, UErrorCode &status) override;

  // UObject stuff
public:
  /**
   * @return The class ID for this object. All objects of a given class have the
   * same class ID. Objects of other classes have different class IDs.
   * @internal
   */
  virtual UClassID getDynamicClassID() const override;

  /**
   * Return the class ID for this class. This is useful only for comparing to a return
   * value from getDynamicClassID(). For example:
   *
   *      Base* polymorphic_pointer = createPolymorphicObject();
   *      if (polymorphic_pointer->getDynamicClassID() ==
   *          Derived::getStaticClassID()) ...
   *
   * @return The class ID for all objects of this class.
   * @internal
   */
  U_I18N_API static UClassID U_EXPORT2 getStaticClassID();

  /**
   * return the calendar type, "persian".
   *
   * @return calendar type
   * @internal
   */
  virtual const char * getType() const override;

  /**
   * @return The related Gregorian year; will be obtained by modifying the value
   * obtained by get from UCAL_EXTENDED_YEAR field
   * @internal
   */
  virtual int32_t getRelatedYear(UErrorCode &status) const override;

  /**
   * @param year  The related Gregorian year to set; will be modified as necessary then
   *              set in UCAL_EXTENDED_YEAR field
   * @internal
   */
  virtual void setRelatedYear(int32_t year) override;

private:
  PersianCalendar(); // default constructor not implemented

protected:
  DECLARE_OVERRIDE_SYSTEM_DEFAULT_CENTURY
};
U_NAMESPACE_END
#endif
#endif
```
|
```javascript
Symbols in ES6
Proxies
Generators as iterators in ES6
New methods in `Math`
ES6 Generator Transpiler
```
|
```javascript
// CodeMirror, copyright (c) by Marijn Haverbeke and others
// Distributed under an MIT license: path_to_url
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
// Build a regexp that matches any of the given words at the start of the
// stream, anchored on a word boundary (so "and" does not match "android").
function wordRegexp(words) {
  var alternatives = words.join(")|(");
  return new RegExp("^((" + alternatives + "))\\b");
}
var wordOperators = wordRegexp(["and", "or", "not", "is"]);
var commonKeywords = ["as", "assert", "break", "class", "continue",
"def", "del", "elif", "else", "except", "finally",
"for", "from", "global", "if", "import",
"lambda", "pass", "raise", "return",
"try", "while", "with", "yield", "in"];
var commonBuiltins = ["abs", "all", "any", "bin", "bool", "bytearray", "callable", "chr",
"classmethod", "compile", "complex", "delattr", "dict", "dir", "divmod",
"enumerate", "eval", "filter", "float", "format", "frozenset",
"getattr", "globals", "hasattr", "hash", "help", "hex", "id",
"input", "int", "isinstance", "issubclass", "iter", "len",
"list", "locals", "map", "max", "memoryview", "min", "next",
"object", "oct", "open", "ord", "pow", "property", "range",
"repr", "reversed", "round", "set", "setattr", "slice",
"sorted", "staticmethod", "str", "sum", "super", "tuple",
"type", "vars", "zip", "__import__", "NotImplemented",
"Ellipsis", "__debug__"];
CodeMirror.registerHelper("hintWords", "python", commonKeywords.concat(commonBuiltins));
// Return the innermost (most recently pushed) scope on the state's scope stack.
function top(state) {
  var stack = state.scopes;
  return stack[stack.length - 1];
}
CodeMirror.defineMode("python", function(conf, parserConf) {
  var ERRORCLASS = "error";

  // Delimiter/operator patterns; every one can be overridden through parserConf
  // so dialects (e.g. cython below) can extend the lexer.
  var singleDelimiters = parserConf.singleDelimiters || /^[\(\)\[\]\{\}@,:`=;\.]/;
  var doubleOperators = parserConf.doubleOperators || /^([!<>]==|<>|<<|>>|\/\/|\*\*)/;
  var doubleDelimiters = parserConf.doubleDelimiters || /^(\+=|\-=|\*=|%=|\/=|&=|\|=|\^=)/;
  var tripleDelimiters = parserConf.tripleDelimiters || /^(\/\/=|>>=|<<=|\*\*=)/;
  // Extra indent applied to continuation lines inside unclosed brackets.
  var hangingIndent = parserConf.hangingIndent || conf.indentUnit;
  var myKeywords = commonKeywords, myBuiltins = commonBuiltins;
  if (parserConf.extra_keywords != undefined)
    myKeywords = myKeywords.concat(parserConf.extra_keywords);
  if (parserConf.extra_builtins != undefined)
    myBuiltins = myBuiltins.concat(parserConf.extra_builtins);
  // Default to Python 3 unless parserConf.version selects an older major version.
  var py3 = !(parserConf.version && Number(parserConf.version) < 3)
  if (py3) {
    // '@' is also an operator in Python 3 (in addition to its decorator use).
    var singleOperators = parserConf.singleOperators || /^[\+\-\*\/%&|\^~<>!@]/;
    var identifiers = parserConf.identifiers|| /^[_A-Za-z\u00A1-\uFFFF][_A-Za-z0-9\u00A1-\uFFFF]*/;
    myKeywords = myKeywords.concat(["nonlocal", "False", "True", "None", "async", "await"]);
    myBuiltins = myBuiltins.concat(["ascii", "bytes", "exec", "print"]);
    var stringPrefixes = new RegExp("^(([rbuf]|(br))?('{3}|\"{3}|['\"]))", "i");
  } else {
    var singleOperators = parserConf.singleOperators || /^[\+\-\*\/%&|\^~<>!]/;
    var identifiers = parserConf.identifiers|| /^[_A-Za-z][_A-Za-z0-9]*/;
    myKeywords = myKeywords.concat(["exec", "print"]);
    myBuiltins = myBuiltins.concat(["apply", "basestring", "buffer", "cmp", "coerce", "execfile",
                                    "file", "intern", "long", "raw_input", "reduce", "reload",
                                    "unichr", "unicode", "xrange", "False", "True", "None"]);
    var stringPrefixes = new RegExp("^(([rubf]|(ur)|(br))?('{3}|\"{3}|['\"]))", "i");
  }
  var keywords = wordRegexp(myKeywords);
  var builtins = wordRegexp(myBuiltins);
function tokenBase(stream, state) {
if (stream.sol()) state.indent = stream.indentation()
// Handle scope changes
if (stream.sol() && top(state).type == "py") {
var scopeOffset = top(state).offset;
if (stream.eatSpace()) {
var lineOffset = stream.indentation();
if (lineOffset > scopeOffset)
pushPyScope(state);
else if (lineOffset < scopeOffset && dedent(stream, state) && stream.peek() != "#")
state.errorToken = true;
return null;
} else {
var style = tokenBaseInner(stream, state);
if (scopeOffset > 0 && dedent(stream, state))
style += " " + ERRORCLASS;
return style;
}
}
return tokenBaseInner(stream, state);
}
function tokenBaseInner(stream, state) {
if (stream.eatSpace()) return null;
var ch = stream.peek();
// Handle Comments
if (ch == "#") {
stream.skipToEnd();
return "comment";
}
// Handle Number Literals
if (stream.match(/^[0-9\.]/, false)) {
var floatLiteral = false;
// Floats
if (stream.match(/^\d*\.\d+(e[\+\-]?\d+)?/i)) { floatLiteral = true; }
if (stream.match(/^\d+\.\d*/)) { floatLiteral = true; }
if (stream.match(/^\.\d+/)) { floatLiteral = true; }
if (floatLiteral) {
// Float literals may be "imaginary"
stream.eat(/J/i);
return "number";
}
// Integers
var intLiteral = false;
// Hex
if (stream.match(/^0x[0-9a-f]+/i)) intLiteral = true;
// Binary
if (stream.match(/^0b[01]+/i)) intLiteral = true;
// Octal
if (stream.match(/^0o[0-7]+/i)) intLiteral = true;
// Decimal
if (stream.match(/^[1-9]\d*(e[\+\-]?\d+)?/)) {
// Decimal literals may be "imaginary"
stream.eat(/J/i);
// TODO - Can you have imaginary longs?
intLiteral = true;
}
// Zero by itself with no other piece of number.
if (stream.match(/^0(?![\dx])/i)) intLiteral = true;
if (intLiteral) {
// Integer literals may be "long"
stream.eat(/L/i);
return "number";
}
}
// Handle Strings
if (stream.match(stringPrefixes)) {
state.tokenize = tokenStringFactory(stream.current());
return state.tokenize(stream, state);
}
// Handle operators and Delimiters
if (stream.match(tripleDelimiters) || stream.match(doubleDelimiters))
return "punctuation";
if (stream.match(doubleOperators) || stream.match(singleOperators))
return "operator";
if (stream.match(singleDelimiters))
return "punctuation";
if (state.lastToken == "." && stream.match(identifiers))
return "property";
if (stream.match(keywords) || stream.match(wordOperators))
return "keyword";
if (stream.match(builtins))
return "builtin";
if (stream.match(/^(self|cls)\b/))
return "variable-2";
if (stream.match(identifiers)) {
if (state.lastToken == "def" || state.lastToken == "class")
return "def";
return "variable";
}
// Handle non-detected items
stream.next();
return ERRORCLASS;
}
function tokenStringFactory(delimiter) {
while ("rubf".indexOf(delimiter.charAt(0).toLowerCase()) >= 0)
delimiter = delimiter.substr(1);
var singleline = delimiter.length == 1;
var OUTCLASS = "string";
function tokenString(stream, state) {
while (!stream.eol()) {
stream.eatWhile(/[^'"\\]/);
if (stream.eat("\\")) {
stream.next();
if (singleline && stream.eol())
return OUTCLASS;
} else if (stream.match(delimiter)) {
state.tokenize = tokenBase;
return OUTCLASS;
} else {
stream.eat(/['"]/);
}
}
if (singleline) {
if (parserConf.singleLineStringErrors)
return ERRORCLASS;
else
state.tokenize = tokenBase;
}
return OUTCLASS;
}
tokenString.isString = true;
return tokenString;
}
// Open a new indentation-based ("py") scope one indent unit deeper than the
// innermost py scope, discarding any bracket scopes left open above it.
function pushPyScope(state) {
  while (top(state).type != "py") state.scopes.pop()
  var baseOffset = top(state).offset;
  state.scopes.push({
    offset: baseOffset + conf.indentUnit,
    type: "py",
    align: null
  })
}
// Open a bracket scope for the given closing-bracket type. If anything other
// than whitespace/opening brackets/a comment follows on this line, align
// continuation lines to the column just past the bracket; otherwise use
// hanging indent relative to the current line.
function pushBracketScope(stream, state, type) {
  var restIsEmpty = stream.match(/^([\s\[\{\(]|#.*)*$/, false);
  state.scopes.push({
    offset: state.indent + hangingIndent,
    type: type,
    align: restIsEmpty ? null : stream.column() + 1
  })
}
// Pop py scopes until the top scope's offset is <= the current line's
// indentation. Returns true on an error: either a bracket scope blocks the
// dedent, or the line's indentation matches no remaining scope offset.
function dedent(stream, state) {
  var lineIndent = stream.indentation();
  for (;;) {
    if (state.scopes.length <= 1 || top(state).offset <= lineIndent) break;
    if (top(state).type != "py") return true;
    state.scopes.pop();
  }
  return top(state).offset != lineIndent;
}
function tokenLexer(stream, state) {
if (stream.sol()) state.beginningOfLine = true;
var style = state.tokenize(stream, state);
var current = stream.current();
// Handle decorators
if (state.beginningOfLine && current == "@")
return stream.match(identifiers, false) ? "meta" : py3 ? "operator" : ERRORCLASS;
if (/\S/.test(current)) state.beginningOfLine = false;
if ((style == "variable" || style == "builtin")
&& state.lastToken == "meta")
style = "meta";
// Handle scope changes.
if (current == "pass" || current == "return")
state.dedent += 1;
if (current == "lambda") state.lambda = true;
if (current == ":" && !state.lambda && top(state).type == "py")
pushPyScope(state);
var delimiter_index = current.length == 1 ? "[({".indexOf(current) : -1;
if (delimiter_index != -1)
pushBracketScope(stream, state, "])}".slice(delimiter_index, delimiter_index+1));
delimiter_index = "])}".indexOf(current);
if (delimiter_index != -1) {
if (top(state).type == current) state.indent = state.scopes.pop().offset - hangingIndent
else return ERRORCLASS;
}
if (state.dedent > 0 && stream.eol() && top(state).type == "py") {
if (state.scopes.length > 1) state.scopes.pop();
state.dedent -= 1;
}
return style;
}
  // The mode object handed back to CodeMirror.
  var external = {
    startState: function(basecolumn) {
      return {
        tokenize: tokenBase,
        scopes: [{offset: basecolumn || 0, type: "py", align: null}],
        indent: basecolumn || 0,
        lastToken: null,
        lambda: false,
        dedent: 0
      };
    },

    token: function(stream, state) {
      // errorToken was set by a bad dedent on the previous call; consume it.
      var addErr = state.errorToken;
      if (addErr) state.errorToken = false;
      var style = tokenLexer(stream, state);

      // Track the previous significant token (text for keywords/punctuation,
      // style otherwise) so tokenBaseInner can classify what follows.
      if (style && style != "comment")
        state.lastToken = (style == "keyword" || style == "punctuation") ? stream.current() : style;
      if (style == "punctuation") style = null;

      if (stream.eol() && state.lambda)
        state.lambda = false;
      return addErr ? style + " " + ERRORCLASS : style;
    },

    indent: function(state, textAfter) {
      // Inside a multi-token construct: strings defer to CodeMirror, else 0.
      if (state.tokenize != tokenBase)
        return state.tokenize.isString ? CodeMirror.Pass : 0;

      var scope = top(state), closing = scope.type == textAfter.charAt(0)
      if (scope.align != null)
        return scope.align - (closing ? 1 : 0)
      else
        return scope.offset - (closing ? hangingIndent : 0)
    },

    electricInput: /^\s*[\}\]\)]$/,
    closeBrackets: {triples: "'\""},
    lineComment: "#",
    fold: "indent"
  };
  return external;
});
CodeMirror.defineMIME("text/x-python", "python");

// Split a space-separated keyword list into an array.
var words = function(str) { return str.split(" "); };

// Cython dialect: plain Python plus the cdef/cpdef/ctypedef family.
// NOTE: each concatenated fragment must end with a space, otherwise adjacent
// keywords fuse (the previous code produced "exceptextern" and
// "publicreadonly", losing four keywords).
CodeMirror.defineMIME("text/x-cython", {
  name: "python",
  extra_keywords: words("by cdef cimport cpdef ctypedef enum except " +
                        "extern gil include nogil property public " +
                        "readonly struct union DEF IF ELIF ELSE")
});
});
```
|
Tig n' Seek (originally called Tiggle Winks) is an American animated television series created by Myke Chilian for Cartoon Network. Prior to the series, Chilian served as a designer on Rick and Morty as well as a writer and storyboard artist on Uncle Grandpa. The series is produced by Cartoon Network Studios. It was originally set to premiere on Cartoon Network, but was moved to the then-upcoming streaming service, HBO Max. The first season premiered on July 23, 2020, on the streaming service. The series was renewed for a second season which premiered on March 11, 2021. The series made its linear premiere on Cartoon Network on August 6, 2021, with some episodes aired out of order. The third season was released on September 16, 2021, only six months after the show's second season premiered. The fourth season was released on May 26, 2022. The show also was the last to feature the voice of Louie Anderson before his death on January 21, 2022. On August 18, 2022, the series was removed from HBO Max.
Premise
Tig n' Seek follows the adventures of Tiggy, an 8-year-old detective, and his cat Gweeseek as they solve cases and retrieve lost items at the Department of Lost & Found.
Cast
Main cast
Myke Chilian as Tiggy
Jemaine Clement as This Guy
Rich Fulcher as Boss
Wanda Sykes as Nuritza
Vatche Panos as Prangle Penguin
Kari Wahlgren as Gweeseek (pilot)
Supporting cast
Maryann Strossner as Mrs. Grendelsons
Vartui Rosie Chilian as Rosie Penguin
Grey DeLisle as Carla Tetrazzini
Louie Anderson as Chester
Kayla Melikian as Skippy
Guest stars
Kate Freund as Linda Buckles
James Adomian as H. G. Fluffenfold
Sam Jay as Captain Delilah
Zach Hadel as Darryl Barryl
Episodes
Series overview
Season 1 (2020)
Season 2 (2021)
Season 3 (2021)
Season 4 (2022)
Production
The show was originally part of the Cartoon Network Shorts Program in 2015, before being greenlit for a series in May 2019. Rough Draft Studios handles most of the animation for the series, which is done through traditional animation techniques at the studio in Seoul, South Korea.
International broadcast
In Canada, the series premiered on Teletoon on November 8, 2020.
References
External links
(archived)
Tig n Seek Online
2020 American television series debuts
2020s American animated television series
2022 American television series endings
American children's animated adventure television series
American children's animated comedy television series
Animated television series about children
Television series by Cartoon Network Studios
Cartoon Network original programming
English-language television shows
HBO Max original programming
Television series set on fictional islands
|
Someone Here Is Missing is the eighth studio album by The Pineapple Thief, featuring cover art by Storm Thorgerson.
Track listing
All songs written by Bruce Soord.
Personnel
Bruce Soord: vocals, acoustic & electric guitars, keyboards, programming
Steve Kitch: keyboards, synthesizers
Jon Sykes: acoustic & electric bass, vocals
Keith Harrison: drums, percussion, vocals
Production
Arranged by The Pineapple Thief
Produced & engineered by Bruce Soord
Mixed by Mark Bowyer & Steve Kitch
Mastered by Dave Turner
Artwork by designer Storm Thorgerson
References
External links
The Pineapple Thief's official website
Someone Here Is Missing microsite
2010 albums
The Pineapple Thief albums
Kscope albums
Albums with cover art by Storm Thorgerson
|
```java
/*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package org.apache.traffic_control.traffic_router.geolocation;
import java.io.File;
import java.io.IOException;
/**
 * A service that maps IP addresses to geospatial locations, backed by a
 * reloadable database file.
 */
public interface GeolocationService {
    /**
     * Provides a geospatial location for a specified IP Address.
     *
     * @param ip
     *            the IP address to locate
     * @return the location of the specified IP Address
     * @throws GeolocationException
     *             if the IP Address cannot be located.
     */
    Geolocation location(String ip) throws GeolocationException;

    /**
     * Forces a reload of the geolocation database.
     *
     * @throws IOException
     *             if the database cannot be read.
     */
    void reloadDatabase() throws IOException;

    /**
     * Verifies the specified database is valid.
     *
     * @param dbFile
     *            the database file.
     * @return whether the database could be verified
     * @throws IOException
     *             if the database is not valid.
     */
    boolean verifyDatabase(File dbFile) throws IOException;

    /**
     * Exposes whether this GeolocationService has loaded
     *
     * @return whether this GeolocationService has loaded
     */
    boolean isInitialized();

    /**
     * Sets the file this service should load its geolocation database from.
     *
     * @param databaseFile
     *            the database file to use
     */
    void setDatabaseFile(File databaseFile);
}
```
|
The Advanced Congress of Democrats (ACD) was an opposition political party in Nigeria, created and first registered in March 2006. In September 2006, it merged into the newly formed Action Congress, launched later that year.
The ACD was primarily composed of former People's Democratic Party members, and was one of a series of anti-Obasanjo coalitions, beginning with the Movement for the Defence of Democracy in 2005, and followed by the AC in 2006/2007. It maintained a small independent organization after the 2007 elections, while its leaders had merged into the AC.
The party was formed by opponents of a proposed constitutional amendment that would allow incumbent President Olusegun Obasanjo to seek a third term in office, and had its base of support in the south west of Nigeria.
The then Vice-President Atiku Abubakar, a northerner who opposed a third term for Obasanjo, was believed to back the new party from its inception.
The ACD was largely made up of disgruntled PDP members who felt they had lost power and patronage to the President's supporters. Attempts by the President's supporters to amend the constitutional two term limit, allowing President Obasanjo to continue in office for a further four years, led to a rupture of the underlying tension within the party.
Three of the founders of the ACD, Alhaji Lawal Kaita, Alhaji Bashir Dalhatu and Audu Ogbeh, are former PDP politicians, and have complained of harassment and detention by the government since the ACD's founding. In March, ACD chair Alhaji Lawal Kaita, the former PDP governor of Kaduna state was detained shortly after a party rally was shut down by police in Dutse, Jigawa State.
The Vice President, who had previously shown no interest in running for president, was in 2006 the main focus of these former PDP politicians. He was widely expected to be a future Presidential candidate of the ACD.
In September 2006, the ACD led the creation of a coalition with the Alliance for Democracy, the Justice Party, and several other minor political parties, and forming the Action Congress. Atiku Abubakar was its presidential candidate in the 2007 General Election.
References
ACD Party Website.
Nigeria Electoral Commission, Party information.
Daily Sun, AD/ACD: United against third-term, April 29, 2006.
Daily Sun, Corrupt people won’t ever rule Nigeria – EFCC• ACD dares commission: July 19, 2006.
Arrest of ACD Leader, 2006.
BBC News: Thousands launch Nigerian party
Defunct political parties in Nigeria
2006 establishments in Nigeria
Political parties established in 2006
Political parties with year of disestablishment missing
|
Pedro is a populated place in Pennington County, South Dakota, United States. Pedro once had a population of 300 and had its own newspaper, the Pedro Bugle, but is now a ghost town.
History
The community's name was selected during a session of the card game Pedro.
Carrie Ingalls (Little House on the Prairie) worked for E.L. Senn (who owned as many as fifty-one newspapers in South Dakota at that time) in Pedro, which was not too far from her claim. By the summer of 1909, she was the manager working for Senn at the Pedro Bugle.
References
Ghost towns in South Dakota
Populated places in Pennington County, South Dakota
|
Shiva Regmi () (1965 – 9 December 2013) was a Nepali film director, producer and screenwriter. His debut film as a director was Aafanta, which was released in 1999, and his last film was So Simple, which was released in 2012. He has worked on over one hundred films. Regmi received the National Film Awards for best director in 2010 for his work on the film Duniya, and again in 2012 for his work on Kaha Bhetiyela. In 2013, he won the National Film Awards for best writer for his work on the film Hasi Deu Ek Phera. He is regarded as one of Nepal's earliest filmmakers to give female characters some significant roles at a time when traditional audiences tended to see the male lead as the only significant character in a movie.
Career
Early career
Born in Chitwan, Nepal, Regmi started his career as a theatrical actor, director and writer. He worked at Narayani kala mandir for many years, writing, directing and acting in many of his own plays. He won the National award for best drama writer for Swades Bhitra Harayeko Nagarik in 1993. This play, along with Pukaar brought Regmi national attention, and both were performed widely throughout Nepal. His first film work was as a writer for Prem Puja but he did not receive wide recognition as a film writer until his success with Mohoni. After writing the scripts for several movies he debuted as a director with Aafanta. He followed this with several more films, including Aafno Manchhe, Yeh Mero Hajur, Sukha Dukha, Haami Teen Bhai, Kaha Bhetiyela and many more. After this he wrote scripts for many movies like Allare, Dahijo, Maiti, Gorkhali, Dharti and so on. He also appeared as a supporting actor in Sindoor Poote along with Rajesh Hamal.
Mainstream breakthrough (1999–2002)
In 1999 Regmi's most ambitious script, Aafanta, was rejected by many directors, so he chose to direct the film himself. After the success of Aafanta, he reassembled the cast for Aafno Manchhe, but this time he included Dilip Rayamajhi and Bipana Thapa in the lead. Aafno Manchhe was a huge success, and is counted among the biggest hit movies of Nepali cinema. He received the Film Award for Best Dialogue for the movie. His next film was Ye Mero Hajur. He again turned to star Shree Krishna Shrestha; the movie was a box office hit. In 2002 he directed Sukha Dukha, an emotional family drama which was again a huge hit at the box office. He received the Lux Award for best director and the Motion Picture Award for best story.
Regmi's experimental movie Paahuna was a moderate success and Upahaar was a flop that came in 2004. In 2004 he returned with big hit movies like Hami Tin Bhai and Muglaan. Nikhil Upreti got his first national award for best actor for his performance in Haami Teen Bhai. Regmi's first films as a producer, Maanis and Duniya, were box office failures although both were critically acclaimed. Regmi received the NEFTA award for best director for Maanis and his first national award for best director for Duniya in 2005. His next movies, Ram Balram and Yuddha were below average at box office. But in 2008 his MaHa project Kaha Bhetiyela was one of the biggest hits of his career; he received his second national award for best director and NEFTA award for best popular director of 2008. His first movie with MaHa (Madan Krishna Shrestha and Hari Bansha Acharya) in 2010 was below average but he received his third national award for best writer. Saathi Ma Timro in 2011 was a box office success. His new film Phool released on 4 May 2012. So simple was released on the festival eve of Bada dashain along with other 3 movies. Box office report was below the mark but Regmi was critically praised for his work. While his latest projects, Mann Le Mann Lai Chhunchha and Paraai released later in 2013.
Shiva Regmi's Plays
Filmography
Aafanta
Immense love for cinema brought Regmi to mainstream movie-making from plays. He was always sure that the theme of his movie should be social drama. The movie was praised especially for its good story and the strong performances of Shree Krishna Shrestha and Niruta Singh. Niruta Singh won the Motion Picture Award for best actress that year (2000 A.D.) for this movie. Regmi was acknowledged with best story and best director nominations. The movie was a smash hit. Songs like "Pauju Ko Chham Chham" are still hits among cine lovers.
Aafno Manchhe
Regmi reunited with Shree Krishna Shrestha and Niruta Singh, along with Bipana Thapa and Dilip Rayamajhi, for another social drama story. This movie is regarded as one of the cult masala movies of the Nepali film industry and is considered an all-time blockbuster of Nepal. It recorded a 51-day run in 9 cinema theatres in Kathmandu alone and a 100-day run in various places in Nepal. The song "Chatta Rumal Kya Malum" created euphoria in Nepal; it was recreated in 2019 in the movie Yatra, where it was again a huge hit. Regmi won his first prominent award, the Motion Pictures Award for best dialogue, and gained a nomination for best direction.
Haami Teen Bhai
The film features three of biggest Superstar actors of late 90s and 2000s era- Rajesh Hamal, Shree Krishna Shrestha and Nikhil Upreti in the lead roles supported by Rekha Thapa, Jharana Thapa, Nandita KC, Keshab Bhattrai, Sushila Rayamajhi, Ravi Giri etc. The movie was critically and commercially successful, with many critics highlighting its screenplay, music comedy and the actors' performance especially of its lead actors but over the top action scenes were criticized. It is one of the highest-grossing films in Nepali film history . It is considered a classic movie by fans. Nikhil Upreti received the Best Actor in First National Films Award 2062 BS for the role of younger brother Laxman/Abhishek.
Personal
Shiva Raj Regmi is the eldest son of Late Khaga Raj Regmi and Bhakta Maya Regmi. He was born in Baraghare in Chitwan District, Nepal in 1965. He spent his childhood in Chitwan and Gorkha with Ankit Regmi. He married Gyanu Regmi in 1985; together they have four children: Samjhana, Bhanawan, Abhishesh and Pallabi Regmi. Bhawana Regmi has started her own film career, appearing in So Simple, directed by Shiva Regmi.
Death
In March 2013, Regmi was admitted to Om Hospital in Kathmandu due to general illness symptoms such as diarrhea and vomiting. During a health checkup, it was discovered that his creatinine levels were high and recommended for peritoneal dialysis in May 2013.
In August 2013, Regmi experienced a stroke and had transient loss of vision . On August 14, 2013, he traveled to Delhi, India, for further treatment at Sir Gangaram Hospital. The doctors there advised him to take anti-tubercular drugs due to persistent fever of unknown origin and his compromised immune system. Regmi returned to Nepal after a two-week stay in India.
A renal transplant was scheduled for November 14, 2013. However, in October 2013, he had a seizure, and the cause remained undiagnosed. A few days later, he suffered a stroke that paralyzed the left half of his body (hemiplegia). Wanting to visit his hometown of Chitwan, he was transported there by ambulance. After a few days, he experienced difficulty breathing and was taken to CMS Hospital in Chitwan. Unfortunately, due to the Laxmi Puja period, doctors were not available. After five days, he was diagnosed with nosocomial pneumonia. Ventilatory support was provided on November 7. Despite unsuccessful treatment attempts at CMS Hospital, Regmi was airlifted to Vayoda Hospital in Balkhu, Kathmandu, on November 27. The plan was to transfer him to Max Hospital in Delhi, India, on December 9. However, he succumbed to kidney failure on December 9, 2013, at the age of 49 in Kathmandu.
Legacy
Regmi left behind a remarkable legacy that continues to resonate in the Nepali film industry even after his passing. Regmi's contribution to the industry is evident in his successful films, particularly his directorial masterpiece, "Ye Mero Hajur," which has spawned multiple sequels and become a beloved franchise.
After Shiva Regmi's death, his directed movie "Ye Mero Hajur" captivated audiences and achieved tremendous success at the box office. The film resonated so well with viewers that it led to the creation of sequels, namely "Ye Mero Hajur 2," "Ye Mero Hajur 3," and "Ye Mero Hajur 4." Each of these subsequent installments proved to be box office hits, carrying forward the essence and popularity of the original film.
One notable aspect that connects the sequels to the original "Ye Mero Hajur" is the inclusion of the iconic song "Fulako Thunga Houki." This song, with its melodious tune and meaningful lyrics, struck a chord with audiences and became a timeless piece in the Nepali film industry. Its presence in the sequels further solidified the connection between the films and continued to enchant fans.
Beyond "Ye Mero Hajur," Shiva Regmi's filmography boasts several other noteworthy contributions that have garnered a cult following over time. Movies such as "Hami Teen Bhai," "Sukha Dukha," "Aafno Manchhe," and "Kaha Bhetiyela" have emerged as cult classics in the Nepali film industry. These films, each unique in its storytelling and thematic exploration, have resonated with audiences and have stood the test of time.
Regmi's films are cherished for their engaging narratives, relatable characters, and emotional depth. They often delve into societal issues and touch upon human emotions, striking a chord with viewers on a personal level. Through his directorial prowess, Shiva Regmi left an indelible mark on Nepali cinema, shaping its landscape and contributing to its artistic growth.
Shiva Regmi's legacy continues to inspire and influence aspiring filmmakers in Nepal. His work serves as a testament to the power of storytelling and the impact that films can have on society. The enduring popularity of his films, including the "Ye Mero Hajur" franchise and his other cult classics, is a testament to the lasting impact of his creative vision. Shiva Regmi will always be remembered as a visionary filmmaker who touched the hearts of millions with his storytelling prowess and contributed significantly to the Nepali film industry.
References
External links
Nepalese film directors
2013 deaths
1965 births
People from Chitwan District
Deaths from kidney failure
21st-century Nepalese screenwriters
Khas people
|
Rene Cruz was a Filipino sports administrator and police official. He served as the president of the Philippine Olympic Committee (POC) from 1993 to 1996.
Prior to being a POC president, Cruz served as vice president in 1989. Under his term as POC President, the Philippines won a silver medal at the 1996 Summer Olympics in Atlanta. The country would have an Olympic medal drought until the 2016 edition. He also served as the head of the Philippine Badminton Association.
Cruz died at the Asian Hospital and Medical Center in Alabang, Muntinlupa due to cardiac arrest on September 29, 2015. He was 85 years old.
References
2015 deaths
Filipino sports executives and administrators
Filipino police officers
|
This is a list of films which placed number one at the weekly box office in Turkey during 2019. The weeks start on Fridays, and finish on Thursdays. The box-office number one is established in terms of tickets sold during the week.
Box office number-one films
Highest-grossing films
In-Year Release
References
2019
Turkey
2019 in Turkish cinema
|
Emigration from the United States is the process where citizens from the United States move to live in other countries, creating an American diaspora (overseas Americans). The process is the reverse of the immigration to the United States. The United States does not keep track of emigration, and counts of Americans abroad are thus only available based on statistics kept by the destination countries.
History
Due to the flow of people back and forth between the United Kingdom and its colonies, as well as between the colonies, there has been an American diaspora of a sort since before the United States was founded. During and immediately after the American Revolutionary War, a number of American Loyalists relocated to other countries, chiefly Canada and the United Kingdom. Residence in countries outside the British Empire was unusual, and usually limited to the wealthy, such as Benjamin Franklin, who was able to self-finance his trip to Paris as a U.S. diplomat.
18th century
After the American Revolutionary War, some 3,000 Black Loyalists - men who escaped enslavement by Patriot masters and served on the Loyalist side because of the Crown's guarantee of freedom - were evacuated from New York to Nova Scotia; they were individually listed in the Book of Negroes as the British gave them certificates of freedom and arranged for their transportation. The Crown gave them land grants and supplies to help them resettle in Nova Scotia. Other Black Loyalists were evacuated to London or the Caribbean colonies.
Thousands of slaves escaped from plantations and fled to British lines, especially after British occupation of Charleston, South Carolina. When the British evacuated, they took many former slaves with them. Many ended up among London's Black Poor, with 400 resettled by the Sierra Leone Company to Freetown in Africa in 1787. Five years later, another 1,192 Black Loyalists from Nova Scotia chose to emigrate to Sierra Leone, becoming known as the Nova Scotian settlers in the new British colony of Sierra Leone. Both waves of settlers became part of the Sierra Leone Creole people and the founders of the nation of Sierra Leone.
19th century
Thanks to the increase of whalers and clipper ships, Americans began to travel all over the world for business reasons.
The early 19th century also saw the beginning of overseas religious missionary activity, such as with Adoniram Judson in Burma.
During the War of 1812, some African American slaves joined the Corps of Colonial Marines to fight against the United States. Their reward was guaranteed emancipation (as per the Mutiny Act 1807) and new land set aside for them in southern Trinidad. They and their descendants later became known as the Merikins.
The middle of the 19th century saw the immigration of many New Englanders to Hawaii, as missionaries for the Congregational Church, and as traders and whalers. The American population eventually overthrew the government of Hawaii, leading to its annexation by the United States.
During this time the American Colonization Society established a colony in the Pepper Coast for freedmen known as Liberia. The ACS's main goals were to Christianize indigenous Africans, end the illegal slave trade, and resettle African Americans out of the United States. Their descendants became the Americo-Liberians, who dominated the country for most of its history.
During the early 19th century, particularly between 1824 and 1826, thousands of free blacks emigrated from the United States to Haiti to escape antebellum segregation and racist policy. They primarily settled in Samana Province, where their descendants still live today as the Samana Americans. They speak their own variety of English called Samana English.
During the American Civil War, President Lincoln asked Kansas Senator Samuel C. Pomeroy and Secretary of the Interior Caleb Blood Smith to develop a plan to resettle African Americans out of the United States. Pomeroy had come up with the idea of Linconia, a freedmen colony much like Liberia in modern Chiriqui Province, Panama. After nearby Central American nations expressed their opposition to the project, it was quickly scrapped. However, 453 African workers were sent to Ile-à-Vache in Haiti as part of a private colonization effort run by entrepreneur Bernard Kock. This colony was short-lived due to Kock breaking the contract. By the end of 1863, all of the colonists had returned to the United States.
After the Civil War, thousands of Southerners moved to Brazil, where slavery was still legal at the time. They founded a city called Americana and became known as Confederados. Some also migrated to Mexico, where they established the New Virginia Colony with the help of Emperor Maximilian I of Mexico. They founded their capital, Carlota, and had planned to make more settlements, but the colony was abandoned after the fall of the Second Mexican Empire, and most of the settlers returned to the U.S. There was also a sizeable presence of ex-confederates in British Honduras, now known as Belize.
In Asia, the U.S. government made efforts to secure special privileges for its citizens. This began with the Treaty of Wanghia in China in 1844. It was followed by the expedition of Commodore Perry to Japan 10 years later, and the United States–Korea Treaty of 1882. American traders began to settle in those countries.
Early 20th century
Many Americans migrated to the Philippines after it became a U.S. territory following the Philippine–American War.
Cecil Rhodes created the Rhodes Scholarship in 1902 to encourage greater cooperation between the United States, the British Empire and Germany by allowing students to study abroad.
Interwar period
In the period between the First and Second World Wars, many Americans, particularly writers such as Ernest Hemingway, Gertrude Stein, and Ezra Pound, migrated to Europe to take part in the cultural scene.
European cities like Amsterdam, Berlin, Copenhagen, Paris, Prague, Rome, Stockholm, and Vienna came to host a large number of Americans. Many Americans, typically those who were idealistic and/or involved in left-leaning politics, also participated in the Spanish Civil War (mainly supporting the Republicans against the Nationalists) in Spain while they lived in Madrid and elsewhere.
Other Americans returned home to the countries of their origin, including the parents of American author/illustrator Eric Carle, who returned to Germany. Thousands of Japanese Americans were unable to return to the United States, after the Attack on Pearl Harbor.
Éamon de Valera, the third Taoiseach of Ireland during the 1930s, was born in New York to an Irish mother and a Spanish father. He moved to Ireland at a young age with his mother's family.
Cold War
During the Cold War, Americans became a permanent fixture in many countries with large populations of American soldiers, such as West Germany and South Korea.
The Cold War also saw the development of government programs to encourage young Americans to go abroad. The Fulbright Program was established in 1946 to encourage cultural exchange, and the Peace Corps was created in 1961 both to encourage cultural exchange and a civic spirit of volunteerism.
With the formation of the state of Israel, over 100,000 Jews made aliyah to the holy land, where they played a role in the creation of the state. Other Americans traveled to countries like Lebanon, again to take place in the cultural scene.
During the Vietnam War, about 100,000 American men went abroad to avoid conscription, 90% of them going to Canada. European nations, including neutral states like Denmark, Norway, Sweden, and Switzerland, offered asylum to thousands of American expatriates who refused to fight.
A small number of Americans abandoned the country for political reasons, defecting to the Soviet Union, Cuba, or other countries, such as Miguel d'Escoto Brockmann, and sixties radicals such as Joanne Chesimard, Pete O'Neal, Eldridge Cleaver, and Stokely Carmichael.
During this period Americans continued to travel abroad for religious reasons, such as Richard James, inventor of the Slinky, who went to Bolivia with the Wycliffe Bible Translators, and the Peoples Temple establishment of Jonestown in Guyana.
After the Cold War
The opening of Eastern Europe, Central Europe, and Central Asia after the Cold War provided new opportunities for American businesspeople. Additionally, with the global dominance of the United States in the world economy, the ESL industry continued to grow, especially in new and emerging markets. Many Americans also take a year abroad during college, and some return to the country after graduation.
21st century
Iraq War deserters sought refuge mostly in Canada and Europe, and NSA whistleblower Edward Snowden escaped to Russia.
Increasing numbers of Americans retire abroad due to financial setbacks resulting from the 2008 financial crisis.
Young Americans facing a tough job market due to the recession are also increasingly open to working abroad.
According to a Gallup poll from January 2019, 16% of Americans, including 40% of women under the age of 30, would like to leave the United States. In 2018, the Federal Voting Assistance Program estimated a total number of 4.8 million American civilians lived abroad, 3.9 million civilians, plus 1.2 million service members and other government-affiliated Americans.
Reasons for emigrating
There are many reasons why Americans emigrate from the United States. Economic reasons include job or business opportunities, or a higher standard of living in another country. Others emigrate due to marriage or partnership to a foreigner, for religious or humanitarian purposes, or to seek adventure or experience a different culture. Many decide to retire abroad seeking a lower cost of living, especially more affordable health care. Immigrants to the United States may decide to rejoin family members in their countries of origin. Other reasons include political dissatisfaction, safety concerns and cultural issues such as racism. Some Americans may also emigrate to evade legal liabilities; a common past case was evasion of mandatory military service.
In addition to Americans who choose to emigrate as adults, many children are born in the United States to foreign temporary workers or international students and naturally move with their parents when they return to their countries of origin. Due to their acquisition of U.S. citizenship by birth but no significant connection to the country, they are sometimes called "accidental Americans".
Destinations with facilitated access
One reason the U.S. diaspora is unusually small relative to its home population is that it is generally much more difficult for Americans to emigrate to a foreign country than, for example, citizens of countries in the Schengen Zone; similar to most other large countries, Americans looking for economic opportunity are generally limited to transmigration within the U.S.
In addition to U.S. territories, U.S. citizens have the right to reside in the Marshall Islands, Micronesia and Palau due to a Compact of Free Association between the United States and each of these countries. They may also freely move to Svalbard due to its open migration policy, as long as they are able to obtain housing and means of support there. All of these jurisdictions, however, are tiny, with fewer than a half million people combined.
Americans with parents or ancestors from certain countries, such as Germany, Ireland and Italy, may be able to claim nationality via jus sanguinis and therefore move there freely. Germany and Austria also have an easier path to citizenship for descendants of victims of Nazi crimes, even if ius sanguinis does not apply in the specific case. Similarly, American Jews may move to Israel under its Law of Return.
The USMCA (and previously NAFTA) allows U.S. citizens to work in Canada and Mexico in business or in certain professions, with few restrictions. However, to obtain permanent residence they must still satisfy the regular immigration requirements in these countries.
Net effect
The United States is a net immigration country, meaning more people arrive in the U.S. than leave it. There is a scarcity of official records in this domain. Given the high dynamics of the emigration-prone groups, emigration from the United States remains indiscernible from temporary country leave. There are a few countries in the Caribbean which had very high migration rates to the United States in the 1980s and 1990s but recorded higher population totals in recent years, indicating significant return migration from the U.S., such as Trinidad and Tobago between its 2000 and 2011 censuses.
Citizenship
Anyone born in the United States, with the sole exception of those born to foreign diplomats, acquires U.S. citizenship at birth. Those born abroad to at least one American parent also acquire U.S. citizenship if the parent had lived in the United States for a certain number of years. Immigrants to the United States may also become U.S. citizens by naturalization.
In the past it was possible for Americans abroad to lose U.S. citizenship involuntarily, but after Supreme Court decisions such as Afroyim v. Rusk and Vance v. Terrazas, along with corresponding changes in U.S. law, they can only lose U.S. citizenship in a very limited number of ways, most commonly by expressly renouncing it at a U.S. embassy or consulate.
Historically, few Americans renounced U.S. citizenship per year, but the numbers drastically increased after 2010 when the U.S. government enacted the Foreign Account Tax Compliance Act, requiring foreign banks to report information on American holders of bank accounts located outside the United States. More than 3,000 Americans renounced U.S. citizenship in 2013, many citing the financial disclosure requirements and difficulty in finding banks willing to accept them as customers. More than 5,000 renounced in 2016, and more than 6,000 did in 2020.
Issues
One of the biggest issues with the American diaspora is double taxation. Unlike almost all countries in the world, the United States taxes its citizens even if they do not live in the country. The foreign earned income exclusion mitigates double taxation on income from work, but the Internal Revenue Code treats ordinary foreign savings plans held by residents of foreign countries as if they were offshore tax avoidance instruments and requires extensive asset reporting, resulting in significant costs for Americans at all income levels to comply with filing requirements even when they owe no tax. Even Canada's Registered Disability Savings Plan falls under such reporting requirements. The most prominent piece of legislation which has attracted the ire of Americans abroad is the Foreign Account Tax Compliance Act (FATCA). Disadvantages stemming from FATCA, such as hindering career advancement overseas, may decrease the number of Americans in the diaspora in future years. The problem is so severe that some Americans have addressed it by renouncing or relinquishing their U.S. citizenship. Since 2013, the number of people giving up US citizenship has risen to a new record each year, with an unprecedented 5,411 in 2016, up 26% from the 4,279 renunciations in 2015.
Statistics
There are no exact figures on how many Americans live abroad. The United States Census Bureau does not count Americans abroad, and individual U.S. embassies offer only rough estimates.
In 1999, a Department of State estimate suggested that the number of Americans abroad may be between three million and six million. In 2016, the agency estimated 9 million U.S. citizens were living abroad, but these numbers are highly open to dispute as they often are unverified and can change rapidly.
According to the Federal Voting Assistance Program (FVAP), the Department of State's estimates are inflated on purpose as their purpose is to prepare for emergencies. FVAP makes its own detailed estimates of the number of U.S. citizens abroad, by region and by country, and of those who are of voting age, based on a variety of sources such as censuses of other countries and U.S. tax and social security records. In 2018, it estimated about 4.8 million U.S. citizens abroad, of whom about 2.9 million were of voting age. FVAP's estimates also fluctuate significantly, for example it had estimated about 5.5 million in 2016.
The United Nations estimates the number of migrants by origin and destination of all countries and territories. In 2019, the organization estimated that about 3.2 million people from the United States were living elsewhere. This number is mostly based on country of birth recorded in censuses, so it does not include U.S. citizens who were not born in the United States, such as those who acquired U.S. citizenship by descent or naturalization.
One indicator of the U.S. citizen population overseas is the number of Consular Reports of Birth Abroad requested by U.S. citizens from a U.S. embassy or consulate as a proof of U.S. citizenship of their children born abroad. The Bureau of Consular Affairs reported issuing 503,585 such documents over the decade 2000–2009. Based on this, and on some assumptions about the family composition and birth rates, some authors estimate the U.S. civilian population overseas as between 3.6 and 4.3 million.
Sizes of certain subsets of U.S. citizens living abroad can be estimated based on statistics published by the Internal Revenue Service (IRS). U.S. citizens with income above a certain level are required to file a U.S. income tax regardless of where they reside. During 2019, the IRS recorded about 739,000 U.S. tax returns filed with a foreign address, representing about 1.3 million people including spouses and dependents. Other indicators are the number of U.S. tax returns with a partial exclusion on income from work abroad (about 476,000 in 2016) and those reporting foreign income other than passive income (about 1.5 million in 2016), but not all of these were from people actually residing abroad full-time.
Estimates by country
The list below is of the main countries hosting American populations. Those shown with exact counts are enumerations of Americans who have immigrated to those countries and are legally resident there, does not include those who were born there to one or two American parents, does not necessarily include those born in the U.S. to parents temporarily in the U.S. and moved with parents by right of citizenship rather than immigration, and does not necessarily include temporary expatriates.
– 899,311 United States-born residents of Mexico (2017)
–21,000 (2019)
– 800,000 (2013; all EU countries combined)
– 738,203 (2011)
– 700,000 according to a press release from the White House on 12/06/2017
– 600,000 (2015)
– 400,000 (2020)
– 260,000
Israel – 185,000
– 158,000 (2013)
– 140,222 (2016)
– 109,450 (2021)
– 100,619 (2008)
– 88,000 (2011)
– 15,000
– 71,493 (2010, Mainland China only)
– 54,000
– 48,225
– 60,000
– 52,486
– 47,408 (2021)
– 40,000
– 38,000
– 36,000
– 36,000
– 32,000
– 31,000 to 60,000
– 25,000
– 25,000
– 20,000
– 17,748 (2006)
– 16,555 (2009)
– 15,000
– 15,000
– 15,000
– 14,000
– 13,000
– 12,475 (2006)
– 11,000
– 10,552
— 10,409 (2017)
– 10,000
– 9,634 (2018)
– 9,510 (2019; 7,131 have residence permit for 12+ months)
– 130,000 to 170,000
– 8,013 (2012)
– 8,000
– 7,500
– 7,000
– 7,000
– 6,000
– 6,000
– 6,000
– 6,000
Finland – 5,576
– 5,417 (2010)
– 5,000
– 5,000
– 5,000
– 4,768 (2022)
– 4,000
– 4,000
– 4,000
– 4,000
– 3,000
– 3,000
– 3,000
– 3,000
– at least 2,008 up to 6,200
– at least 2,000
– 2,000
– 3,000
– 3,000
– 2,000
– 2,000
– 2,000
– 1,000
See also
Immigration to the United States
Americans in India
American Colombians
American Canadians
American Mexicans
Americans in Cuba
American Brazilians
Americans in the United Kingdom
American Australians
American New Zealanders
Americans in France
Americans in the Philippines
Americans in Japan
Americo-Liberian people
Sierra Leone Creole people
Americans living in Saudi Arabia
American settlement in the Philippines
Lost Generation
Mexicans of American descent
Confederados of Brazil
Taxation of non resident Americans
American Citizens Abroad
Taxation of United States persons
International taxation
Relinquishment of United States nationality
Samaná English
List of Americans who married international nobility
Americans in Haiti
Americans in Costa Rica
Americans in Germany
Americans in the United Arab Emirates
Americans in Uruguay
Americans in Ireland
Americans in Qatar
Americans in Taiwan
Americans in China
Americans in Guatemala
References
External links
The American Diaspora, Esquire, 26 September 2008.
Jones, Chris. The New American. Esquire, 23 September 2008.
Sappho, Paul. A Looming American Diaspora, Harvard Business Review, 2009.
Sullivan, Andrew. The New American Diaspora The Atlantic, 29 September 2009.
Go East Young Moneyman, The Economist, 14 April 2011.
William Curtis Donovan. The Coming American Diaspora, 1 October 2008.
Social history of the United States
History of the United States by topic
Cultural history of the United States
|
```java
package org.bouncycastle.cms;
import java.io.IOException;
import java.io.InputStream;
/**
 * A source of CMS content that can be consumed as a stream.
 */
interface CMSReadable
{
/**
 * Open a stream over this readable's content.
 *
 * @return the content stream
 * @throws IOException  on a low-level read failure
 * @throws CMSException on a CMS-level processing failure
 */
public InputStream getInputStream()
throws IOException, CMSException;
}
```
|
```java
/*
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing,
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* specific language governing permissions and limitations
*/
package org.apache.weex.uitest.TC_AG;
import org.apache.weex.WXPageActivity;
import org.apache.weex.util.TestFlow;
import java.util.TreeMap;
import org.junit.Before;
import org.junit.Test;
/**
 * UI test flow for the AG_Border component's bottom-right border radius case:
 * initializes the test page, then clicks two list entries and captures a
 * screenshot after each step.
 */
public class AG_Border_A_Border_Bottom_Right_Radius extends TestFlow {

    /** Run the flow against the weex page activity. */
    public AG_Border_A_Border_Bottom_Right_Radius() {
        super(WXPageActivity.class);
    }

    @Before
    public void setUp() throws InterruptedException {
        super.setUp();
        // Fix: the original wrote "new <String, Object> TreeMap()" — explicit
        // type arguments on a non-generic constructor; the type arguments
        // belong on the class itself.
        TreeMap<String, Object> testMap = new TreeMap<String, Object>();
        testMap.put("testComponet", "AG_Border");
        testMap.put("testChildCaseInit", "AG_Border_A_Border_Bottom_Right_Radius");
        // step 1: click entry "10", then screenshot (double-brace initializer
        // kept from the original flow definition)
        testMap.put("step1", new TreeMap() {
            {
                put("click", "10");
                put("screenshot", "AG_Border_A_Border_Bottom_Right_Radius_01_10");
            }
        });
        // step 2: click entry "20", then screenshot
        testMap.put("step2", new TreeMap() {
            {
                put("click", "20");
                put("screenshot", "AG_Border_A_Border_Bottom_Right_Radius_02_20");
            }
        });
        super.setTestMap(testMap);
    }

    @Test
    public void doTest() {
        super.testByTestMap();
    }
}
```
|
```c++
/*=============================================================================
file LICENSE_1_0.txt or copy at path_to_url
==============================================================================*/
// Boost.Fusion: value_at intrinsic for zip_view. The value type at index N of
// a zip view is a fusion vector holding the value type at N of each zipped
// sequence (unused padding sequences contribute unused_type).
#if !defined(FUSION_VALUE_AT_IMPL_20060124_2129)
#define FUSION_VALUE_AT_IMPL_20060124_2129
#include <boost/fusion/container/vector/convert.hpp>
#include <boost/fusion/algorithm/transformation/transform.hpp>
#include <boost/fusion/sequence/intrinsic/value_at.hpp>
#include <boost/type_traits/remove_reference.hpp>
#include <boost/fusion/support/unused.hpp>
#include <boost/mpl/eval_if.hpp>
#include <boost/mpl/identity.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/config.hpp>
namespace boost { namespace fusion {
struct zip_view_tag;
namespace detail
{
// Polymorphic function object mapping one zipped sequence to its value type
// at index N; used below with result_of::transform over all zipped sequences.
template<typename N>
struct poly_value_at
{
template<typename T>
struct result;
// Result metafunction: unused slots yield unused_type; otherwise defer to the
// sequence's own value_at (after stripping the reference added by transform).
template<typename N1, typename Seq>
struct result<poly_value_at<N1>(Seq)>
: mpl::eval_if<is_same<Seq, unused_type const&>,
mpl::identity<unused_type>,
result_of::value_at<typename remove_reference<Seq>::type, N> >
{};
// never called, but needed for decltype-based result_of (C++0x)
#ifndef BOOST_NO_RVALUE_REFERENCES
template<typename Seq>
typename result<poly_value_at(Seq)>::type
operator()(Seq&&) const;
#endif
};
}
namespace extension
{
// Primary template declared elsewhere; specialized here for zip_view's tag.
template<typename Tag>
struct value_at_impl;
template<>
struct value_at_impl<zip_view_tag>
{
template<typename Sequence, typename N>
struct apply
{
// Map every zipped sequence to its value type at N, then collapse the lazy
// transform view into a concrete fusion vector.
typedef typename result_of::transform<
typename Sequence::sequences,
detail::poly_value_at<N> >::type values;
typedef typename result_of::as_vector<values>::type type;
};
};
}
}}
#endif
```
|
John Leland (May 14, 1754 – January 14, 1841) was an American Baptist minister who preached in Massachusetts and Virginia, as well as an outspoken abolitionist. He was an important figure in the struggle for religious liberty in the United States. Leland also later opposed the rise of missionary societies among Baptists.
Early life
Leland was born on May 14, 1754, in Grafton, Massachusetts.
His parents were Congregationalists. He married Sally Devine and they had nine children, Betsy, Hannah, Polly, John, Sally, Lucy, Fanny, Nancy, & Lemuel.
Public life and views
He was baptized in June 1774 by Elder Noah Alden. Leland joined the Baptist Church in Bellingham, Massachusetts, in 1775. He left for Virginia in 1775 or 1776, and ministered there until 1791, when he returned to Massachusetts.
During the 1788-89 election while still living in Virginia, Leland threw his support behind James Madison due to Madison's support for religious liberty in what became the First Amendment to the Constitution, and Madison was seated in the first Congress that same year. Leland returned to Massachusetts in 1791 the year the Bill of Rights was ratified, leaving Virginia after an anti-slavery sermon. Back in New England, Leland helped to found several Baptist congregations in Connecticut, to which President Jefferson later wrote his famous letter to the Baptists of Danbury, Connecticut in 1802 regarding religious freedom.
A well-known incident in Leland's life was the Cheshire Mammoth Cheese. The people of Cheshire, Massachusetts made and sent a giant block of cheese to President Thomas Jefferson. Leland took the block from Cheshire to Washington, D. C., and presented it to Jefferson on January 1, 1802. While there, Leland was even invited to preach to the Congress and the President. Of this incident he wrote, "In November, 1801 I journeyed to the south, as far as Washington, in charge of a cheese, sent to President Jefferson. Notwithstanding my trust, I preached all the way there and on my return. I had large congregations; led in part by curiosity to hear the Mammoth Priest, as I was called." He was invited to preach a message of religious liberty in Congress upon his arrival.
For Michael I. Meyerson, Leland was the most prominent religious figure of the founding era to champion universal religious freedom. John M. Cobin says that Leland held, in seminal form, to the "liberty of conscience" position on public policy theology.
Leland died on January 14, 1841, in North Adams, Massachusetts. His tombstone reads, "Here lies the body of John Leland, of Cheshire, who labored 67 years to promote piety and vindicate the civil and religious rights of all men."
He was known as a hymn writer; "The Day Is Past and Gone, The Evening Shades Appear" has been included in 391 hymnals. Several of his hymns are preserved in the Sacred Harp.
Leland opposed theological seminaries. Ironically, The John Leland Center for Theological Studies in Virginia is named in his honor. The school was named for Leland for three reasons: his firm stand for religious liberty for all, his opposition to slavery, and his service as a pastor and evangelist.
Excerpts from his writings
"The notion of a Christian commonwealth should be exploded forever...Government should protect every man in thinking and speaking freely, and see that one does not abuse another. The liberty I contend for is more than toleration. The very idea of toleration is despicable; it supposes that some have a pre-eminence above the rest to grant indulgence, whereas all should be equally free, Jews, Turks, Pagans and Christians." - A Chronicle of His Time in Virginia.
"Truth disdains the aid of law for its defense — it will stand upon its own merits." - Right of Conscience Inalienable.
"Every man must give account of himself to God, and therefore every man ought to be at liberty to serve God in a way that he can best reconcile to his conscience. If government can answer for individuals at the day of judgment, let men be controlled by it in religious matters; otherwise, let men be free." - Right of Conscience Inalienable.
"Resolved, that slavery is a violent deprivation of rights of nature and inconsistent with a republican government, and therefore, recommend it to our brethren to make use of every legal measure to extirpate this horrid evil from the land; and pray Almighty God that our honorable legislature may have it in their power to proclaim the great jubilee, consistent with the principles of good policy." - Resolution for the General Committee of Virginia Baptists meeting in Richmond, Virginia, in 1789.
Sources
References
1754 births
1841 deaths
Baptist ministers from the United States
Christian hymnwriters
People from Grafton, Massachusetts
People from Cheshire, Massachusetts
|
```python
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import gridspec
from IPython.display import clear_output
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from matplotlib.text import Annotation
from mpl_toolkits.mplot3d.proj3d import proj_transform
import autograd.numpy as np
from autograd import value_and_grad
from autograd import hessian
from autograd import grad as compute_grad
from autograd.misc.flatten import flatten_func
import math
import time
import copy
class classification_3d_visualizer:
    '''
    Visualize classification on a 2-class dataset with N = 2.

    Data is supplied column-major: each column is one sample whose last
    entry is the label (the tanh surface plots assume labels are +/-1).
    '''
    #### initialize ####
    def __init__(self, data):
        # transpose so rows are samples, then split features / labels
        data = data.T
        self.data = data
        self.x = data[:, :-1]
        self.y = data[:, -1]
        # colors for viewing classification data 'from above'
        self.colors = ['cornflowerblue', 'salmon', 'lime', 'bisque', 'mediumaquamarine', 'b', 'm', 'g']

    def center_data(self):
        # center data
        # NOTE(review): this subtracts the single global mean of all feature
        # entries (not per-column means) and also centers the labels —
        # confirm that is intended before relying on it
        self.x = self.x - np.mean(self.x)
        self.y = self.y - np.mean(self.y)

    # the counting cost function - for determining best weights from input weight history
    def counting_cost(self, w):
        '''Scaled misclassification count: 0.25 * sum_p (sign(w0 + w[1:].x_p) - y_p)^2.'''
        cost = 0
        for p in range(0, len(self.y)):
            x_p = self.x[p]
            y_p = self.y[p]
            a_p = w[0] + sum([a*b for a, b in zip(w[1:], x_p)])
            cost += (np.sign(a_p) - y_p)**2
        return 0.25*cost

    ######## 3d static and animation functions ########
    # produce static image of gradient descent or newton's method run
    def static_fig(self, w, **kwargs):
        '''
        Plot the tanh classifier defined by weights w: 3d surface (left),
        top-down separator view (right) and, optionally, a cost-history
        panel (pass cost_plot='on' together with kwargs g and w_hist).
        '''
        # grab args
        zplane = 'on'
        if 'zplane' in kwargs:
            zplane = kwargs['zplane']
        cost_plot = 'off'
        if 'cost_plot' in kwargs:
            cost_plot = kwargs['cost_plot']
        g = 0
        if 'g' in kwargs:
            g = kwargs['g']
        # bug fix: the cost panel previously read an undefined name `w_hist`
        # (NameError whenever cost_plot == 'on'); it is now an explicit kwarg
        w_hist = []
        if 'w_hist' in kwargs:
            w_hist = kwargs['w_hist']

        ### plot all input data ###
        # generate input range for functions
        minx = min(min(self.x[:, 0]), min(self.x[:, 1]))
        maxx = max(max(self.x[:, 0]), max(self.x[:, 1]))
        gapx = (maxx - minx)*0.1
        minx -= gapx
        maxx += gapx

        r = np.linspace(minx, maxx, 400)
        x1_vals, x2_vals = np.meshgrid(r, r)
        x1_vals.shape = (len(r)**2, 1)
        x2_vals.shape = (len(r)**2, 1)
        g_vals = np.tanh(w[0] + w[1]*x1_vals + w[2]*x2_vals)
        g_vals = np.asarray(g_vals)

        # reshape evaluations into grid form for surface/contour plotting
        x1_vals.shape = (len(r), len(r))
        x2_vals.shape = (len(r), len(r))
        g_vals.shape = (len(r), len(r))

        # create figure to plot
        num_panels = 2
        fig_len = 9
        widths = [1, 1]
        if cost_plot == 'on':
            num_panels = 3
            fig_len = 8
            widths = [2, 2, 1]

        fig, axs = plt.subplots(1, num_panels, figsize=(fig_len, 4))
        gs = gridspec.GridSpec(1, num_panels, width_ratios=widths)
        ax1 = plt.subplot(gs[0], projection='3d')
        ax2 = plt.subplot(gs[1], aspect='equal')
        ax3 = 0
        if cost_plot == 'on':
            ax3 = plt.subplot(gs[2], aspect=0.5)
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)  # remove whitespace around 3d figure

        # plot points - first in 3d, then from above
        self.scatter_pts(ax1)
        self.separator_view(ax2)

        # set zaxis to the left
        self.move_axis_left(ax1)

        # set view
        if 'view' in kwargs:
            view = kwargs['view']
            ax1.view_init(view[0], view[1])

        class_nums = np.unique(self.y)
        C = len(class_nums)

        # plot regression surface
        ax1.plot_surface(x1_vals, x2_vals, g_vals, alpha=0.1, color='k', rstride=20, cstride=20, linewidth=0, edgecolor='k')

        # plot zplane = 0 in left 3d panel - showing the intersection of the
        # regressor with z = 0 (i.e., the separator, in the 3d plot too)
        if zplane == 'on':
            ax1.plot_surface(x1_vals, x2_vals, g_vals*0, alpha=0.1, rstride=20, cstride=20, linewidth=0.15, color='w', edgecolor='k')
            # plot separator curve in left plot
            ax1.contour(x1_vals, x2_vals, g_vals, colors='k', levels=[0], linewidths=3, zorder=1)
            if C == 2:
                ax1.contourf(x1_vals, x2_vals, g_vals, colors=self.colors[1], levels=[0, 1], zorder=1, alpha=0.1)
                ax1.contourf(x1_vals, x2_vals, g_vals+1, colors=self.colors[0], levels=[0, 1], zorder=1, alpha=0.1)

        # plot separator in right plot
        ax2.contour(x1_vals, x2_vals, g_vals, colors='k', levels=[0], linewidths=3, zorder=1)

        # plot color filled contour based on separator (the two-class case
        # first maps the surface to {0, 2} via sign; the duplicated if/else
        # branches of the original are collapsed)
        if C == 2:
            g_vals = np.sign(g_vals) + 1
        ax2.contourf(x1_vals, x2_vals, g_vals, colors=self.colors[:], alpha=0.1, levels=range(0, C+1))

        # plot cost function history
        if cost_plot == 'on':
            g_hist = np.asarray([g(v) for v in w_hist]).flatten()
            ax3.plot(np.arange(len(g_hist)), g_hist, linewidth=2)
            ax3.set_xlabel('iteration', fontsize=13)
            ax3.set_title('cost value', fontsize=12)
        plt.show()

    # produce static image of the separator viewed from directly above
    def static_fig_topview(self, w, **kwargs):
        '''Top-down view only: data points plus the separator defined by weights w.'''
        ### plot all input data ###
        # generate input range for functions
        minx = min(min(self.x[:, 0]), min(self.x[:, 1]))
        maxx = max(max(self.x[:, 0]), max(self.x[:, 1]))
        gapx = (maxx - minx)*0.1
        minx -= gapx
        maxx += gapx

        r = np.linspace(minx, maxx, 400)
        x1_vals, x2_vals = np.meshgrid(r, r)
        x1_vals.shape = (len(r)**2, 1)
        x2_vals.shape = (len(r)**2, 1)
        g_vals = np.tanh(w[0] + w[1]*x1_vals + w[2]*x2_vals)
        g_vals = np.asarray(g_vals)

        x1_vals.shape = (len(r), len(r))
        x2_vals.shape = (len(r), len(r))
        g_vals.shape = (len(r), len(r))

        ### initialize figure: narrow blank side panels keep the middle square
        fig = plt.figure(figsize=(9, 4))
        gs = gridspec.GridSpec(1, 3, width_ratios=[1, 5, 1])
        ax1 = plt.subplot(gs[0]); ax1.axis('off')
        ax2 = plt.subplot(gs[1], aspect='equal')
        ax3 = plt.subplot(gs[2]); ax3.axis('off')

        # plot points from above
        self.separator_view(ax2)

        class_nums = np.unique(self.y)
        C = len(class_nums)

        # plot separator
        ax2.contour(x1_vals, x2_vals, g_vals, colors='k', levels=[0], linewidths=3, zorder=1)

        # plot color filled contour based on separator
        if C == 2:
            g_vals = np.sign(g_vals) + 1
        ax2.contourf(x1_vals, x2_vals, g_vals, colors=self.colors[:], alpha=0.1, levels=range(0, C+1))
        plt.show()

    # set axis in left panel
    def move_axis_left(self, ax):
        '''Move the z-axis of a 3d panel to its left side.

        NOTE(review): relies on the private matplotlib attribute
        ``zaxis._PLANES`` — may break across matplotlib upgrades.
        '''
        tmp_planes = ax.zaxis._PLANES
        ax.zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
                            tmp_planes[0], tmp_planes[1],
                            tmp_planes[4], tmp_planes[5])
        view_1 = (25, -135)
        view_2 = (25, -45)
        init_view = view_2
        ax.view_init(*init_view)

    ###### plot plotting functions ######
    def plot_data(self, **kwargs):
        '''Scatter the dataset in 3d (left) and from above (right).'''
        # construct figure (bug fix: build an empty figure instead of three
        # stray axes from plt.subplots that a 2-panel gridspec never used)
        fig = plt.figure(figsize=(9, 4))
        fig.subplots_adjust(left=0, right=1, bottom=0, top=1)  # remove whitespace around 3d figure
        gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
        ax1 = plt.subplot(gs[0], projection='3d')
        ax2 = plt.subplot(gs[1], aspect='equal')

        # plot points - first in 3d, then from above
        self.scatter_pts(ax1)
        self.separator_view(ax2)

        # set zaxis to the left
        self.move_axis_left(ax1)

        # set view
        if 'view' in kwargs:
            view = kwargs['view']
            ax1.view_init(view[0], view[1])

    # scatter points
    def scatter_pts(self, ax):
        '''3d scatter of the data colored per class, with cleaned-up panes.'''
        if np.shape(self.x)[1] == 2:
            # set plotting limits (bug fix: the second-axis limits previously
            # reused column 0 of the data instead of column 1)
            xmax1 = copy.deepcopy(max(self.x[:, 0]))
            xmin1 = copy.deepcopy(min(self.x[:, 0]))
            xgap1 = (xmax1 - xmin1)*0.35
            xmin1 -= xgap1
            xmax1 += xgap1

            xmax2 = copy.deepcopy(max(self.x[:, 1]))
            xmin2 = copy.deepcopy(min(self.x[:, 1]))
            xgap2 = (xmax2 - xmin2)*0.35
            xmin2 -= xgap2
            xmax2 += xgap2

            ymax = max(self.y)
            ymin = min(self.y)
            ygap = (ymax - ymin)*0.2
            ymin -= ygap
            ymax += ygap

            # scatter points, one color per class
            class_nums = np.unique(self.y)
            C = len(class_nums)
            for c in range(C):
                ind = np.argwhere(self.y == class_nums[c])
                ind = [v[0] for v in ind]
                ax.scatter(self.x[ind, 0], self.x[ind, 1], self.y[ind], s=80, color=self.colors[c], edgecolor='k', linewidth=1.5)

            # clean up panel
            ax.set_xlim([xmin1, xmax1])
            ax.set_ylim([xmin2, xmax2])
            ax.set_zlim([ymin, ymax])

            ax.set_xticks(np.arange(round(xmin1) + 1, round(xmax1), 1.0))
            ax.set_yticks(np.arange(round(xmin2) + 1, round(xmax2), 1.0))
            ax.set_zticks([-1, 0, 1])

            # label axes
            ax.set_xlabel(r'$x_1$', fontsize=12, labelpad=5)
            ax.set_ylabel(r'$x_2$', rotation=0, fontsize=12, labelpad=5)
            ax.set_zlabel(r'$y$', rotation=0, fontsize=12, labelpad=-3)

            # hide panes and grid lines for a clean look
            ax.xaxis.pane.fill = False
            ax.yaxis.pane.fill = False
            ax.zaxis.pane.fill = False
            ax.xaxis.pane.set_edgecolor('white')
            ax.yaxis.pane.set_edgecolor('white')
            ax.zaxis.pane.set_edgecolor('white')
            ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)

    # plot data 'from above' in separator view
    def separator_view(self, ax):
        '''Top-down scatter of the data, one color per class.'''
        # set plotting limits (bug fix: second-axis limits now use column 1)
        xmax1 = copy.deepcopy(max(self.x[:, 0]))
        xmin1 = copy.deepcopy(min(self.x[:, 0]))
        xgap1 = (xmax1 - xmin1)*0.05
        xmin1 -= xgap1
        xmax1 += xgap1

        xmax2 = copy.deepcopy(max(self.x[:, 1]))
        xmin2 = copy.deepcopy(min(self.x[:, 1]))
        xgap2 = (xmax2 - xmin2)*0.05
        xmin2 -= xgap2
        xmax2 += xgap2

        # scatter points
        classes = np.unique(self.y)
        count = 0
        for num in classes:
            inds = np.argwhere(self.y == num)
            inds = [s[0] for s in inds]
            ax.scatter(self.data[inds, 0], self.data[inds, 1], color=self.colors[int(count)], linewidth=1, marker='o', edgecolor='k', s=50)
            count += 1

        # clean up panel
        ax.set_xlim([xmin1, xmax1])
        ax.set_ylim([xmin2, xmax2])
        ax.set_xticks(np.arange(round(xmin1), round(xmax1) + 1, 1.0))
        ax.set_yticks(np.arange(round(xmin2), round(xmax2) + 1, 1.0))

        # label axes
        ax.set_xlabel(r'$x_1$', fontsize=12, labelpad=0)
        ax.set_ylabel(r'$x_2$', rotation=0, fontsize=12, labelpad=5)
class classification_2d_visualizer:
    '''
    Visualize logistic regression applied to a 2-class dataset with N = 2.

    data is column-major (samples are columns, last row holds the labels);
    g is the cost function being minimized, used for contour/surface plots.
    '''
    #### initialize ####
    def __init__(self, data, g):
        # transpose so rows are samples, then split features / labels
        data = data.T
        self.data = data
        self.x = data[:, :-1]
        self.y = data[:, -1]
        self.g = g
        # colors for viewing classification data 'from above'
        self.colors = ['cornflowerblue', 'salmon', 'lime', 'bisque', 'mediumaquamarine', 'b', 'm', 'g']

    # the counting cost function - for determining best weights from input weight history
    def counting_cost(self, w):
        '''Scaled misclassification count: 0.25 * sum_p (sign(w0 + w[1:].x_p) - y_p)^2.'''
        cost = 0
        for p in range(0, len(self.y)):
            x_p = self.x[p]
            y_p = self.y[p]
            a_p = w[0] + sum([a*b for a, b in zip(w[1:], x_p)])
            cost += (np.sign(a_p) - y_p)**2
        return 0.25*cost

    ######## 2d functions ########
    # animate gradient descent or newton's method
    def animate_run(self, savepath, w_hist, **kwargs):
        '''Animate a descent run: data fit (left) and steps on the cost contour (right).'''
        self.w_hist = w_hist

        ##### setup figure to plot #####
        fig = plt.figure(figsize=(8, 3))
        artist = fig

        # create subplot with 2 panels
        gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
        ax1 = plt.subplot(gs[0])
        ax2 = plt.subplot(gs[1])

        # produce color scheme: ramp over the first half of the run, then hold
        s = np.linspace(0, 1, len(self.w_hist[:round(len(w_hist)/2)]))
        s.shape = (len(s), 1)
        t = np.ones(len(self.w_hist[round(len(w_hist)/2):]))
        t.shape = (len(t), 1)
        s = np.vstack((s, t))
        self.colorspec = []
        self.colorspec = np.concatenate((s, np.flipud(s)), 1)
        self.colorspec = np.concatenate((self.colorspec, np.zeros((len(s), 1))), 1)

        # seed left panel plotting range
        xmin = copy.deepcopy(min(self.x))
        xmax = copy.deepcopy(max(self.x))
        xgap = (xmax - xmin)*0.1
        xmin -= xgap
        xmax += xgap
        x_fit = np.linspace(xmin, xmax, 300)

        # seed right panel contour plot
        viewmax = 3
        if 'viewmax' in kwargs:
            viewmax = kwargs['viewmax']
        view = [20, 100]
        if 'view' in kwargs:
            view = kwargs['view']
        num_contours = 15
        if 'num_contours' in kwargs:
            num_contours = kwargs['num_contours']
        self.contour_plot(ax2, viewmax, num_contours)

        # start animation
        num_frames = len(self.w_hist)
        print('starting animation rendering...')

        def animate(k):
            # clear panels
            ax1.cla()

            # current color
            color = self.colorspec[k]

            # print rendering update
            if np.mod(k+1, 25) == 0:
                print('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
            if k == num_frames - 1:
                print('animation rendering complete!')
                time.sleep(1.5)
                clear_output()

            ###### make left panel - plot data and fit ######
            # initialize fit
            w = self.w_hist[k]
            y_fit = np.tanh(w[0] + x_fit*w[1])

            # scatter data
            self.scatter_pts(ax1)

            # plot fit to data
            ax1.plot(x_fit, y_fit, color=color, linewidth=2)

            ###### make right panel - plot contour and steps ######
            if k == 0:
                ax2.scatter(w[0], w[1], s=90, facecolor=color, edgecolor='k', linewidth=0.5, zorder=3)
            if k > 0 and k < num_frames:
                self.plot_pts_on_contour(ax2, k, color)
            if k == num_frames - 1:
                ax2.scatter(w[0], w[1], s=90, facecolor=color, edgecolor='k', linewidth=0.5, zorder=3)

            return artist,

        anim = animation.FuncAnimation(fig, animate, frames=num_frames, interval=num_frames, blit=True)

        # produce animation and save
        fps = 50
        if 'fps' in kwargs:
            fps = kwargs['fps']
        anim.save(savepath, fps=fps, extra_args=['-vcodec', 'libx264'])
        clear_output()

    # produce static image of gradient descent or newton's method run
    def static_fig(self, w_hist, **kwargs):
        '''Static version of animate_run: final fit (left) plus all steps on the contour (right).'''
        self.w_hist = w_hist
        ind = -1
        show_path = True
        if np.size(w_hist) == 0:
            show_path = False
        w = 0
        if show_path:
            w = w_hist[ind]

        ##### setup figure to plot #####
        fig = plt.figure(figsize=(8, 3))
        artist = fig

        # create subplot with 2 panels
        gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
        ax1 = plt.subplot(gs[0])
        ax2 = plt.subplot(gs[1])

        # produce color scheme (same rule as animate_run)
        s = np.linspace(0, 1, len(self.w_hist[:round(len(self.w_hist)/2)]))
        s.shape = (len(s), 1)
        t = np.ones(len(self.w_hist[round(len(self.w_hist)/2):]))
        t.shape = (len(t), 1)
        s = np.vstack((s, t))
        self.colorspec = []
        self.colorspec = np.concatenate((s, np.flipud(s)), 1)
        self.colorspec = np.concatenate((self.colorspec, np.zeros((len(s), 1))), 1)

        # seed left panel plotting range
        xmin = copy.deepcopy(min(self.x))
        xmax = copy.deepcopy(max(self.x))
        xgap = (xmax - xmin)*0.1
        xmin -= xgap
        xmax += xgap
        x_fit = np.linspace(xmin, xmax, 300)

        # seed right panel contour plot
        viewmax = 3
        if 'viewmax' in kwargs:
            viewmax = kwargs['viewmax']
        view = [20, 100]
        if 'view' in kwargs:
            view = kwargs['view']
        num_contours = 15
        if 'num_contours' in kwargs:
            num_contours = kwargs['num_contours']

        ### contour plot in right panel ###
        self.contour_plot(ax2, viewmax, num_contours)

        ### make left panel - plot data and fit ###
        # scatter data
        self.scatter_pts(ax1)
        if show_path:
            # plot the final fit to the data
            y_fit = np.tanh(w[0] + x_fit*w[1])
            color = self.colorspec[-1]
            ax1.plot(x_fit, y_fit, color=color, linewidth=2)

        # add points to right panel contour plot
        num_frames = len(self.w_hist)
        for k in range(num_frames):
            # current color and weights
            color = self.colorspec[k]
            w = self.w_hist[k]

            ###### make right panel - plot contour and steps ######
            if k == 0:
                ax2.scatter(w[0], w[1], s=90, facecolor=color, edgecolor='k', linewidth=0.5, zorder=3)
            if k > 0 and k < num_frames:
                self.plot_pts_on_contour(ax2, k, color)
            if k == num_frames - 1:
                ax2.scatter(w[0], w[1], s=90, facecolor=color, edgecolor='k', linewidth=0.5, zorder=3)
        plt.show()

    ###### plot plotting functions ######
    def plot_data(self, **kwargs):
        '''Scatter the dataset: a 2d panel for N = 1, a 3d + top-down pair for N = 2.'''
        # construct figure (bug fix: build an empty figure instead of three
        # stray axes from plt.subplots that the gridspec calls never used)
        fig = plt.figure(figsize=(9, 3))
        if np.shape(self.x)[1] == 1:
            # create subplot with 2 panels
            gs = gridspec.GridSpec(1, 3, width_ratios=[1, 2, 1])
            ax1 = plt.subplot(gs[0]); ax1.axis('off')
            ax2 = plt.subplot(gs[1])
            ax3 = plt.subplot(gs[2]); ax3.axis('off')

            # scatter points
            self.scatter_pts(ax2)
        if np.shape(self.x)[1] == 2:
            gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
            ax1 = plt.subplot(gs[0], projection='3d')
            ax2 = plt.subplot(gs[1], aspect='equal')

            # plot points - first in 3d, then from above
            self.scatter_pts(ax1)
            self.separator_view(ax2)

            # set zaxis to the left
            self.move_axis_left(ax1)

            # set view
            if 'view' in kwargs:
                view = kwargs['view']
                ax1.view_init(view[0], view[1])

    # set axis in left panel (bug fix: plot_data called self.move_axis_left
    # but this class never defined it — copied from the 3d visualizer)
    def move_axis_left(self, ax):
        '''Move the z-axis of a 3d panel to its left side.

        NOTE(review): relies on the private matplotlib attribute
        ``zaxis._PLANES`` — may break across matplotlib upgrades.
        '''
        tmp_planes = ax.zaxis._PLANES
        ax.zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
                            tmp_planes[0], tmp_planes[1],
                            tmp_planes[4], tmp_planes[5])
        view_1 = (25, -135)
        view_2 = (25, -45)
        init_view = view_2
        ax.view_init(*init_view)

    # scatter points
    def scatter_pts(self, ax):
        '''Scatter the data on ax: plain 2d when N = 1, a 3d class-colored scatter when N = 2.'''
        if np.shape(self.x)[1] == 1:
            # set plotting limits
            xmax = copy.deepcopy(max(self.x))
            xmin = copy.deepcopy(min(self.x))
            xgap = (xmax - xmin)*0.2
            xmin -= xgap
            xmax += xgap

            ymax = max(self.y)
            ymin = min(self.y)
            ygap = (ymax - ymin)*0.2
            ymin -= ygap
            ymax += ygap

            # initialize points
            ax.scatter(self.x, self.y, color='k', edgecolor='w', linewidth=0.9, s=40)

            # clean up panel
            ax.set_xlim([xmin, xmax])
            ax.set_ylim([ymin, ymax])

            # label axes
            ax.set_xlabel(r'$x$', fontsize=12)
            ax.set_ylabel(r'$y$', rotation=0, fontsize=12)
            ax.set_title('data', fontsize=13)
            ax.axhline(y=0, color='k', zorder=0, linewidth=0.5)
            ax.axvline(x=0, color='k', zorder=0, linewidth=0.5)
        if np.shape(self.x)[1] == 2:
            # set plotting limits (bug fix: the second-axis limits previously
            # reused column 0 of the data instead of column 1)
            xmax1 = copy.deepcopy(max(self.x[:, 0]))
            xmin1 = copy.deepcopy(min(self.x[:, 0]))
            xgap1 = (xmax1 - xmin1)*0.35
            xmin1 -= xgap1
            xmax1 += xgap1

            xmax2 = copy.deepcopy(max(self.x[:, 1]))
            xmin2 = copy.deepcopy(min(self.x[:, 1]))
            xgap2 = (xmax2 - xmin2)*0.35
            xmin2 -= xgap2
            xmax2 += xgap2

            ymax = max(self.y)
            ymin = min(self.y)
            ygap = (ymax - ymin)*0.2
            ymin -= ygap
            ymax += ygap

            # scatter points, one color per class
            class_nums = np.unique(self.y)
            C = len(class_nums)
            for c in range(C):
                ind = np.argwhere(self.y == class_nums[c])
                ind = [v[0] for v in ind]
                ax.scatter(self.x[ind, 0], self.x[ind, 1], self.y[ind], s=80, color=self.colors[c], edgecolor='k', linewidth=1.5)

            # clean up panel
            ax.set_xlim([xmin1, xmax1])
            ax.set_ylim([xmin2, xmax2])
            ax.set_zlim([ymin, ymax])

            ax.set_xticks(np.arange(round(xmin1) + 1, round(xmax1), 1.0))
            ax.set_yticks(np.arange(round(xmin2) + 1, round(xmax2), 1.0))
            ax.set_zticks([-1, 0, 1])

            # label axes
            ax.set_xlabel(r'$x_1$', fontsize=12, labelpad=5)
            ax.set_ylabel(r'$x_2$', rotation=0, fontsize=12, labelpad=5)
            ax.set_zlabel(r'$y$', rotation=0, fontsize=12, labelpad=-3)

            # hide panes and grid lines for a clean look
            ax.xaxis.pane.fill = False
            ax.yaxis.pane.fill = False
            ax.zaxis.pane.fill = False
            ax.xaxis.pane.set_edgecolor('white')
            ax.yaxis.pane.set_edgecolor('white')
            ax.zaxis.pane.set_edgecolor('white')
            ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)

    # plot data 'from above' in separator view
    def separator_view(self, ax):
        '''Top-down scatter of the data, one color per class.'''
        # set plotting limits (bug fix: second-axis limits now use column 1)
        xmax1 = copy.deepcopy(max(self.x[:, 0]))
        xmin1 = copy.deepcopy(min(self.x[:, 0]))
        xgap1 = (xmax1 - xmin1)*0.05
        xmin1 -= xgap1
        xmax1 += xgap1

        xmax2 = copy.deepcopy(max(self.x[:, 1]))
        xmin2 = copy.deepcopy(min(self.x[:, 1]))
        xgap2 = (xmax2 - xmin2)*0.05
        xmin2 -= xgap2
        xmax2 += xgap2

        # scatter points (bug fix: draw on the ax that was passed in, not on
        # whatever axes happens to be current via plt.scatter)
        classes = np.unique(self.y)
        count = 0
        for num in classes:
            inds = np.argwhere(self.y == num)
            inds = [s[0] for s in inds]
            ax.scatter(self.data[inds, 0], self.data[inds, 1], color=self.colors[int(count)], linewidth=1, marker='o', edgecolor='k', s=50)
            count += 1

        # clean up panel
        ax.set_xlim([xmin1, xmax1])
        ax.set_ylim([xmin2, xmax2])
        ax.set_xticks(np.arange(round(xmin1), round(xmax1) + 1, 1.0))
        ax.set_yticks(np.arange(round(xmin2), round(xmax2) + 1, 1.0))

        # label axes
        ax.set_xlabel(r'$x_1$', fontsize=12, labelpad=0)
        ax.set_ylabel(r'$x_2$', rotation=0, fontsize=12, labelpad=5)

    # plot points on contour
    def plot_pts_on_contour(self, ax, j, color):
        '''Draw the segment between descent steps j-1 and j on the contour plot.'''
        w_old = self.w_hist[j-1]
        w_new = self.w_hist[j]
        # colored segment drawn on top of a slightly thicker black outline
        ax.plot([w_old[0], w_new[0]], [w_old[1], w_new[1]], color=color, linewidth=3, alpha=1, zorder=2)
        ax.plot([w_old[0], w_new[0]], [w_old[1], w_new[1]], color='k', linewidth=3 + 1, alpha=1, zorder=1)

    ###### function plotting functions #######
    def plot_ls_cost(self, **kwargs):
        '''Plot the cost g as a contour (left panel) and 3d surface (right panel).'''
        # construct figure (bug fix: plt.figure instead of stray plt.subplots axes)
        fig = plt.figure(figsize=(8, 3))

        # create subplot with 2 panels
        gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
        ax1 = plt.subplot(gs[0], aspect='equal')
        ax2 = plt.subplot(gs[1], projection='3d')

        # pull user-defined args
        viewmax = 3
        if 'viewmax' in kwargs:
            viewmax = kwargs['viewmax']
        view = [20, 100]
        if 'view' in kwargs:
            view = kwargs['view']
        num_contours = 15
        if 'num_contours' in kwargs:
            num_contours = kwargs['num_contours']

        # make contour plot in left panel, surface plot in right panel
        self.contour_plot(ax1, viewmax, num_contours)
        self.surface_plot(ax2, viewmax, view)
        plt.show()

    ### visualize the surface plot of cost function ###
    def surface_plot(self, ax, wmax, view):
        '''Surface plot of self.g over [-1.1*wmax, 1.1*wmax]^2 at the given view.'''
        ##### Produce cost function surface #####
        wmax += wmax*0.1
        r = np.linspace(-wmax, wmax, 200)

        # create grid from plotting range
        w1_vals, w2_vals = np.meshgrid(r, r)
        w1_vals.shape = (len(r)**2, 1)
        w2_vals.shape = (len(r)**2, 1)
        w_ = np.concatenate((w1_vals, w2_vals), axis=1)
        g_vals = []
        for i in range(len(r)**2):
            g_vals.append(self.g(w_[i, :]))
        g_vals = np.asarray(g_vals)

        # reshape and plot the surface
        w1_vals.shape = (np.size(r), np.size(r))
        w2_vals.shape = (np.size(r), np.size(r))
        g_vals.shape = (np.size(r), np.size(r))

        # plot cost surface
        ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='w', rstride=25, cstride=25, linewidth=1, edgecolor='k', zorder=2)

        # clean up panel
        ax.xaxis.pane.fill = False
        ax.yaxis.pane.fill = False
        ax.zaxis.pane.fill = False
        ax.xaxis.pane.set_edgecolor('white')
        ax.yaxis.pane.set_edgecolor('white')
        ax.zaxis.pane.set_edgecolor('white')
        ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
        ax.set_xlabel(r'$w_0$', fontsize=12)
        ax.set_ylabel(r'$w_1$', fontsize=12, rotation=0)
        ax.set_title(r'$g\left(w_0,w_1\right)$', fontsize=13)
        ax.view_init(view[0], view[1])

    ### visualize contour plot of cost function ###
    def contour_plot(self, ax, wmax, num_contours):
        '''Draw about num_contours level curves of self.g over [-wmax, wmax]^2 on ax.'''
        #### define input space for function and evaluate ####
        w1 = np.linspace(-wmax, wmax, 100)
        w2 = np.linspace(-wmax, wmax, 100)
        w1_vals, w2_vals = np.meshgrid(w1, w2)
        w1_vals.shape = (len(w1)**2, 1)
        w2_vals.shape = (len(w2)**2, 1)
        h = np.concatenate((w1_vals, w2_vals), axis=1)
        func_vals = np.asarray([self.g(np.reshape(s, (2, 1))) for s in h])
        w1_vals.shape = (len(w1), len(w1))
        w2_vals.shape = (len(w2), len(w2))
        func_vals.shape = (len(w1), len(w2))

        ### make contour right plot - as well as horizontal and vertical axes ###
        # set level ridges: a few coarse levels near the top of the range,
        # then progressively refine toward the minimum until the requested
        # number of contours is used up
        levelmin = min(func_vals.flatten())
        levelmax = max(func_vals.flatten())
        cutoff = 0.5
        cutoff = (levelmax - levelmin)*cutoff
        numper = 3
        levels1 = np.linspace(cutoff, levelmax, numper)
        num_contours -= numper

        levels2 = np.linspace(levelmin, cutoff, min(num_contours, numper))
        levels = np.unique(np.append(levels1, levels2))
        num_contours -= numper
        while num_contours > 0:
            cutoff = levels[1]
            levels2 = np.linspace(levelmin, cutoff, min(num_contours, numper))
            levels = np.unique(np.append(levels2, levels))
            num_contours -= numper

        ax.contour(w1_vals, w2_vals, func_vals, levels=levels, colors='k')
        ax.contourf(w1_vals, w2_vals, func_vals, levels=levels, cmap='Blues')

        # clean up panel
        ax.set_xlabel('$w_0$', fontsize=12)
        ax.set_ylabel('$w_1$', fontsize=12, rotation=0)
        ax.set_title(r'$g\left(w_0,w_1\right)$', fontsize=13)
        ax.axhline(y=0, color='k', zorder=0, linewidth=0.5)
        ax.axvline(x=0, color='k', zorder=0, linewidth=0.5)
        ax.set_xlim([-wmax, wmax])
        ax.set_ylim([-wmax, wmax])
# gradient descent function - inputs: g (input function), alpha_choice (a fixed
# steplength, or the string 'diminishing' for a 1/k schedule), max_its (number
# of iterations), w (initialization)
def gradient_descent(g, alpha_choice, max_its, w):
    '''Run gradient descent on g and return (weight_history, cost_history).'''
    # flatten the input function so nested parameter structures behave like a
    # single weight vector; the returned w is the flattened initialization
    g_flat, unflatten, w = flatten_func(g, w)

    # autograd produces a function that returns both the cost and its gradient
    # in one evaluation (the cost comes 'for free' with the gradient)
    gradient = value_and_grad(g_flat)

    # containers for the weight and cost histories
    weight_history = []
    cost_history = []
    for k in range(1, max_its + 1):
        # steplength: 1/k under the diminishing rule, otherwise the fixed value
        step = 1/float(k) if alpha_choice == 'diminishing' else alpha_choice

        # evaluate gradient, record current (unflattened) weights and cost
        cost_eval, grad_eval = gradient(w)
        weight_history.append(unflatten(w))
        cost_history.append(cost_eval)

        # take the descent step
        w = w - step*grad_eval

    # record the final weights, evaluating their cost directly since no
    # gradient call happens after the last step
    weight_history.append(unflatten(w))
    cost_history.append(g_flat(w))
    return weight_history, cost_history
class cost_visualizer:
    '''
    Visualize an input cost function based on data.

    The input data matrix is transposed on construction so each row becomes a
    datapoint: all but the last column are treated as inputs x, the last
    column as outputs/labels y.
    '''
    #### initialize ####
    def __init__(self,data):
        # grab input - transpose so rows are datapoints
        data = data.T
        self.x = data[:,:-1]
        self.y = data[:,-1]

    # counting cost - number of misclassifications of the linear classifier
    def counting_cost(self,w):
        cost = 0
        for p in range(0,len(self.y)):
            x_p = self.x[p,:]
            y_p = self.y[p]
            # linear model: bias plus inner product of input and weights
            a_p = w[0] + np.sum([u*v for (u,v) in zip(x_p,w[1:])])
            # one unit of cost whenever the predicted sign disagrees with the label
            if np.sign(a_p) != y_p:
                cost += 1
        return float(cost)

    # log-loss (logistic) cost
    def log_loss(self,w):
        cost = 0
        for p in range(0,len(self.y)):
            x_p = self.x[p,:]
            y_p = self.y[p]
            a_p = w[0] + np.sum([u*v for (u,v) in zip(x_p,w[1:])])
            cost += np.log(1 + np.exp(-y_p*a_p))
        return cost

    # tanh non-convex least squares
    def tanh_least_squares(self,w):
        cost = 0
        for p in range(0,len(self.y)):
            x_p = self.x[p,:]
            y_p = self.y[p]
            a_p = w[0] + np.sum([u*v for (u,v) in zip(x_p,w[1:])])
            cost +=(np.tanh(a_p) - y_p)**2
        return cost

    ###### function plotting functions #######
    def plot_costs(self,**kwargs):
        '''
        Plot the counting cost, tanh least squares, and log-loss surfaces side
        by side.  Optional kwargs: 'viewmax' (half-width of the plotting
        window, default 3) and 'view' ([elevation, azimuth] camera angles,
        default [20,100]).
        '''
        # construct figure (a bare figure: plt.subplots(1,2) previously left
        # two unused 2d axes behind the 3d panels created below)
        fig = plt.figure(figsize = (8,3))

        # create subplot with 3 panels
        gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
        ax1 = plt.subplot(gs[0],projection='3d');
        ax2 = plt.subplot(gs[1],projection='3d');
        ax3 = plt.subplot(gs[2],projection='3d');

        # pull user-defined args
        viewmax = 3
        if 'viewmax' in kwargs:
            viewmax = kwargs['viewmax']
        view = [20,100]
        if 'view' in kwargs:
            view = kwargs['view']

        # make surface plot in each panel
        g = self.counting_cost
        self.surface_plot(g,ax1,viewmax,view)
        g = self.tanh_least_squares
        self.surface_plot(g,ax2,viewmax,view)
        g = self.log_loss
        self.surface_plot(g,ax3,viewmax,view)
        plt.show()

    ### visualize the surface plot of cost function ###
    def surface_plot(self,g,ax,wmax,view):
        '''
        Draw the surface of cost function g over [-wmax,wmax]^2 on the 3d
        axes ax, with camera angles view = [elevation, azimuth].
        '''
        ##### produce cost function surface #####
        r = np.linspace(-wmax,wmax,300)

        # create grid from plotting range and evaluate g at every grid point
        w1_vals,w2_vals = np.meshgrid(r,r)
        w1_vals.shape = (len(r)**2,1)
        w2_vals.shape = (len(r)**2,1)
        w_ = np.concatenate((w1_vals,w2_vals),axis = 1)
        g_vals = []
        for i in range(len(r)**2):
            g_vals.append(g(w_[i,:]))
        g_vals = np.asarray(g_vals)
        w1_vals.shape = (np.size(r),np.size(r))
        w2_vals.shape = (np.size(r),np.size(r))

        ### is this a counting cost? if so re-calculate ###
        # a discrete (counting-style) cost takes only a few distinct values,
        # so it is drawn level-by-level instead of as one smooth sheet
        levels = np.unique(g_vals)
        if np.size(levels) < 30:
            # plot each level of the counting cost
            for u in levels:
                # make copy of cost and nan out all non level entries
                z = g_vals.copy()
                ind = np.argwhere(z != u)
                ind = [v[0] for v in ind]
                z[ind] = np.nan

                # plot the current level
                z.shape = (len(r),len(r))
                ax.plot_surface(w1_vals,w2_vals,z,alpha = 0.4,color = '#696969',zorder = 0,shade = True,linewidth=0)
        else: # smooth cost function, plot usual
            # reshape and plot the surface, as well as where the zero-plane is
            g_vals.shape = (np.size(r),np.size(r))

            # plot cost surface
            ax.plot_surface(w1_vals,w2_vals,g_vals,alpha = 0.1,color = 'w',rstride=25, cstride=25,linewidth=1,edgecolor = 'k',zorder = 2)

        ### clean up panel ###
        ax.xaxis.pane.fill = False
        ax.yaxis.pane.fill = False
        ax.zaxis.pane.fill = False
        ax.xaxis.pane.set_edgecolor('white')
        ax.yaxis.pane.set_edgecolor('white')
        ax.zaxis.pane.set_edgecolor('white')
        ax.xaxis._axinfo["grid"]['color'] =  (1,1,1,0)
        ax.yaxis._axinfo["grid"]['color'] =  (1,1,1,0)
        ax.zaxis._axinfo["grid"]['color'] =  (1,1,1,0)
        ax.set_xlabel(r'$w_0$',fontsize = 12)
        ax.set_ylabel(r'$w_1$',fontsize = 12,rotation = 0)
        ax.view_init(view[0],view[1])
class static_visualizer:
    '''
    Illustrate a run of your preferred optimization algorithm on a one or two-input function. Run
    the algorithm first, and input the resulting weight history into this wrapper.
    '''
    # compare cost histories from multiple runs
    def plot_cost_histories(self,histories,start,**kwargs):
        '''
        Plot one curve per cost history, beginning at iteration 'start'.

        Optional kwargs: 'labels' (legend labels: first run / remaining runs),
        'points' (also scatter the individual values), 'xlabel' / 'ylabel'
        (axis labels), 'anchor' (legend bbox anchor).
        '''
        # plotting colors, one per history in order (at most 5 histories)
        colors = ['k','magenta','aqua','blueviolet','chocolate']

        # initialize figure
        fig = plt.figure(figsize = (10,3))

        # create subplot with 1 panel
        gs = gridspec.GridSpec(1, 1)
        ax = plt.subplot(gs[0]);

        # any labels to add?
        labels = [' ',' ']
        if 'labels' in kwargs:
            labels = kwargs['labels']

        # plot points on cost function plot too?
        points = False
        if 'points' in kwargs:
            points = kwargs['points']

        # run through input histories, plotting each beginning at 'start' iteration
        for c in range(len(histories)):
            history = histories[c]

            # the first history gets labels[0]; all the others share labels[1]
            label = 0
            if c == 0:
                label = labels[0]
            else:
                label = labels[1]

            # check if a label exists, if so add it to the plot
            if np.size(label) == 0:
                ax.plot(np.arange(start,len(history),1),history[start:],linewidth = 3*(0.8)**(c),color = colors[c])
            else:
                ax.plot(np.arange(start,len(history),1),history[start:],linewidth = 3*(0.8)**(c),color = colors[c],label = label)

            # check if points should be plotted for visualization purposes
            if points == True:
                ax.scatter(np.arange(start,len(history),1),history[start:],s = 90,color = colors[c],edgecolor = 'w',linewidth = 2,zorder = 3)

        # clean up panel
        xlabel = 'step $k$'
        if 'xlabel' in kwargs:
            xlabel = kwargs['xlabel']
        ylabel = r'$g\left(\mathbf{w}^k\right)$'
        if 'ylabel' in kwargs:
            ylabel = kwargs['ylabel']
        ax.set_xlabel(xlabel,fontsize = 14)
        ax.set_ylabel(ylabel,fontsize = 14,rotation = 0,labelpad = 25)

        # 'label' still holds its value from the last loop iteration; the
        # legend is drawn whenever that label is non-empty
        if np.size(label) > 0:
            anchor = (1,1)
            if 'anchor' in kwargs:
                anchor = kwargs['anchor']
            plt.legend(loc='upper right', bbox_to_anchor=anchor)
        ax.set_xlim([start - 0.5,len(history) - 0.5])
        plt.show()

    ### makes color spectrum for plotted run points - from green (start) to red (stop)
    def make_colorspec(self,w_hist):
        '''
        Build a (len(w_hist) x 3) RGB array fading from green at the first
        weight to red at the last.
        '''
        # red channel: ramp 0->1 over the first half of the run, hold 1 after;
        # green channel is the same ramp flipped; blue stays 0 throughout
        s = np.linspace(0,1,len(w_hist[:round(len(w_hist)/2)]))
        s.shape = (len(s),1)
        t = np.ones(len(w_hist[round(len(w_hist)/2):]))
        t.shape = (len(t),1)
        s = np.vstack((s,t))
        colorspec = np.concatenate((s,np.flipud(s)),1)
        colorspec = np.concatenate((colorspec,np.zeros((len(s),1))),1)
        return colorspec
```
|
```javascript
/**
* @license Apache-2.0
*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
'use strict';
// MAIN //
/**
* Revives a JSON-serialized regular expression.
*
* @param {string} key - key
* @param {*} value - value
* @returns {(*|RegExp)} regular expression
*
* @example
* var parseJSON = require( '@stdlib/utils/parse-json' );
*
* var str = '{"type":"RegExp","pattern":"ab+c","flags":""}';
*
* var re = parseJSON( str, reviver );
* // returns <RegExp>
*/
function reviver( key, value ) {
	// Only objects shaped like {type:'RegExp', pattern:...} are revived. An
	// empty pattern string ("") is a valid regular expression, so test the
	// type of `pattern` rather than its truthiness.
	if ( value && value.type === 'RegExp' && typeof value.pattern === 'string' ) {
		return new RegExp( value.pattern, value.flags );
	}
	return value;
}


// EXPORTS //

module.exports = reviver;
```
|
John "Big John" Lee (March 5, 1845 – January 16, 1915) was a farmer and politician in Ontario, Canada. He represented Kent East in the Legislative Assembly of Ontario from 1901 to 1904 as a Liberal.
The son of John and Sarah Lee, both natives of Ireland, he was born in Orford township and was educated there. In 1865, he married Rebecca Attridge. He was a Methodist. Lee imported Lincoln sheep from England to raise on his farm.
Lee served on the Orford township council from 1869 to 1870, was deputy reeve in 1872, was reeve from 1873 to 1878 and was warden for Kent County in 1875. He was also a justice of the peace. He was first elected to the Ontario assembly in a 1901 by-election held following the death of Robert Ferguson.
References
External links
1845 births
1915 deaths
Ontario Liberal Party MPPs
|
```c++
/*!
@file
Defines `boost::hana::Functor`.
@copyright Louis Dionne 2013-2017
(See accompanying file LICENSE.md or copy at path_to_url
*/
#ifndef BOOST_HANA_CONCEPT_FUNCTOR_HPP
#define BOOST_HANA_CONCEPT_FUNCTOR_HPP
#include <boost/hana/fwd/concept/functor.hpp>
#include <boost/hana/adjust_if.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/core/default.hpp>
#include <boost/hana/core/tag_of.hpp>
#include <boost/hana/detail/integral_constant.hpp>
#include <boost/hana/transform.hpp>
BOOST_HANA_NAMESPACE_BEGIN
// A data type models `Functor` when at least one of its `transform` or
// `adjust_if` methods has a non-default implementation (i.e. the
// corresponding `*_impl` specialization is not the library-provided
// default); either method can be derived from the other.
template <typename F>
struct Functor
    : hana::integral_constant<bool,
        !is_default<transform_impl<typename tag_of<F>::type>>::value ||
        !is_default<adjust_if_impl<typename tag_of<F>::type>>::value
    >
{ };
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_CONCEPT_FUNCTOR_HPP
```
|
Mandvi is one of the 182 Legislative Assembly constituencies of Gujarat state in India. It is part of Kachchh district and is a segment of Kachchh Lok Sabha constituency. It is numbered as 2-Mandvi.
List of segments
This assembly seat represents the following segments:
Mandvi Taluka
Mundra Taluka
Members of Legislative Assembly
Election results
2022
2017
2012
2007
2002
1998
1995
1990
1985
1980
1975
1972
1967
1962
References
External links
Assembly constituencies of Gujarat
Politics of Kutch district
|
```objective-c
// Protocol Buffers - Google's data interchange format
// path_to_url
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: robinson@google.com (Will Robinson)
//
// Generates Python code for a given .proto file.
#ifndef GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__
#define GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__
#include <string>
#include <google/protobuf/compiler/code_generator.h>
#include <google/protobuf/stubs/mutex.h>
#include <google/protobuf/stubs/common.h>
namespace google {
namespace protobuf {
class Descriptor;
class EnumDescriptor;
class EnumValueDescriptor;
class FieldDescriptor;
class OneofDescriptor;
class ServiceDescriptor;
namespace io { class Printer; }
namespace compiler {
namespace python {
// CodeGenerator implementation for generated Python protocol buffer classes.
// If you create your own protocol compiler binary and you want it to support
// Python output, you can do so by registering an instance of this
// CodeGenerator with the CommandLineInterface in your main() function.
class LIBPROTOC_EXPORT Generator : public CodeGenerator {
 public:
  Generator();
  virtual ~Generator();

  // CodeGenerator methods.
  virtual bool Generate(const FileDescriptor* file,
                        const string& parameter,
                        GeneratorContext* generator_context,
                        string* error) const;

 private:
  // Top-of-file output: imports and the module's FileDescriptor.
  void PrintImports() const;
  void PrintFileDescriptor() const;

  // Enum output helpers.
  void PrintTopLevelEnums() const;
  void PrintAllNestedEnumsInFile() const;
  void PrintNestedEnums(const Descriptor& descriptor) const;
  void PrintEnum(const EnumDescriptor& enum_descriptor) const;

  // Field- and extension-descriptor output helpers.
  void PrintTopLevelExtensions() const;
  void PrintFieldDescriptor(
      const FieldDescriptor& field, bool is_extension) const;
  // Walks the message's fields (or extensions) via the supplied count/getter
  // member-function pointers and prints them into `list_variable_name`.
  void PrintFieldDescriptorsInDescriptor(
      const Descriptor& message_descriptor,
      bool is_extension,
      const string& list_variable_name,
      int (Descriptor::*CountFn)() const,
      const FieldDescriptor* (Descriptor::*GetterFn)(int) const) const;
  void PrintFieldsInDescriptor(const Descriptor& message_descriptor) const;
  void PrintExtensionsInDescriptor(const Descriptor& message_descriptor) const;

  // Message descriptor / message class output helpers.
  void PrintMessageDescriptors() const;
  void PrintDescriptor(const Descriptor& message_descriptor) const;
  void PrintNestedDescriptors(const Descriptor& containing_descriptor) const;
  void PrintMessages() const;
  void PrintMessage(const Descriptor& message_descriptor, const string& prefix,
                    std::vector<string>* to_register) const;
  void PrintNestedMessages(const Descriptor& containing_descriptor,
                           const string& prefix,
                           std::vector<string>* to_register) const;

  // Fix-up helpers for foreign fields and containing types (patch up
  // cross-references between already-printed descriptors).
  void FixForeignFieldsInDescriptors() const;
  void FixForeignFieldsInDescriptor(
      const Descriptor& descriptor,
      const Descriptor* containing_descriptor) const;
  void FixForeignFieldsInField(const Descriptor* containing_type,
                               const FieldDescriptor& field,
                               const string& python_dict_name) const;
  void AddMessageToFileDescriptor(const Descriptor& descriptor) const;
  void AddEnumToFileDescriptor(const EnumDescriptor& descriptor) const;
  void AddExtensionToFileDescriptor(const FieldDescriptor& descriptor) const;
  void AddServiceToFileDescriptor(const ServiceDescriptor& descriptor) const;
  string FieldReferencingExpression(const Descriptor* containing_type,
                                    const FieldDescriptor& field,
                                    const string& python_dict_name) const;
  template <typename DescriptorT>
  void FixContainingTypeInDescriptor(
      const DescriptorT& descriptor,
      const Descriptor* containing_descriptor) const;
  void FixForeignFieldsInExtensions() const;
  void FixForeignFieldsInExtension(
      const FieldDescriptor& extension_field) const;
  void FixForeignFieldsInNestedExtensions(const Descriptor& descriptor) const;

  // Service descriptor, class and stub output helpers.
  void PrintServices() const;
  void PrintServiceDescriptors() const;
  void PrintServiceDescriptor(const ServiceDescriptor& descriptor) const;
  void PrintServiceClass(const ServiceDescriptor& descriptor) const;
  void PrintServiceStub(const ServiceDescriptor& descriptor) const;
  void PrintDescriptorKeyAndModuleName(
      const ServiceDescriptor& descriptor) const;

  void PrintEnumValueDescriptor(const EnumValueDescriptor& descriptor) const;
  string OptionsValue(const string& class_name,
                      const string& serialized_options) const;
  // Presumably true when the file being generated is descriptor.proto
  // itself -- confirm against the implementation.
  bool GeneratingDescriptorProto() const;

  // Naming helpers for module-level Python identifiers.
  template <typename DescriptorT>
  string ModuleLevelDescriptorName(const DescriptorT& descriptor) const;
  string ModuleLevelMessageName(const Descriptor& descriptor) const;
  string ModuleLevelServiceDescriptorName(
      const ServiceDescriptor& descriptor) const;

  template <typename DescriptorT, typename DescriptorProtoT>
  void PrintSerializedPbInterval(
      const DescriptorT& descriptor, DescriptorProtoT& proto) const;

  // Options fix-up passes for the various descriptor kinds.
  void FixAllDescriptorOptions() const;
  void FixOptionsForField(const FieldDescriptor& field) const;
  void FixOptionsForOneof(const OneofDescriptor& oneof) const;
  void FixOptionsForEnum(const EnumDescriptor& descriptor) const;
  void FixOptionsForMessage(const Descriptor& descriptor) const;

  void CopyPublicDependenciesAliases(
      const string& copy_from, const FileDescriptor* file) const;

  // Very coarse-grained lock to ensure that Generate() is reentrant.
  // Guards file_, printer_ and file_descriptor_serialized_.
  mutable Mutex mutex_;
  mutable const FileDescriptor* file_;  // Set in Generate().  Under mutex_.
  mutable string file_descriptor_serialized_;
  mutable io::Printer* printer_;  // Set in Generate().  Under mutex_.

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Generator);
};
} // namespace python
} // namespace compiler
} // namespace protobuf
} // namespace google
#endif // GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__
```
|
```php
<?php
/*
*
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
*/
namespace Google\Service\SA360;
/**
 * Model for the Search Ads 360 `ConversionAction` resource.
 *
 * Scalar fields are plain public properties; nested message fields are
 * exposed through typed getters/setters, with the protected
 * `$...Type` / `$...DataType` pairs describing them to \Google\Model for
 * (de)serialization.
 *
 * NOTE(review): several referenced class names below appear redacted
 * ("your_sha256_hash..."); confirm them against the generated sources.
 */
class GoogleAdsSearchads360V0ResourcesConversionAction extends \Google\Model
{
  /**
   * @var string
   */
  public $appId;
  protected $attributionModelSettingsType = your_sha256_hashSettings::class;
  protected $attributionModelSettingsDataType = '';
  /**
   * @var string
   */
  public $category;
  /**
   * @var string
   */
  public $clickThroughLookbackWindowDays;
  /**
   * @var string
   */
  public $creationTime;
  protected $floodlightSettingsType = your_sha256_hashgs::class;
  protected $floodlightSettingsDataType = '';
  /**
   * @var string
   */
  public $id;
  /**
   * @var bool
   */
  public $includeInClientAccountConversionsMetric;
  /**
   * @var bool
   */
  public $includeInConversionsMetric;
  /**
   * @var string
   */
  public $name;
  /**
   * @var string
   */
  public $ownerCustomer;
  /**
   * @var bool
   */
  public $primaryForGoal;
  /**
   * @var string
   */
  public $resourceName;
  /**
   * @var string
   */
  public $status;
  /**
   * @var string
   */
  public $type;
  protected $valueSettingsType = GoogleAdsSearchads360V0ResourcesConversionActionValueSettings::class;
  protected $valueSettingsDataType = '';

  /**
   * @param string
   */
  public function setAppId($appId)
  {
    $this->appId = $appId;
  }
  /**
   * @return string
   */
  public function getAppId()
  {
    return $this->appId;
  }
  /**
   * @param your_sha256_hashSettings
   */
  public function setAttributionModelSettings(your_sha256_hashSettings $attributionModelSettings)
  {
    $this->attributionModelSettings = $attributionModelSettings;
  }
  /**
   * @return your_sha256_hashSettings
   */
  public function getAttributionModelSettings()
  {
    return $this->attributionModelSettings;
  }
  /**
   * @param string
   */
  public function setCategory($category)
  {
    $this->category = $category;
  }
  /**
   * @return string
   */
  public function getCategory()
  {
    return $this->category;
  }
  /**
   * @param string
   */
  public function setClickThroughLookbackWindowDays($clickThroughLookbackWindowDays)
  {
    $this->clickThroughLookbackWindowDays = $clickThroughLookbackWindowDays;
  }
  /**
   * @return string
   */
  public function getClickThroughLookbackWindowDays()
  {
    return $this->clickThroughLookbackWindowDays;
  }
  /**
   * @param string
   */
  public function setCreationTime($creationTime)
  {
    $this->creationTime = $creationTime;
  }
  /**
   * @return string
   */
  public function getCreationTime()
  {
    return $this->creationTime;
  }
  /**
   * @param your_sha256_hashgs
   */
  public function setFloodlightSettings(your_sha256_hashgs $floodlightSettings)
  {
    $this->floodlightSettings = $floodlightSettings;
  }
  /**
   * @return your_sha256_hashgs
   */
  public function getFloodlightSettings()
  {
    return $this->floodlightSettings;
  }
  /**
   * @param string
   */
  public function setId($id)
  {
    $this->id = $id;
  }
  /**
   * @return string
   */
  public function getId()
  {
    return $this->id;
  }
  /**
   * @param bool
   */
  public function setIncludeInClientAccountConversionsMetric($includeInClientAccountConversionsMetric)
  {
    $this->includeInClientAccountConversionsMetric = $includeInClientAccountConversionsMetric;
  }
  /**
   * @return bool
   */
  public function getIncludeInClientAccountConversionsMetric()
  {
    return $this->includeInClientAccountConversionsMetric;
  }
  /**
   * @param bool
   */
  public function setIncludeInConversionsMetric($includeInConversionsMetric)
  {
    $this->includeInConversionsMetric = $includeInConversionsMetric;
  }
  /**
   * @return bool
   */
  public function getIncludeInConversionsMetric()
  {
    return $this->includeInConversionsMetric;
  }
  /**
   * @param string
   */
  public function setName($name)
  {
    $this->name = $name;
  }
  /**
   * @return string
   */
  public function getName()
  {
    return $this->name;
  }
  /**
   * @param string
   */
  public function setOwnerCustomer($ownerCustomer)
  {
    $this->ownerCustomer = $ownerCustomer;
  }
  /**
   * @return string
   */
  public function getOwnerCustomer()
  {
    return $this->ownerCustomer;
  }
  /**
   * @param bool
   */
  public function setPrimaryForGoal($primaryForGoal)
  {
    $this->primaryForGoal = $primaryForGoal;
  }
  /**
   * @return bool
   */
  public function getPrimaryForGoal()
  {
    return $this->primaryForGoal;
  }
  /**
   * @param string
   */
  public function setResourceName($resourceName)
  {
    $this->resourceName = $resourceName;
  }
  /**
   * @return string
   */
  public function getResourceName()
  {
    return $this->resourceName;
  }
  /**
   * @param string
   */
  public function setStatus($status)
  {
    $this->status = $status;
  }
  /**
   * @return string
   */
  public function getStatus()
  {
    return $this->status;
  }
  /**
   * @param string
   */
  public function setType($type)
  {
    $this->type = $type;
  }
  /**
   * @return string
   */
  public function getType()
  {
    return $this->type;
  }
  /**
   * @param GoogleAdsSearchads360V0ResourcesConversionActionValueSettings
   */
  public function setValueSettings(GoogleAdsSearchads360V0ResourcesConversionActionValueSettings $valueSettings)
  {
    $this->valueSettings = $valueSettings;
  }
  /**
   * @return GoogleAdsSearchads360V0ResourcesConversionActionValueSettings
   */
  public function getValueSettings()
  {
    return $this->valueSettings;
  }
}
// Adding a class alias for backwards compatibility with the previous class name.
// (The redacted second argument had lost its opening quote, which is a parse
// error; restored per the library's Google_Service_<Api>_<Class> convention.)
class_alias(GoogleAdsSearchads360V0ResourcesConversionAction::class, 'Google_Service_SA360_GoogleAdsSearchads360V0ResourcesConversionAction');
```
|
```html
<div class="card">
<div class="header">
<h2>INTRODUCTION</h2>
</div>
<div class="body">
<p>
<b>AdminBSB - Material Design is a fully responsive and free admin template.</b>
</p>
<p>
                It was developed with the powers of the <a href="path_to_url" target="_blank">Bootstrap 3.x Framework</a> and <a href="path_to_url" target="_blank">Google Material Design</a>.
                We chose many commonly used <a href="path_to_url" target="_blank">jQuery</a> plugins compatible with Bootstrap 3.x for you, and we restyled the theme according to the rules of Material Design.
                We organized all files to be easily editable. This documentation will guide you through installing the template and exploring the various components.
</p>
</div>
</div>
```
|
```shell
Granting `root` access to a user
The `sticky bit` permission
Understanding `umask`
Running a command as another local user
`usermod` recipes
```
|
```java
*
* path_to_url
*
* Unless required by applicable law or agreed to in writing, software
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package org.flowable.rest.service.api.runtime;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.flowable.engine.impl.cmd.ChangeDeploymentTenantIdCmd;
import org.flowable.engine.runtime.ProcessInstance;
import org.flowable.engine.test.Deployment;
import org.flowable.rest.service.BaseSpringRestTestCase;
import org.flowable.rest.service.api.RestUrls;
import org.flowable.task.api.Task;
import org.junit.Test;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
/**
* Test for REST-operation related to the activity instance query resource.
*
* @author Tijs Rademakers
*/
public class ActivityInstanceCollectionResourceTest extends BaseSpringRestTestCase {

    /**
     * Test querying activity instance. GET runtime/activity-instances
     */
    @Test
    @Deployment(resources = { "org/flowable/rest/service/api/twoTaskProcess.bpmn20.xml" })
    public void testQueryActivityInstances() throws Exception {
        // First instance (default tenant): complete its first user task so one
        // user-task activity is finished and the instance advances.
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess");
        Task task = taskService.createTaskQuery().processInstanceId(processInstance.getId()).singleResult();
        taskService.complete(task.getId());

        // Set tenant on deployment
        managementService.executeCommand(new ChangeDeploymentTenantIdCmd(deploymentId, "myTenant"));

        // Second instance runs under the tenant id that was just set.
        ProcessInstance processInstance2 = runtimeService.startProcessInstanceByKeyAndTenantId("oneTaskProcess", "myTenant");

        String url = RestUrls.createRelativeResourceUrl(RestUrls.URL_ACTIVITY_INSTANCES);

        // Filter by activity id, optionally combined with finished state.
        assertResultsPresentInDataResponse(url + "?activityId=processTask", 2, "processTask");
        assertResultsPresentInDataResponse(url + "?activityId=processTask&finished=true", 1, "processTask");
        assertResultsPresentInDataResponse(url + "?activityId=processTask&finished=false", 1, "processTask");
        assertResultsPresentInDataResponse(url + "?activityId=processTask2", 1, "processTask2");
        assertResultsPresentInDataResponse(url + "?activityId=processTask3", 0);

        // Filter by activity name (URL-encoded space).
        assertResultsPresentInDataResponse(url + "?activityName=Process%20task", 2, "processTask");
        assertResultsPresentInDataResponse(url + "?activityName=Process%20task2", 1, "processTask2");
        assertResultsPresentInDataResponse(url + "?activityName=Process%20task3", 0);

        // Filter by activity type.
        assertResultsPresentInDataResponse(url + "?activityType=userTask", 3, "processTask", "processTask2");
        assertResultsPresentInDataResponse(url + "?activityType=startEvent", 2, "theStart");
        assertResultsPresentInDataResponse(url + "?activityType=receiveTask", 0);

        // Filter by process instance / process definition.
        assertResultsPresentInDataResponse(url + "?processInstanceId=" + processInstance.getId(), 5, "theStart", "flow1", "processTask", "flow2", "processTask2");
        assertResultsPresentInDataResponse(url + "?processInstanceId=" + processInstance2.getId(), 3, "theStart", "flow1", "processTask");
        assertResultsPresentInDataResponse(url + "?processDefinitionId=" + processInstance.getProcessDefinitionId(), 8,
                "theStart", "flow1", "processTask", "flow2", "processTask2");

        // Filter by task assignee (assignees come from the deployed process model).
        assertResultsPresentInDataResponse(url + "?taskAssignee=kermit", 2, "processTask");
        assertResultsPresentInDataResponse(url + "?taskAssignee=fozzie", 1, "processTask2");
        assertResultsPresentInDataResponse(url + "?taskAssignee=fozzie2", 0);

        // Without tenant ID, only activities for processinstance1
        assertResultsPresentInDataResponse(url + "?withoutTenantId=true", 5);

        // Tenant id
        assertResultsPresentInDataResponse(url + "?tenantId=myTenant", 3, "theStart", "flow1", "processTask");
        assertResultsPresentInDataResponse(url + "?tenantId=anotherTenant");

        // Tenant id like
        assertResultsPresentInDataResponse(url + "?tenantIdLike=" + encode("%enant"), 3, "theStart", "flow1", "processTask");
        assertResultsPresentInDataResponse(url + "?tenantIdLike=anotherTenant");
    }

    /**
     * Executes a GET on the given relative URL, asserts an OK response whose
     * "data" array holds exactly {@code numberOfResultsExpected} entries, and
     * checks that every id in {@code expectedActivityIds} occurs among the
     * returned activityIds (each response entry can satisfy at most one
     * expected entry, so duplicates in the expectation must each be matched).
     */
    protected void assertResultsPresentInDataResponse(String url, int numberOfResultsExpected, String... expectedActivityIds) throws JsonProcessingException, IOException {

        // Do the actual call
        CloseableHttpResponse response = executeRequest(new HttpGet(SERVER_URL_PREFIX + url), HttpStatus.SC_OK);
        JsonNode dataNode = objectMapper.readTree(response.getEntity().getContent()).get("data");
        closeResponse(response);
        assertThat(dataNode).hasSize(numberOfResultsExpected);

        // Check presence of ID's
        if (expectedActivityIds != null) {
            List<String> toBeFound = new ArrayList<>(Arrays.asList(expectedActivityIds));
            Iterator<JsonNode> it = dataNode.iterator();
            while (it.hasNext()) {
                String activityId = it.next().get("activityId").textValue();
                toBeFound.remove(activityId);
            }
            assertThat(toBeFound).as("Not all entries have been found in result, missing: " + StringUtils.join(toBeFound, ", ")).isEmpty();
        }
    }
}
```
|
The Thornthwaite Inheritance is a macabre children's crime novel by British author Gareth P. Jones. It was published in 2009.
Plot summary
Ovid and Lorelli Thornthwaite are thirteen-year-old twins and they are very unusual. They wear only black, eat only bland food, listen to and play only sombre music and have no electric appliances other than light bulbs in their house. But what is even stranger is their desire to kill each other! When Lorelli and Ovid create a truce on their 13th birthday, Lorelli brings a lawyer into the house to add to their deceased parents' will. If one of the twins kills the other before their 16th birthday, the day on which they inherit half of the Thornthwaite's massive inheritance, the other will immediately be cut out of the will. But bizarre murder attempts continue to be made, and the twins, though deeply suspicious of each other, work together to uncover the explanation. The book ends with the twins promising to stop trying to kill each other and hoping for a better life, after they discover the culprit, who is killed by a contraption designed to kill the twins.
Critical reception
The story was quite well received by critics. It won the Doncaster Book Award, and also the Rotherham Book Awards. The Times said that the book is 'surely material for a film'. BooksforKeeps noted its similarity to Lemony Snicket's A Series of Unfortunate Events but thought it was less repetitive and manipulative towards the reader. TheBookBag praised the likeable characters, but noted the excessive use of plot twists, and thought that the style of writing was dull.
Adaptations
A musical based on the book premiered at the Erindale Theatre in Canberra, Australia, 13–18 September 2020, with script, production and direction by Shaylie Maskell, and music written and conducted by Caleb Wells, for the new Canberra theatre company Green Oak Theatre Company.
References
2009 British novels
British children's novels
Black comedy books
Novels about orphans
2009 children's books
Bloomsbury Publishing books
|
Maria Colón Sánchez (1926 - November 25, 1989) a.k.a. "La Madrina" (The Godmother), was an activist and politician who, in 1988, became the first Hispanic woman elected to the Connecticut General Assembly. She was also the founder of the Puerto Rican Parade Committee in 1964 and co-founded La Casa de Puerto Rico, the Society of Legal Services, the Spanish-American Merchants Association, the Puerto Rican Businessmen Association, and the Community Renewal Team.
Early years
Colón Sánchez (birth name: Maria Clemencia Colón Sánchez) was born in Comerío, Puerto Rico. The oldest of five siblings, she received her primary education in her hometown. Her family's poor economic situation forced her to quit school in the 8th grade, in order to help her parents with the caring of her brothers and sister. In 1954, Colón Sánchez moved to Hartford, Connecticut, to live with her aunt. She held various jobs, and her work in the tobacco fields enabled her to send money to her family in Puerto Rico.
Political activist
Colón Sánchez and her husband met political activist Olga Mele, who in 1954 was involved with the Puerto Rican community and was instrumental in convincing Puerto Ricans to register as voters for the upcoming elections.
Colón Sánchez and Mele, who were Catholics, complained that they couldn't confess at their local church (St. Peter's Church) because there were no Spanish-speaking priests. This resulted in the appointment of a priest in charge of the church's Hispanic worshipers. In 1959, Sánchez, Mele, and others at Sacred Heart Church demanded the removal of another priest from the parish, because he refused to move the Spanish-language mass out of the church basement. The petitioners' demands were met by the Chancery.
Colón Sánchez saved enough money to open her own storefront, "Maria's News Stand," on Albany Avenue in Hartford. The location would also serve her as an office for much of her political work. In 1964, she founded the Puerto Rican Parade Committee and in 1965, she was elected treasurer of the "Puerto Rican Democrats of Hartford," a political organization which she helped organize. In 1967, she organized the Spanish Action Coalition, an advocacy network.
The Comanchero riot
On August 10, 1969, a bar brawl erupted between French Canadians and Puerto Ricans in Hartford. The brawl turned into a riot known as the "Comanchero riot" and was quickly classified as a political incident. Nearly 150 men gathered at the intersection of Park and Main streets, in order to air their social and economic grievances. Colón Sánchez, who helped to maintain communication with the police, the firefighters, and the general public, organized a meeting between the men and City Councilmen Nicholas Carbone, George Athanson and City Manager Elisha Freedman.
In this meeting, the elected officials heard many complaints about the discrimination, which Latinos were being subjected to, in the city of Hartford. This included police brutality, even in instances where Latinos came to the aid of the officers. During this meeting, Councilman Athanson urged Puerto Ricans to run a candidate for city council.
Also after this meeting, the Hartford Foundation donated $78,640 to the Greater Hartford Community Council to address Puerto Rican needs. Using the funds from the Hartford Foundation, Colón Sánchez and her colleagues transformed the Spanish Action Coalition into La Casa de Puerto Rico, the community's oldest social service agency.
Educational reform for Bilingual Education
In 1971, Colón Sánchez, with the help of Perry Alan Zirkel, a professor at the University of Hartford, conceived the idea of creating a federally funded teacher recruitment program. The program, known as the Teachers Corps, hired Spanish-speaking teachers who could address the educational needs of Latino students, many of whom spoke Spanish as a primary language.
Colón Sánchez, together with Edna Negron Rosario, led the fight for mandatory bilingual education in Hartford. Though she was met with resistance from the non-Hispanic community who opposed the idea of bilingual education, Colón Sánchez persisted and, in 1972, she helped to open La Escuelita (The Little School), the first bilingual school in the state of Connecticut.
In 1973, with support from the Democratic Party, the Campaign Committee of the Bilingual Task Force, and a group organized by La Casa de Puerto Rico, Colón Sánchez won a seat on the Hartford Board of Education. She became the first Puerto Rican elected to public office in Hartford. She served on the Board for 16 years, during which time she continued to promote bilingual and bicultural education. In 1976 Colón Sánchez filed a lawsuit, which she won in 1978, mandating bilingual education throughout the entire Hartford public school system.
Colón Sánchez co-founded the Society of Legal Services, the Spanish-American Merchants Association, the Puerto Rican Businessmen Association, and the Community Renewal Team.
Elected State Representative
In 1979, Colón Sánchez expressed her desire to run for a seat as Councilwoman. She was supported by the Democratic town committee, and endorsed by the mayor, several councilmen, and the Hispanic Democratic Reform Club. Despite all of this support for Colón Sánchez, Councilman Nicholas Carbone, who was a powerful figure in the Democratic political machine, supported Mildred Torres instead. Carbone may have been prejudiced against Colón Sánchez because of her physical appearance, and made her feel that she just wasn't ready. According to a statement made by Carbone when she ran for the school board:
We ran María for the school board and she spoke broken English. She wasn't an attractive woman, she was heavy, so where would you take her? How did you sell her? How did you get people to vote for her? And how did you get over the prejudice that she spoke with a thick accent? So we didn't take her into a lot of neighborhoods ... You had to be practical as you were trying to get her on the school board.
Torres won the election, and became the first Puerto Rican to occupy a seat on Hartford's city council.
In 1988, Abe Giles, a member of the Democratic town committee in the sixth district, managed to have Colón Sánchez ousted from the committee. Colón Sánchez reacted to Giles's actions by running against him in a primary for a seat on the state legislature. She won the primary and in November of that year won the election. She thus became the first Hispanic woman elected to the Connecticut General Assembly.
Colón Sánchez held her seat until November 25, 1989, when she was found dead of a heart attack in her apartment. Bishop Peter Rosazza, who celebrated her funeral mass, stated that "she was well respected and strong." At the time of her death she was survived by four brothers: Jose J. Colón of Hartford; Benigno Colón, Arcardio Colón, and Pedro Colón; and a sister, Luz Consuelo Colón de Fuentes.
Legacy
In addition to being the first Puerto Rican to occupy a seat on Hartford's city council, and the first Hispanic woman elected to the Connecticut General Assembly, Maria Colón Sánchez left behind a lifetime of leadership and community empowerment. She was a role model for her generation, and every generation which followed.
The state of Connecticut honored her memory in 1993, by naming a school on Babcock Street in Frog Hollow the "María Sánchez Elementary School." A street close to her Newsstand was named "Maria Colón Sanchez Way" in her honor.
The Connecticut Institute for Community Development (CICD) Puerto Rican Parade, Inc. – Hartford Chapter created the annual "Maria C. Sanchez Award." Recipients are selected based on their outstanding contributions to the Puerto Rican and Hispanic community in the areas of leadership, education, community empowerment, cultural enrichment, and activism.
On October 14, 1993, Colón Sánchez was commemorated, along with other distinguished Hartford citizens, in the Hartford Public Library's Plaza of Fame. In 1995, she was inducted into the Connecticut Women's Hall of Fame.
See also
List of Puerto Ricans
References
1926 births
1989 deaths
Puerto Rican activists
Puerto Rican women activists
People from Comerío, Puerto Rico
Connecticut city council members
Connecticut Democrats
Women state legislators in Connecticut
Members of the Connecticut House of Representatives
Politicians from Hartford, Connecticut
20th-century American politicians
Women city councillors in Connecticut
20th-century American women politicians
|
In-target probe, or ITP, is a device used in computer hardware and microprocessor design, to control a target microprocessor or similar ASIC at the register level. It generally allows full control of the target device and allows the computer engineer access to individual processor registers, program counter, and instructions within the device. It allows the processor to be single-stepped or for breakpoints to be set. Unlike an in-circuit emulator (ICE), an In-Target Probe uses the target device to execute, rather than substituting for the target device.
See also
Hardware-assisted virtualization
In-circuit emulator
Joint Test Action Group
External links
ITP700 Debug Port Design Guide - Intel
Embedded systems
Debugging
|
```cram
$ cat > dune-project <<EOF
> (lang dune 1.11)
> EOF
$ cat > dune <<EOF
> (rule
> (targets a b)
> (action (with-stdout-to %{targets} (echo "hola"))))
> EOF
$ dune build a
File "dune", line 3, characters 26-36:
3 | (action (with-stdout-to %{targets} (echo "hola"))))
^^^^^^^^^^
Error: Variable %{targets} expands to 2 values, however a single value is
expected here. Please quote this atom.
[1]
# CR-someday aalekseyev: the suggestion above is nonsense!
# quoting the atom will achieve nothing.
$ cat > dune <<EOF
> (rule
> (targets a b)
> (action (bash "echo hola > %{targets}")))
> EOF
$ dune build a
File "dune", lines 1-3, characters 0-65:
1 | (rule
2 | (targets a b)
3 | (action (bash "echo hola > %{targets}")))
Error: Rule failed to generate the following targets:
- b
[1]
^ the echo command may succeed, but what it does is total nonsense
Therefore the user is encouraged to write singular [target] where it makes sense
to get a better error message:
$ cat > dune <<EOF
> (rule
> (targets a b)
> (action (bash "echo hola > %{target}")))
> EOF
$ dune build a
File "dune", line 3, characters 29-38:
3 | (action (bash "echo hola > %{target}")))
^^^^^^^^^
Error: You can only use the variable %{target} if you defined the list of
targets using the field [target] (not [targets])
[1]
^ Expected error message
$ cat > dune <<EOF
> (rule
> (targets a)
> (target a)
> (action (bash "echo hola > %{target}")))
> EOF
$ dune build a
File "dune", lines 1-4, characters 0-75:
1 | (rule
2 | (targets a)
3 | (target a)
4 | (action (bash "echo hola > %{target}")))
Error: fields "target" and "targets" are mutually exclusive.
[1]
^ Specifying both [targets] and [target] is not allowed
$ cat > dune <<EOF
> (rule
> (targets a)
> (action (bash "echo hola > %{target}")))
> EOF
$ dune build a
File "dune", line 3, characters 29-38:
3 | (action (bash "echo hola > %{target}")))
^^^^^^^^^
Error: You can only use the variable %{target} if you defined the list of
targets using the field [target] (not [targets])
[1]
^ Expected error
$ cat > dune <<EOF
> (rule
> (target a)
> (action (bash "echo hola > %{targets}")))
> EOF
$ dune build a
File "dune", line 3, characters 29-39:
3 | (action (bash "echo hola > %{targets}")))
^^^^^^^^^^
Error: You can only use the variable %{targets} if you defined the list of
targets using the field [targets] (not [target])
[1]
^ Expected error
```
|
```java
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.oracle.svm.core.reflect.target;
import java.lang.reflect.Method;
import com.oracle.svm.core.annotate.Alias;
import com.oracle.svm.core.annotate.TargetClass;
// Substitution class: each @Alias field mirrors the field of the same name in
// java.lang.reflect.RecordComponent, making those (otherwise inaccessible)
// fields visible to Native Image code at image build/run time.
@TargetClass(java.lang.reflect.RecordComponent.class)
final class Target_java_lang_reflect_RecordComponent {
@Alias Class<?> clazz; // mirrors RecordComponent.clazz (the declaring record class)
@Alias String name; // mirrors RecordComponent.name
@Alias Class<?> type; // mirrors RecordComponent.type
@Alias Method accessor; // mirrors RecordComponent.accessor
@Alias String signature; // mirrors RecordComponent.signature (presumably the generic signature — confirm against JDK source)
@Alias byte[] annotations; // mirrors RecordComponent.annotations (raw annotation bytes)
@Alias byte[] typeAnnotations; // mirrors RecordComponent.typeAnnotations (raw annotation bytes)
}
```
|
Elvira Badaracco (22 May 1911 — 21 January 1994) was an Italian politician, socialist, writer and feminist activist.
Biography
Elvira Badaracco was born in Alessandria, Italy. As a teenager she moved with her parents to Milan.
Political career
From 1963 she began to devote herself to political activity; the following year she enrolled in the Italian Socialist Party, dealing with social and women's issues. In those years she also joined the Unione donne italiane. From 1964 to 1970 she was secretary of the Morandi Section, elected councilor for zone 5 of Milan and, as party representative, entered the Administrative Board of the Ronzoni Surgical Institute and then into the Board of Directors of Clinical Specialization Institutes. As provincial and then regional head of the PSI she organized many conferences on the history of socialist women, work, health of women workers and abortion. In 1974 she began writing articles at L'Avanti! and other national newspapers. From 1979 to 1980 she became city councilor in Milan for the PSI.
Center for Historical Studies
In 1979 she founded the "Center for Historical Studies on the Women's Liberation Movement in Italy" with Pierrette Coppa and served as its president until her death in 1994. The Center was created:
With the aim of collecting, organizing, preserving and making available the wealth of knowledge and practices developed by the women's movement, in the belief that the protection and enhancement of the history of feminism and the history of women in general constitutes a value not only scientific and cultural, but also - and above all - political.
The Center carries out an intense political and cultural activity in Milan and collects documentation on the feminist movement on an ongoing basis. In her will, Elvira Badaracco donated her assets to the Study Center, naming Annarita Buttafuoco as the life guarantor of the economic, scientific and political heritage she left behind, requiring the transformation of the Center into a Foundation. The Elvira Badaracco Foundation was established in December 1994 and Marina Zancan was named president. On the death of Annarita Buttafuoco in 1999, the role of guarantor passed to Marina Zancan.
Selected publications
Francesco Dambrosio, Elvira Badaracco and Mauro Buscaglia, Maternita cosciente, contraccezione e aborto, 2. ed., Milano, Mazzotta, 1976.
Donne e socialismo, in Donne e internazionalismo, Milano, Lega internazionale per i diritti e la liberazione dei popoli, 1980.
References
1911 births
1994 deaths
Italian feminists
Italian socialists
Italian Socialist Party politicians
Italian socialist feminists
|
The United States Penitentiary, Tucson (USP Tucson) is a high-security United States federal prison for male inmates in Arizona. It is part of the Tucson Federal Correctional Complex (FCC Tucson) and is operated by the Federal Bureau of Prisons, a division of the United States Department of Justice. The facility also has a satellite prison camp for minimum-security male offenders.
USP Tucson is located within Tucson's city limits, southeast of downtown Tucson.
History
The Federal Bureau of Prisons drafted a report on March 28, 2001 naming Tucson as an ideal site for a new federal prison housing either 1,100 medium security or 1,000 high security inmates. A hearing was arranged the following May.
Construction was completed in 2005 at a cost of about $100 million, but additional preparations took over a year before inmates could be received. The facility is situated on a property and designed for 1,500 inmates, though officials had at one time planned to limit the population to around 960. The minimum-security work camp provides labor for day-to-day operations of the federal prison complex. It has been described as "its own little city" by Josias Salazar, executive assistant of the prison complex. The opening of the penitentiary on February 5, 2007 worsened a local shortage of prison officers and was cited by residents for adding to the street traffic generated by the various prison facilities.
Sex Offender Management Program
USP Tucson is one of several federal prisons that offers a Sex Offender Management Program (SOMP) and therefore has a higher proportion of sex offenders in its general population. Having a larger number of sex offenders at SOMP facilities ensures that inmates feel safe about participating in treatment. USP Tucson offers a Non-Residential Sex Offender Treatment Program (SOTP-NR), which is a moderate intensity program designed for low to moderate risk sexual offenders. Many of the inmates in the SOTP-NR are first-time offenders serving a sentence for an Internet sex crime. All SOMP institutions offer the SOTP-NR. Eligible inmates are transferred to a SOMP facility based on their treatment needs and security level. USP Tucson houses several high-profile sex offenders.
Notable incidents
The penitentiary went into lockdown on May 28, 2009 after several inmates were hospitalized from fights involving improvised weapons. Another inmate, Joseph William Nichols, was sentenced to 33 more months after being caught on August 12, 2009 with a concealed plastic shank that had been fashioned from his prison chair. A search of the kitchen where Nichols had been assigned resulted in the discovery of hidden contraband packages containing weapons and drug paraphernalia.
Media coverage
In July 2010, a San Diego CityBeat reporter wrote to former congressman Randy "Duke" Cunningham to inquire about his time at the prison's work camp halfway into his 100-month sentence for tax evasion, conspiracy to commit bribery, mail fraud and wire fraud. Cunningham, who has become an advocate of prison reform, responded in a handwritten letter that he spends his days there teaching fellow inmates to obtain their GED. He wrote: "[Too] many students have severe learning disabilities from either drugs or genetic[s]. During the past 4 years only one of my students was unable to graduate—I taught him life skills, using a calculator to add, subtract, [multiply and divide]. This way he could at least balance a check book."
Notable inmates (current and former)
Inmates who were released from custody prior to 1982 are not listed on the Bureau of Prisons website.
High-profile
Political figures
Terrorists
Organized crime figures
Others
See also
List of U.S. federal prisons
Federal Bureau of Prisons
Incarceration in the United States
Sex offender registries in the United States
References
External links
USP Tucson at the Federal Bureau of Prisons (Official site)
While imprisoned, Pickard authored the acclaimed memoir "The Rose Of Paracelsus: On Secrets and Sacraments" http://www.createspace.com/5377339
2007 establishments in Arizona
Buildings and structures in Tucson, Arizona
Tucson
Tucson
|
```rust
//
// Unless required by applicable law or agreed to in writing, software distributed
extern crate edn;
extern crate mentat_core;
extern crate core_traits;
extern crate mentat_query_algebrizer;
extern crate query_algebrizer_traits;
mod utils;
use core_traits::{
Attribute,
ValueType,
TypedValue,
ValueTypeSet,
};
use mentat_core::{
DateTime,
Schema,
Utc,
};
use edn::query::{
Keyword,
PlainSymbol,
Variable,
};
use query_algebrizer_traits::errors::{
AlgebrizerError,
};
use mentat_query_algebrizer::{
EmptyBecause,
Known,
QueryInputs,
};
use utils::{
add_attribute,
alg,
alg_with_inputs,
associate_ident,
bails,
};
/// Builds a test schema with three single-valued attributes:
/// `:foo/date` (Instant, entid 65), `:foo/double` (Double, 66),
/// and `:foo/long` (Long, 67).
fn prepopulated_schema() -> Schema {
    let mut schema = Schema::default();
    for (name, entid, value_type) in vec![
        ("date", 65, ValueType::Instant),
        ("double", 66, ValueType::Double),
        ("long", 67, ValueType::Long),
    ] {
        associate_ident(&mut schema, Keyword::namespaced("foo", name), entid);
        add_attribute(&mut schema, entid, Attribute {
            value_type: value_type,
            multival: false,
            ..Default::default()
        });
    }
    schema
}
// Verifies how inequality predicates interact with an instant-typed attribute:
// string arguments are a hard error, numeric arguments make the clause
// known-empty via a type mismatch, and double/long comparisons are fine.
#[test]
fn test_instant_predicates_require_instants() {
let schema = prepopulated_schema();
let known = Known::for_schema(&schema);
// You can't use a string for an inequality: this is a straight-up error.
let query = r#"[:find ?e
:where
[?e :foo/date ?t]
[(> ?t "2017-06-16T00:56:41.257Z")]]"#;
// The offending string is argument index 1 (the right-hand side).
assert_eq!(bails(known, query),
AlgebrizerError::InvalidArgumentType(
PlainSymbol::plain(">"),
ValueTypeSet::of_numeric_and_instant_types(),
1));
// Same error with the string on the left-hand side…
let query = r#"[:find ?e
:where
[?e :foo/date ?t]
[(> "2017-06-16T00:56:41.257Z", ?t)]]"#;
assert_eq!(bails(known, query),
AlgebrizerError::InvalidArgumentType(
PlainSymbol::plain(">"),
ValueTypeSet::of_numeric_and_instant_types(),
0)); // …reported as argument index 0. We get this right.
// You can try using a number, which is valid input to a numeric predicate.
// In this store and query, though, that means we expect `?t` to be both
// an instant and a number, so the query is known-empty.
let query = r#"[:find ?e
:where
[?e :foo/date ?t]
[(> ?t 1234512345)]]"#;
let cc = alg(known, query);
assert!(cc.is_known_empty());
// The empty-ness must be attributed to the ?t instant-vs-numeric clash.
assert_eq!(cc.empty_because.unwrap(),
EmptyBecause::TypeMismatch {
var: Variable::from_valid_name("?t"),
existing: ValueTypeSet::of_one(ValueType::Instant),
desired: ValueTypeSet::of_numeric_types(),
});
// You can compare doubles to longs; ?t stays known as Double.
let query = r#"[:find ?e
:where
[?e :foo/double ?t]
[(< ?t 1234512345)]]"#;
let cc = alg(known, query);
assert!(!cc.is_known_empty());
assert_eq!(cc.known_type(&Variable::from_valid_name("?t")).expect("?t is known"),
ValueType::Double);
}
/// An `:in` variable bound to an instant may appear on either side of an
/// inequality against an instant-typed attribute; in both cases the variable's
/// known type must come out as `ValueType::Instant`.
#[test]
fn test_instant_predicates_accepts_var() {
    let schema = prepopulated_schema();
    let known = Known::for_schema(&schema);

    let time_var = Variable::from_valid_name("?time");
    let time_value = TypedValue::Instant(DateTime::parse_from_rfc3339("2018-04-11T19:17:00.000Z")
        .map(|t| t.with_timezone(&Utc))
        .expect("expected valid date"));

    // Builds fresh inputs binding ?time to the instant above.
    let make_inputs = || QueryInputs::with_value_sequence(vec![(time_var.clone(), time_value.clone())]);

    // ?time on the right-hand side of the comparison.
    let query = r#"[:find ?e
:in ?time
:where
[?e :foo/date ?t]
[(< ?t ?time)]]"#;
    let cc = alg_with_inputs(known, query, make_inputs());
    assert_eq!(cc.known_type(&time_var).expect("?time is known"),
               ValueType::Instant);

    // ?time on the left-hand side of the comparison.
    let query = r#"[:find ?e
:in ?time
:where
[?e :foo/date ?t]
[(> ?time, ?t)]]"#;
    let cc = alg_with_inputs(known, query, make_inputs());
    assert_eq!(cc.known_type(&time_var).expect("?time is known"),
               ValueType::Instant);
}
/// An `:in` variable bound to a long may appear on either side of an
/// inequality against a long-typed attribute; in both cases the variable's
/// known type must come out as `ValueType::Long`.
#[test]
fn test_numeric_predicates_accepts_var() {
    let schema = prepopulated_schema();
    let known = Known::for_schema(&schema);

    let long_var = Variable::from_valid_name("?long");
    let long_value = TypedValue::Long(1234567);

    // Builds fresh inputs binding ?long to the value above.
    let make_inputs = || QueryInputs::with_value_sequence(vec![(long_var.clone(), long_value.clone())]);

    // ?long on the right-hand side of the comparison.
    let query = r#"[:find ?e
:in ?long
:where
[?e :foo/long ?t]
[(> ?t ?long)]]"#;
    let cc = alg_with_inputs(known, query, make_inputs());
    assert_eq!(cc.known_type(&long_var).expect("?long is known"),
               ValueType::Long);

    // ?long on the left-hand side of the comparison.
    let query = r#"[:find ?e
:in ?long
:where
[?e :foo/long ?t]
[(> ?long, ?t)]]"#;
    let cc = alg_with_inputs(known, query, make_inputs());
    assert_eq!(cc.known_type(&long_var).expect("?long is known"),
               ValueType::Long);
}
```
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.