text stringlengths 8 4.13M |
|---|
use std::fs::File;
use std::io::Read;
fn main() {
    let mut file = File::open("d06-input").expect("file not found");
    let mut input = String::new();
    file.read_to_string(&mut input)
        .expect("something went wrong reading file");
    // Groups are separated by a blank line; map each group straight to its
    // distinct-answer count and sum, without materialising a Vec<String>.
    let sum: usize = input.split("\n\n").map(unique_answers).sum();
    println!("Sum of answers: {}", sum);
}

/// Count the distinct non-whitespace answer characters in one group.
fn unique_answers(group: &str) -> usize {
    let mut answers: Vec<char> = group.chars().filter(|c| !c.is_whitespace()).collect();
    // sort + dedup leaves exactly one copy of each distinct character.
    answers.sort_unstable();
    answers.dedup();
    answers.len()
}
/// An implementation of deterministic SecureBroadcast.
use std::collections::{HashMap, HashSet};
use crate::actor::{Actor, Sig};
use crate::traits::SecureBroadcastAlgorithm;
use crdts::{CmRDT, CvRDT, Dot, VClock};
use ed25519_dalek::Keypair;
use rand::rngs::OsRng;
use serde::Serialize;
use sha2::Sha512;
#[derive(Debug)]
pub struct SecureBroadcastProc<A: SecureBroadcastAlgorithm> {
    // The identity of a process is its keypair (the public key doubles as its Actor id).
    keypair: Keypair,
    // Msgs this process has initiated and is waiting on BFT agreement for from the network.
    // Maps each pending msg to the validation signatures collected so far, per peer.
    pending_proof: HashMap<Msg<A::Op>, HashMap<Actor, Sig>>,
    // The clock representing the most recently received messages from each process.
    // These are messages that have been acknowledged but not yet delivered.
    // This clock must at all times be greater than or equal to the `delivered` clock.
    received: VClock<Actor>,
    // The clock representing the most recent msgs we've delivered to the underlying algorithm `algo`.
    delivered: VClock<Actor>,
    // The state of the algorithm that we are running BFT over.
    // This can be the causal bank described in AT2, or it can be a CRDT.
    algo: A,
    // The set of members in this network.
    peers: HashSet<Actor>,
}
// Snapshot of a process's replicated state, shipped to new/lagging peers in `sync_from`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ReplicatedState<A: SecureBroadcastAlgorithm> {
    // Snapshot of the underlying algorithm's replicated state.
    algo_state: A::ReplicatedState,
    // The membership set as seen by the process that produced the snapshot.
    peers: HashSet<Actor>,
    // Clock of msgs the snapshotting process had delivered at snapshot time.
    delivered: VClock<Actor>,
}
// A signed point-to-point message envelope.
#[derive(Debug, Clone)]
pub struct Packet<Op> {
    // The process that produced (and signed) this packet.
    pub source: Actor,
    // The process this packet is addressed to.
    pub dest: Actor,
    pub payload: Payload<Op>,
    // Signature by `source` over the serialized `payload` (see `send`/`sign`).
    pub sig: Sig,
}
// The three phases of the broadcast protocol.
#[derive(Debug, Clone, Serialize)]
pub enum Payload<Op> {
    // Phase 1: the source asks a peer to validate its msg.
    RequestValidation {
        msg: Msg<Op>,
    },
    // Phase 2: a peer returns its signature over the msg it validated.
    SignedValidated {
        msg: Msg<Op>,
        sig: Sig,
    },
    // Phase 3: the source broadcasts the msg together with a quorum of
    // signatures proving the network agreed to deliver it.
    ProofOfAgreement {
        msg: Msg<Op>,
        proof: HashMap<Actor, Sig>,
    },
}
// An operation stamped with its position in the source's history.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Hash)]
pub struct Msg<Op> {
    op: BFTOp<Op>,
    // The source actor plus its per-actor sequence number for this msg.
    dot: Dot<Actor>,
}
// Operations understood by the BFT layer itself, wrapping the algorithm's ops.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Hash)]
enum BFTOp<Op> {
    // TODO: support peers leaving
    // Admit a new member into the network.
    MembershipNewPeer(Actor),
    // An operation of the underlying algorithm `A`.
    AlgoOp(Op),
}
impl<A: SecureBroadcastAlgorithm> SecureBroadcastProc<A> {
    /// Create a process with a freshly generated ed25519 keypair.
    ///
    /// A process constructed with an empty `known_peers` set is the genesis
    /// process and immediately counts itself as a member.
    pub fn new(known_peers: HashSet<Actor>) -> Self {
        let mut csprng = OsRng::new().unwrap();
        let keypair = Keypair::generate::<Sha512, _>(&mut csprng);
        let actor = Actor(keypair.public);
        let peers = if known_peers.is_empty() {
            // This is the genesis proc. It must be treated as a special case.
            //
            // Under normal conditions when a proc joins an existing network, it will only
            // add itself to its own peer set once it receives confirmation from the rest
            // of the network that it has been accepted as a member of the network.
            std::iter::once(actor).collect()
        } else {
            known_peers
        };
        Self {
            keypair,
            pending_proof: HashMap::new(),
            algo: A::new(actor),
            peers,
            delivered: VClock::new(),
            received: VClock::new(),
        }
    }

    /// This process's public identity (its public key wrapped as an `Actor`).
    pub fn actor(&self) -> Actor {
        Actor(self.keypair.public)
    }

    /// Snapshot the replicated state, for shipping to another peer.
    pub fn state(&self) -> ReplicatedState<A> {
        ReplicatedState {
            algo_state: self.algo.state(),
            peers: self.peers.clone(),
            delivered: self.delivered.clone(),
        }
    }

    /// The current membership set (cloned).
    pub fn peers(&self) -> HashSet<Actor> {
        self.peers.clone()
    }

    /// Initiate BFT agreement to add this process to the network.
    /// Returns the packets that must be sent to the network.
    pub fn request_membership(&self) -> Vec<Packet<A::Op>> {
        self.exec_bft_op(BFTOp::MembershipNewPeer(self.actor()))
    }

    /// Merge a state snapshot received from another peer into our own state.
    pub fn sync_from(&mut self, state: ReplicatedState<A>) {
        // TODO: !! there is no validation of this state right now.
        // Suggestion. Periodic BFT agreement on the state snapshot, and procs store all ProofsOfAgreement msgs they've delivered since last snapshot.
        // once the list of proofs becomes large enough, collapse these proofs into the next snapshot.
        //
        // During onboarding, ship the last snapshot together with its proof of agreement and the subsequent list of proofs of agreement msgs.
        println!("{} syncing", self.actor());
        self.peers.extend(state.peers);
        self.delivered.merge(state.delivered.clone());
        self.received.merge(state.delivered); // We advance received up to what we've delivered
        self.algo.sync_from(state.algo_state);
    }

    /// Ask the algorithm to produce an op (via `f`) and, if it does, initiate
    /// BFT agreement for it. Returns the packets to send to the network.
    pub fn exec_algo_op(&self, f: impl FnOnce(&A) -> Option<A::Op>) -> Vec<Packet<A::Op>> {
        if let Some(op) = f(&self.algo) {
            self.exec_bft_op(BFTOp::AlgoOp(op))
        } else {
            println!("[DSB] algo did not produce an op");
            vec![]
        }
    }

    /// Read-only access to the underlying algorithm state.
    pub fn read_state<V>(&self, f: impl FnOnce(&A) -> V) -> V {
        f(&self.algo)
    }

    /// Entry point for all incoming packets: validate, then process.
    /// Invalid packets are dropped (no response is produced).
    pub fn handle_packet(&mut self, packet: Packet<A::Op>) -> Vec<Packet<A::Op>> {
        println!(
            "[DSB] handling packet from {}->{}",
            packet.source,
            self.actor()
        );
        if self.validate_packet(&packet) {
            self.process_packet(packet)
        } else {
            vec![]
        }
    }

    /// Apply an already-validated packet, returning any response packets.
    fn process_packet(&mut self, packet: Packet<A::Op>) -> Vec<Packet<A::Op>> {
        match packet.payload {
            Payload::RequestValidation { msg } => {
                println!("[DSB] request for validation");
                self.received.apply(msg.dot);
                // NOTE: we do not need to store this message, it will be sent back to us
                // with the proof of agreement. Our signature will prevent tampering.
                let sig = self.sign(&msg);
                let validation = Payload::SignedValidated { msg, sig };
                vec![self.send(packet.source, validation)]
            }
            Payload::SignedValidated { msg, sig } => {
                println!("[DSB] signed validated");
                self.pending_proof
                    .entry(msg.clone())
                    .or_default()
                    .insert(packet.source, sig);
                let num_signatures = self.pending_proof[&msg].len();
                if self.quorum(num_signatures) {
                    println!("[DSB] we have quorum over msg, sending proof to network");
                    // We have quorum, broadcast proof of agreement to network
                    //
                    // NOTE(review): the pending_proof entry is not removed here, so
                    // every further signature past quorum re-triggers this broadcast —
                    // confirm whether that is intended.
                    let proof = self.pending_proof[&msg].clone();
                    self.broadcast(Payload::ProofOfAgreement { msg, proof })
                } else {
                    vec![]
                }
            }
            Payload::ProofOfAgreement { msg, .. } => {
                println!("[DSB] proof of agreement");
                // We may not have been in the subset of members to validate this clock
                // so we may not have had the chance to increment received. We must bring
                // received up to this msg's timestamp.
                //
                // Otherwise we won't be able to validate any future messages
                // from this source.
                self.received.apply(msg.dot);
                self.delivered.apply(msg.dot);
                // Apply the op
                // TODO: factor this out into an apply() method
                match msg.op {
                    BFTOp::MembershipNewPeer(id) => {
                        self.peers.insert(id);
                        // do we want to do some sort of onboarding here?
                        // ie. maybe we can send this new peer our state
                    }
                    BFTOp::AlgoOp(op) => self.algo.apply(op),
                };
                // TODO: Once we relax our network assumptions, we must put in an ack
                // here so that the source knows that honest procs have applied the transaction
                vec![]
            }
        }
    }

    /// Check the packet's signature, then its payload-specific invariants.
    fn validate_packet(&self, packet: &Packet<A::Op>) -> bool {
        if !self.verify_sig(&packet.source, &packet.payload, &packet.sig) {
            println!(
                "[DSB/SIG] Msg failed verification {}->{}",
                packet.source,
                self.actor(),
            );
            false
        } else if !self.validate_payload(packet.source, &packet.payload) {
            println!(
                "[DSB/BFT] Msg failed validation {}->{}",
                packet.source,
                self.actor()
            );
            false
        } else {
            true
        }
    }

    /// Per-payload validation rules; logs and returns false on the first
    /// failing check, true if all checks pass.
    fn validate_payload(&self, from: Actor, payload: &Payload<A::Op>) -> bool {
        let validation_tests = match payload {
            Payload::RequestValidation { msg } => vec![
                // Only the msg's own author may request validation for it...
                (from == msg.dot.actor, "source does not match the msg dot"),
                // ...and msgs must arrive in per-source sequence order.
                (msg.dot == self.received.inc(from), "not the next msg"),
                (
                    self.validate_bft_op(&from, &msg.op),
                    "failed bft op validation",
                ),
            ],
            Payload::SignedValidated { msg, sig } => vec![
                (self.verify_sig(&from, &msg, sig), "failed sig verification"),
                // We only accept validations for msgs we initiated ourselves.
                (self.actor() == msg.dot.actor, "validation not requested"),
            ],
            Payload::ProofOfAgreement { msg, proof } => vec![
                // The msg must be exactly the next one we'd deliver from this source.
                (
                    self.delivered.inc(from) == msg.dot,
                    "either already delivered or out of order msg",
                ),
                (self.quorum(proof.len()), "not enough signatures for quorum"),
                (
                    proof
                        .iter()
                        .all(|(signatory, _sig)| self.peers.contains(&signatory)),
                    "proof contains signature(s) from unknown peer(s)",
                ),
                (
                    proof
                        .iter()
                        .all(|(signatory, sig)| self.verify_sig(signatory, &msg, &sig)),
                    "proof contains invalid signature(s)",
                ),
            ],
        };
        validation_tests
            .into_iter()
            .find(|(is_valid, _msg)| !is_valid)
            .map(|(_test, msg)| println!("[DSB/INVALID] {} {:?}, {:?}", msg, payload, self))
            .is_none()
    }

    /// Validation rules for the BFT-level operations themselves.
    fn validate_bft_op(&self, from: &Actor, bft_op: &BFTOp<A::Op>) -> bool {
        let validation_tests = match bft_op {
            BFTOp::MembershipNewPeer(actor) => {
                vec![(!self.peers.contains(&actor), "peer already exists")]
            }
            BFTOp::AlgoOp(op) => vec![(self.algo.validate(&from, &op), "failed algo validation")],
        };
        validation_tests
            .into_iter()
            .find(|(is_valid, _msg)| !is_valid)
            .map(|(_test, msg)| println!("[DSB/BFT_OP/INVALID] {} {:?}, {:?}", msg, bft_op, self))
            .is_none()
    }

    /// Stamp `bft_op` with the next dot and broadcast a validation request.
    fn exec_bft_op(&self, bft_op: BFTOp<A::Op>) -> Vec<Packet<A::Op>> {
        let msg = Msg {
            op: bft_op,
            // We use the received clock to allow for many operations from this process
            // to be pending agreement at any one point in time.
            dot: self.received.inc(self.actor()),
        };
        println!("[DSB] {} initiating bft for msg {:?}", self.actor(), msg);
        self.broadcast(Payload::RequestValidation { msg })
    }

    /// True when `n` signatures are enough to constitute agreement.
    ///
    /// NOTE(review): this accepts exactly two thirds of the peer set
    /// (n >= 2/3 * peers); classical BFT quorums require strictly more than
    /// 2/3 — confirm the intended threshold.
    fn quorum(&self, n: usize) -> bool {
        n * 3 >= self.peers.len() * 2
    }

    /// Sign `payload` once per peer and produce a packet for each of them
    /// (including ourselves, since we are in `peers`).
    fn broadcast(&self, payload: Payload<A::Op>) -> Vec<Packet<A::Op>> {
        println!("[DSB] broadcasting {}->{:?}", self.actor(), self.peers());
        self.peers
            .iter()
            .cloned()
            .map(|dest_p| self.send(dest_p, payload.clone()))
            .collect()
    }

    /// Wrap `payload` in a packet addressed to `dest`, signed by us.
    fn send(&self, dest: Actor, payload: Payload<A::Op>) -> Packet<A::Op> {
        let sig = self.sign(&payload);
        Packet {
            source: self.actor(),
            dest,
            payload,
            sig,
        }
    }

    /// Sign the bincode serialization of `blob` with our keypair.
    fn sign(&self, blob: impl Serialize) -> Sig {
        let blob_bytes = bincode::serialize(&blob).expect("Failed to serialize");
        let blob_sig = self.keypair.sign::<Sha512>(&blob_bytes);
        Sig(blob_sig)
    }

    /// Verify `sig` over the bincode serialization of `blob` against
    /// `source`'s public key.
    fn verify_sig(&self, source: &Actor, blob: impl Serialize, sig: &Sig) -> bool {
        let blob_bytes = bincode::serialize(&blob).expect("Failed to serialize");
        source.0.verify::<Sha512>(&blob_bytes, &sig.0).is_ok()
    }
}
|
use std::num::Wrapping;
use std::u16;

pub use euclid::{point2, size2};

// Public geometry aliases, fixed to i32 coordinates.
pub type Point = euclid::default::Point2D<i32>;
pub type Size = euclid::default::Size2D<i32>;
pub type Rectangle = euclid::default::Box2D<i32>;
/// ID referring to an allocated rectangle.
///
/// Packs a bin index, a per-bin item counter and a generation into 32 bits
/// (see the `BIN_BITS`/`ITEM_BITS`/`GEN_BITS` constants below).
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
pub struct AllocId(pub(crate) u32);

impl AllocId {
    /// Expose the raw packed id, e.g. for persistence.
    pub fn serialize(&self) -> u32 {
        self.0
    }

    /// Rebuild an id from a value previously returned by `serialize`.
    pub fn deserialize(bytes: u32) -> Self {
        AllocId(bytes)
    }
}
// Bit layout of an `AllocId`, low to high:
//   [ bin index: 12 bits | item index: 12 bits | generation: 8 bits ]
const BIN_BITS: u32 = 12;
const ITEM_BITS: u32 = 12;
const GEN_BITS: u32 = 8;

const BIN_MASK: u32 = (1 << BIN_BITS) - 1;
const ITEM_MASK: u32 = ((1 << ITEM_BITS) - 1) << BIN_BITS;
const GEN_MASK: u32 = ((1 << GEN_BITS) - 1) << (BIN_BITS + ITEM_BITS);

// Shift by the named constant instead of a magic `12` so these stay in sync
// with the masks if the bit layout ever changes (same value as before).
const MAX_ITEMS_PER_BIN: u16 = (ITEM_MASK >> BIN_BITS) as u16;
const MAX_BIN_COUNT: usize = BIN_MASK as usize;
const MAX_SHELF_COUNT: usize = u16::MAX as usize;

pub type ShelfIndex = u32;
/// Index of a `Bin` in the allocator's bin storage.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
struct BinIndex(u16);

impl BinIndex {
    /// Sentinel marking "no bin" (end of a bin/free list).
    const INVALID: Self = BinIndex(u16::MAX);

    /// Widen the index for use as a `Vec` subscript.
    fn to_usize(self) -> usize {
        usize::from(self.0)
    }
}
// A horizontal row of the atlas, split into equally sized bins.
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
struct Shelf {
    // Vertical offset of the shelf, in (possibly flipped) internal coordinates.
    y: u16,
    // Height of the shelf; set to zero when squashed by `coalesce_shelves`.
    height: u16,
    // Width of each bin on this shelf.
    bin_width: u16,
    // Head of this shelf's linked list of bins (chained via `Bin::next`).
    first_bin: BinIndex,
}
// A fixed-width slot on a shelf that items are packed into left-to-right.
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
struct Bin {
    // Horizontal offset of the bin within its shelf.
    x: u16,
    // Remaining free width in this bin.
    free_space: u16,
    // Next bin on the same shelf, or next slot when on the free list.
    next: BinIndex,
    /// Bins are cleared when their reference count goes back to zero.
    refcount: u16,
    /// Similar to refcount except that the counter is not decremented
    /// when an item is deallocated. We only use this so that allocation
    /// ids are unique within a bin.
    item_count: u16,
    // Index of the shelf this bin belongs to.
    shelf: u16,
    // Bumped when the bin slot is reused, so stale ids can be detected
    // (checked by an assert in `deallocate_from_bin`).
    generation: Wrapping<u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
pub struct AllocatorOptions {
    /// Align item sizes to a multiple of this alignment.
    ///
    /// Default value: [1, 1] (no alignment).
    pub alignment: Size,
    /// Use vertical instead of horizontal shelves.
    ///
    /// Default value: false.
    pub vertical_bins: bool,
}

impl AllocatorOptions {
    /// The default options: horizontal shelves, no alignment.
    pub const DEFAULT: Self = AllocatorOptions {
        vertical_bins: false,
        alignment: size2(1, 1),
    };
}

impl Default for AllocatorOptions {
    fn default() -> Self {
        AllocatorOptions::DEFAULT
    }
}
/// A Shelf-packing dynamic texture atlas allocator, inspired by https://github.com/mapbox/shelf-pack/
///
/// Items are accumulated into bins which are laid out in rows (shelves) of variable height.
/// When allocating we first look for a suitable bin. If none is found, a new shelf of the desired height
/// is pushed.
///
/// Once all of the items in a bin are deallocated, the bin is removed. When the top-most shelf is empty,
/// it is removed, potentially cascading into garbage-collecting the next shelf, etc.
///
/// This allocator works well when there are a lot of items with similar sizes (typically, glyph atlases)
///
/// Note: The allocator does not attempt to coalesce empty shelves between non-empty ones. This looks like
/// it might cause fragmentation issues when deallocation leaves a lot of empty shelves in the middle but
/// it doesn't matter much in practice when items have similar heights since most new items will fill the
/// gaps instead of creating new shelves.
#[cfg_attr(feature = "serialization", derive(Serialize, Deserialize))]
pub struct AtlasAllocator {
    // Shelves in stacking order; squashed shelves keep a zero height.
    shelves: Vec<Shelf>,
    // Bin storage shared by all shelves; freed slots are chained through
    // `first_unallocated_bin` for reuse.
    bins: Vec<Bin>,
    // Vertical space not yet covered by any shelf.
    available_height: u16,
    // Internal dimensions (swapped at construction when using vertical bins).
    width: u16,
    height: u16,
    // Head of the free list of reusable bin slots.
    first_unallocated_bin: BinIndex,
    // True when using vertical bins: internal coordinates are transposed.
    flip_xy: bool,
    // Requested sizes are rounded up to multiples of this.
    alignment: Size,
}
impl AtlasAllocator {
pub fn with_options(size: Size, options: &AllocatorOptions) -> Self {
assert!(size.width < u16::MAX as i32);
assert!(size.height < u16::MAX as i32);
let (width, height) = if options.vertical_bins {
(size.height as u16, size.width as u16)
} else {
(size.width as u16, size.height as u16)
};
AtlasAllocator {
shelves: Vec::new(),
bins: Vec::new(),
available_height: height,
width,
height,
first_unallocated_bin: BinIndex::INVALID,
flip_xy: options.vertical_bins,
alignment: options.alignment,
}
}
pub fn new(size: Size) -> Self {
Self::with_options(size, &AllocatorOptions::DEFAULT)
}
pub fn clear(&mut self) {
self.shelves.clear();
self.bins.clear();
self.first_unallocated_bin = BinIndex::INVALID;
}
pub fn size(&self) -> Size {
size2(self.width as i32, self.height as i32)
}
pub fn is_empty(&self) -> bool {
self.shelves.is_empty()
}
/// Allocate a rectangle in the atlas.
pub fn allocate(&mut self, mut requested_size: Size) -> Option<(AllocId, Rectangle)> {
if requested_size.is_empty() {
return None;
}
if requested_size.width > u16::MAX as i32 || requested_size.height > u16::MAX as i32 {
return None;
}
adjust_size(self.alignment.width, &mut requested_size.width);
adjust_size(self.alignment.height, &mut requested_size.height);
let (w, h) = convert_coordinates(self.flip_xy, requested_size.width as u16, requested_size.height as u16);
let mut selected_shelf = std::usize::MAX;
let mut selected_bin = BinIndex::INVALID;
let mut best_waste = u16::MAX;
let can_add_shelf = self.available_height >= h
&& self.shelves.len() < MAX_SHELF_COUNT
&& self.bins.len() < MAX_BIN_COUNT;
'shelves: for (shelf_index, shelf) in self.shelves.iter().enumerate() {
if shelf.height < h || shelf.bin_width < w {
continue;
}
let y_waste = shelf.height - h;
if y_waste > best_waste || (can_add_shelf && y_waste > h) {
continue;
}
let mut bin_index = shelf.first_bin;
while bin_index != BinIndex::INVALID {
let bin = &self.bins[bin_index.to_usize()];
if bin.free_space >= w && bin.item_count < MAX_ITEMS_PER_BIN {
if y_waste == 0 && bin.free_space == w {
selected_shelf = shelf_index;
selected_bin = bin_index;
break 'shelves;
}
if y_waste < best_waste {
best_waste = y_waste;
selected_shelf = shelf_index;
selected_bin = bin_index;
break;
}
}
bin_index = bin.next;
}
}
if selected_bin == BinIndex::INVALID {
if can_add_shelf {
selected_shelf = self.add_shelf(w, h);
selected_bin = self.shelves[selected_shelf].first_bin;
} else {
// Attempt to merge some empty shelves to make a big enough spot.
let selected = self.coalesce_shelves(w, h);
selected_shelf = selected.0;
selected_bin = selected.1;
}
}
if selected_bin != BinIndex::INVALID {
return self.alloc_from_bin(selected_shelf, selected_bin, w);
}
return None;
}
fn alloc_from_bin(&mut self, shelf_index: usize, bin_index: BinIndex, width: u16) -> Option<(AllocId, Rectangle)> {
let shelf = &mut self.shelves[shelf_index];
let bin = &mut self.bins[bin_index.to_usize()];
let min_x = bin.x + shelf.bin_width - bin.free_space;
let min_y = shelf.y;
let max_x = min_x + width;
let max_y = min_y + shelf.height;
let (min_x, min_y) = convert_coordinates(self.flip_xy, min_x, min_y);
let (max_x, max_y) = convert_coordinates(self.flip_xy, max_x, max_y);
bin.free_space -= width;
bin.refcount += 1;
bin.item_count += 1;
let id = AllocId(
(bin_index.0 as u32) & BIN_MASK
| ((bin.item_count as u32) << 12) & ITEM_MASK
| (bin.generation.0 as u32) << 24
);
let rectangle = Rectangle {
min: point2(min_x as i32, min_y as i32),
max: point2(max_x as i32, max_y as i32),
};
Some((id, rectangle))
}
fn add_shelf(&mut self, width: u16, height: u16) -> usize {
let height = shelf_height(height).min(self.available_height);
let num_bins = self.num_bins(width, height);
let bin_width = self.height / num_bins;
let y = self.height - self.available_height;
self.available_height -= height;
let shelf_index = self.shelves.len();
// Initialize the bins for our new shelf.
let mut x = 0;
let mut bin_next = BinIndex::INVALID;
for _ in 0..num_bins {
let mut bin = Bin {
next: bin_next,
x,
free_space: bin_width,
refcount: 0,
shelf: shelf_index as u16,
generation: Wrapping(0),
item_count: 0,
};
let mut bin_index = self.first_unallocated_bin;
x += bin_width;
if bin_index == BinIndex::INVALID {
bin_index = BinIndex(self.bins.len() as u16);
self.bins.push(bin);
} else {
let idx = bin_index.to_usize();
bin.generation = self.bins[idx].generation + Wrapping(1);
self.first_unallocated_bin = self.bins[idx].next;
self.bins[idx] = bin;
}
bin_next = bin_index;
}
self.shelves.push(Shelf {
y,
height,
bin_width,
first_bin: bin_next,
});
shelf_index
}
/// find a sequence of consecutive shelves that can be coalesced into a single one
/// tall enough to fit the provided size.
///
/// If such a sequence is found, grow the height of first shelf and squash the other
/// ones to zero.
/// The squashed shelves are not removed, their height is just set to zero so no item
/// can go in, and they will be garbage-collected whenever there's no shelf above them.
/// For simplicity, the bin width is not modified.
fn coalesce_shelves(&mut self, w: u16, h: u16) -> (usize, BinIndex) {
let len = self.shelves.len();
let mut coalesce_range = None;
let mut coalesced_height = 0;
'outer: for shelf_index in 0..len {
if self.shelves[shelf_index].bin_width < w {
continue;
}
if !self.shelf_is_empty(shelf_index) {
continue;
}
coalesced_height = self.shelves[shelf_index].height;
for i in 1..3 {
if shelf_index + i >= len {
break 'outer;
}
if !self.shelf_is_empty(shelf_index + i) {
continue 'outer;
}
coalesced_height += self.shelves[shelf_index + i].height;
if coalesced_height >= h {
coalesce_range = Some(shelf_index .. (shelf_index + i + 1));
break 'outer;
}
}
}
if let Some(range) = coalesce_range {
for i in range.start + 1 .. range.end {
self.shelves[i].height = 0;
}
let shelf_index = range.start;
let shelf = &mut self.shelves[shelf_index];
shelf.height = coalesced_height;
return (shelf_index, shelf.first_bin);
}
(0, BinIndex::INVALID)
}
fn num_bins(&self, width: u16, height: u16) -> u16 {
match self.width / u16::max(width, height) {
0 ..= 4 => 1,
5 ..= 15 => 2,
16 ..= 64 => 4,
65 ..= 256 => 8,
_ => 16,
}.min((MAX_BIN_COUNT - self.bins.len()) as u16)
}
/// Deallocate a rectangle in the atlas.
pub fn deallocate(&mut self, id: AllocId) {
if self.deallocate_from_bin(id) {
self.cleanup_shelves();
}
}
/// Returns true if we should garbage-collect the shelves as a result of
/// removing this element (we deallocated the last item from the bin on
/// the top-most shelf).
fn deallocate_from_bin(&mut self, id: AllocId) -> bool {
let bin_index = (id.0 & BIN_MASK) as usize;
let generation = ((id.0 & GEN_MASK) >> 24 ) as u8;
let bin = &mut self.bins[bin_index];
let expected_generation = bin.generation.0;
assert_eq!(generation, expected_generation);
assert!(bin.refcount > 0);
bin.refcount -= 1;
let shelf = &self.shelves[bin.shelf as usize];
let bin_is_empty = bin.refcount == 0;
if bin_is_empty {
bin.free_space = shelf.bin_width;
}
bin_is_empty && bin.shelf as usize == self.shelves.len() - 1
}
fn cleanup_shelves(&mut self) {
while self.shelves.len() > 0 {
{
let shelf = self.shelves.last().unwrap();
let mut bin_index = shelf.first_bin;
let mut last_bin = shelf.first_bin;
while bin_index != BinIndex::INVALID {
let bin = &self.bins[bin_index.to_usize()];
if bin.refcount != 0 {
return;
}
last_bin = bin_index;
bin_index = bin.next;
}
// We didn't run into any bin on this shelf with live elements,
// this means we can remove it.
// Can't have a shelf with no bins.
debug_assert!(last_bin != BinIndex::INVALID);
// Add the bins to the free list.
self.bins[last_bin.to_usize()].next = self.first_unallocated_bin;
self.first_unallocated_bin = shelf.first_bin;
// Reclaim the height of the bin.
self.available_height += shelf.height;
}
self.shelves.pop();
}
}
fn shelf_is_empty(&self, idx: usize) -> bool {
let shelf = &self.shelves[idx];
let mut bin_index = shelf.first_bin;
while bin_index != BinIndex::INVALID {
let bin = &self.bins[bin_index.to_usize()];
if bin.refcount != 0 {
return false;
}
bin_index = bin.next;
}
true
}
}
/// Transpose an (x, y) pair when the allocator runs in vertical mode.
fn convert_coordinates(flip_xy: bool, x: u16, y: u16) -> (u16, u16) {
    match flip_xy {
        true => (y, x),
        false => (x, y),
    }
}
/// Round a requested item height up to a standardized shelf height, so
/// shelves come in a small number of bucketed sizes.
fn shelf_height(mut size: u16) -> u16 {
    // Taller requests are rounded to coarser multiples.
    let alignment: u16 = if size < 32 {
        8
    } else if size < 128 {
        16
    } else if size < 512 {
        32
    } else {
        64
    };
    let leftover = size % alignment;
    if leftover > 0 {
        size += alignment - leftover;
    }
    size
}
/// Dump a visual representation of the atlas in SVG format.
pub fn dump_svg(atlas: &AtlasAllocator, output: &mut dyn std::io::Write) -> std::io::Result<()> {
    use svg_fmt::*;
    // Open an SVG document matching the atlas dimensions, draw the contents,
    // then close the document.
    writeln!(
        output,
        "{}",
        BeginSvg {
            w: atlas.width as f32,
            h: atlas.height as f32
        }
    )?;
    dump_into_svg(atlas, None, output)?;
    writeln!(output, "{}", EndSvg)
}
/// Dump a visual representation of the atlas in SVG, omitting the beginning and end of the
/// SVG document, so that it can be included in a larger document.
///
/// If a rectangle is provided, translate and scale the output to fit it.
pub fn dump_into_svg(atlas: &AtlasAllocator, rect: Option<&Rectangle>, output: &mut dyn std::io::Write) -> std::io::Result<()> {
    use svg_fmt::*;
    // Scale (sx, sy) and translation (tx, ty) mapping internal atlas
    // coordinates into the target rectangle (identity when none is given).
    let (sx, sy, tx, ty) = if let Some(rect) = rect {
        (
            rect.size().width as f32 / atlas.width as f32,
            rect.size().height as f32 / atlas.height as f32,
            rect.min.x as f32,
            rect.min.y as f32,
        )
    } else {
        (1.0, 1.0, 0.0, 0.0)
    };
    // Dark background covering the whole atlas.
    writeln!(
        output,
        r#" {}"#,
        rectangle(tx, ty, atlas.width as f32 * sx, atlas.height as f32 * sy)
            .fill(rgb(40, 40, 40))
            .stroke(Stroke::Color(black(), 1.0))
    )?;
    for shelf in &atlas.shelves {
        let mut bin_index = shelf.first_bin;
        // The shelf's vertical extent in output coordinates.
        let y = shelf.y as f32 * sy + ty;
        let h = shelf.height as f32 * sy;
        while bin_index != BinIndex::INVALID {
            let bin = &atlas.bins[bin_index.to_usize()];
            let x = bin.x as f32 * sx + tx;
            // Occupied portion of the bin, drawn in blue.
            let w = (shelf.bin_width - bin.free_space) as f32 * sx;
            writeln!(
                output,
                r#" {}"#,
                rectangle(x, y, w, h)
                    .fill(rgb(70, 70, 180))
                    .stroke(Stroke::Color(black(), 1.0))
            )?;
            if bin.free_space > 0 {
                // Remaining free portion of the bin, drawn in grey.
                let x_free = x + w;
                let w_free = bin.free_space as f32 * sx;
                writeln!(
                    output,
                    r#" {}"#,
                    rectangle(x_free, y, w_free, h)
                        .fill(rgb(50, 50, 50))
                        .stroke(Stroke::Color(black(), 1.0))
                )?;
            }
            bin_index = bin.next;
        }
    }
    Ok(())
}
/// Round `size` up, in place, to the next multiple of `alignment`.
fn adjust_size(alignment: i32, size: &mut i32) {
    let misalignment = *size % alignment;
    if misalignment > 0 {
        *size += alignment - misalignment;
    }
}
#[test]
fn atlas_basic() {
    let mut atlas = AtlasAllocator::new(size2(1000, 1000));
    // A full-size allocation leaves no room for anything else.
    let full = atlas.allocate(size2(1000, 1000)).unwrap().0;
    assert!(atlas.allocate(size2(1, 1)).is_none());
    atlas.deallocate(full);
    // Interleave allocations and deallocations of varied sizes to exercise
    // bin reuse and shelf garbage collection.
    let a = atlas.allocate(size2(10, 10)).unwrap().0;
    let b = atlas.allocate(size2(50, 30)).unwrap().0;
    let c = atlas.allocate(size2(12, 45)).unwrap().0;
    let d = atlas.allocate(size2(60, 45)).unwrap().0;
    let e = atlas.allocate(size2(1, 1)).unwrap().0;
    let f = atlas.allocate(size2(128, 128)).unwrap().0;
    let g = atlas.allocate(size2(256, 256)).unwrap().0;
    atlas.deallocate(b);
    atlas.deallocate(f);
    atlas.deallocate(c);
    atlas.deallocate(e);
    let h = atlas.allocate(size2(500, 200)).unwrap().0;
    atlas.deallocate(a);
    let i = atlas.allocate(size2(500, 200)).unwrap().0;
    atlas.deallocate(g);
    atlas.deallocate(h);
    atlas.deallocate(d);
    atlas.deallocate(i);
    // Once everything is freed, the full atlas must be allocatable again.
    let full = atlas.allocate(size2(1000, 1000)).unwrap().0;
    assert!(atlas.allocate(size2(1, 1)).is_none());
    atlas.deallocate(full);
}
#[test]
fn test_coalesce_shelves() {
    let mut atlas = AtlasAllocator::new(size2(256, 256));
    // Allocate 7 shelves (leaving 32px of remaining space on top).
    let mut ids = Vec::new();
    for _ in 0..7 {
        for _ in 0..8 {
            ids.push(atlas.allocate(size2(32, 32)).unwrap().0)
        }
    }
    // Free the first shelf.
    for i in 0..8 {
        atlas.deallocate(ids[i]);
    }
    // Free the 3rd and 4th shelf.
    for i in 16..32 {
        atlas.deallocate(ids[i]);
    }
    // Not enough space left in existing shelves and above.
    // even coalescing is not sufficient.
    assert!(atlas.allocate(size2(70, 70)).is_none());
    // Not enough space left in existing shelves and above.
    // The 3rd and 4th row can be coalesced to fit this allocation, though.
    let id = atlas.allocate(size2(64, 64)).unwrap().0;
    // Deallocate everything
    for i in 8..16 {
        atlas.deallocate(ids[i]);
    }
    atlas.deallocate(id);
    for i in 32..56 {
        atlas.deallocate(ids[i]);
    }
    //dump_svg(&atlas, &mut std::fs::File::create("tmp.svg").expect("!!"));
    // All shelves should have been garbage-collected.
    assert!(atlas.is_empty());
}
|
extern crate tpp;
fn main() {
    // Expect exactly one argument: the path of the tpp file to present.
    let args: Vec<String> = std::env::args().collect();
    if args.len() < 2 {
        println!("Usage:\n\ttpp file");
        return;
    }
    match tpp::parse_file(&args[1]) {
        Ok(v) => {
            tpp::start(v);
        }
        Err(_) => {
            // Previously a parse failure exited silently; report it and
            // signal failure through the exit status.
            eprintln!("tpp: failed to parse {}", args[1]);
            std::process::exit(1);
        }
    }
}
|
use ::*;
// Touch event type codes; values mirror the EMSCRIPTEN_EVENT_* constants in
// emscripten's html5.h header.
pub const EMSCRIPTEN_EVENT_TOUCHSTART: EM_EVENT_TYPE = 22;
pub const EMSCRIPTEN_EVENT_TOUCHEND: EM_EVENT_TYPE = 23;
pub const EMSCRIPTEN_EVENT_TOUCHMOVE: EM_EVENT_TYPE = 24;
pub const EMSCRIPTEN_EVENT_TOUCHCANCEL: EM_EVENT_TYPE = 25;
/// One touch point of a touch event. Field names keep the C struct's camel
/// casing since this is a direct FFI mirror of emscripten's layout.
#[repr(C)]
#[derive(Debug)]
pub struct EmscriptenTouchPoint {
    // Identifier of this touch across events (per the DOM Touch interface).
    pub identifier: c_long,
    // Coordinates relative to the screen.
    pub screenX: c_long,
    pub screenY: c_long,
    // Coordinates relative to the viewport.
    pub clientX: c_long,
    pub clientY: c_long,
    // Coordinates relative to the page.
    pub pageX: c_long,
    pub pageY: c_long,
    // Whether this touch point changed during the current event.
    pub isChanged: EM_BOOL,
    // Whether this touch point is on the event's target element.
    pub onTarget: EM_BOOL,
    // Coordinates relative to the event target.
    pub targetX: c_long,
    pub targetY: c_long,
    // Coordinates relative to the canvas.
    pub canvasX: c_long,
    pub canvasY: c_long,
}
/// A touch event as delivered by emscripten's HTML5 API.
#[repr(C)]
#[derive(Debug)]
pub struct EmscriptenTouchEvent {
    // Number of touch points in this event.
    pub numTouches: c_int,
    // Modifier-key state at the time the event fired.
    pub ctrlKey: EM_BOOL,
    pub shiftKey: EM_BOOL,
    pub altKey: EM_BOOL,
    pub metaKey: EM_BOOL,
    // Fixed-capacity array; presumably only the first `numTouches` entries
    // are meaningful — mirrors the C API. TODO confirm against html5.h.
    pub touches: [EmscriptenTouchPoint; 32],
}
/// C signature of a touch event callback. `Option` is the FFI-safe way to
/// represent a nullable function pointer (pass `None` for no callback).
pub type em_touch_callback_func = Option<
    unsafe extern "C" fn(
        eventType: EM_EVENT_TYPE,
        touchEvent: *const EmscriptenTouchEvent,
        userData: *mut c_void,
    ) -> EM_BOOL,
>;
extern "C" {
    /// Register `callback` for touchstart events on the `target` element.
    /// `userData` is passed through to the callback unchanged.
    pub fn emscripten_set_touchstart_callback(
        target: *const c_char,
        userData: *mut c_void,
        useCapture: EM_BOOL,
        callback: em_touch_callback_func,
    ) -> EMSCRIPTEN_RESULT;
    /// Register `callback` for touchend events on the `target` element.
    pub fn emscripten_set_touchend_callback(
        target: *const c_char,
        userData: *mut c_void,
        useCapture: EM_BOOL,
        callback: em_touch_callback_func,
    ) -> EMSCRIPTEN_RESULT;
    /// Register `callback` for touchmove events on the `target` element.
    pub fn emscripten_set_touchmove_callback(
        target: *const c_char,
        userData: *mut c_void,
        useCapture: EM_BOOL,
        callback: em_touch_callback_func,
    ) -> EMSCRIPTEN_RESULT;
    /// Register `callback` for touchcancel events on the `target` element.
    pub fn emscripten_set_touchcancel_callback(
        target: *const c_char,
        userData: *mut c_void,
        useCapture: EM_BOOL,
        callback: em_touch_callback_func,
    ) -> EMSCRIPTEN_RESULT;
}
|
use super::flags::CARRY;
use super::flags::HALF_CARRY;
use super::flags::ZERO;
use crate::cpu::CPU;
use crate::mmu::MMU;
pub fn execute_cb(cpu: &mut CPU, mmu: &mut MMU) -> u8 {
let op_code = cpu.fetch_byte(mmu);
match op_code {
0x00 => rlc(&mut cpu.regs.b, &mut cpu.regs.f),
0x01 => rlc(&mut cpu.regs.c, &mut cpu.regs.f),
0x02 => rlc(&mut cpu.regs.d, &mut cpu.regs.f),
0x03 => rlc(&mut cpu.regs.e, &mut cpu.regs.f),
0x04 => rlc(&mut cpu.regs.h, &mut cpu.regs.f),
0x05 => rlc(&mut cpu.regs.l, &mut cpu.regs.f),
0x06 => func_hl(cpu, mmu, rlc),
0x07 => rlc(&mut cpu.regs.a, &mut cpu.regs.f),
0x08 => rrc(&mut cpu.regs.b, &mut cpu.regs.f),
0x09 => rrc(&mut cpu.regs.c, &mut cpu.regs.f),
0x0a => rrc(&mut cpu.regs.d, &mut cpu.regs.f),
0x0b => rrc(&mut cpu.regs.e, &mut cpu.regs.f),
0x0c => rrc(&mut cpu.regs.h, &mut cpu.regs.f),
0x0d => rrc(&mut cpu.regs.l, &mut cpu.regs.f),
0x0e => func_hl(cpu, mmu, rrc),
0x0f => rrc(&mut cpu.regs.a, &mut cpu.regs.f),
0x10 => rl(&mut cpu.regs.b, &mut cpu.regs.f),
0x11 => rl(&mut cpu.regs.c, &mut cpu.regs.f),
0x12 => rl(&mut cpu.regs.d, &mut cpu.regs.f),
0x13 => rl(&mut cpu.regs.e, &mut cpu.regs.f),
0x14 => rl(&mut cpu.regs.h, &mut cpu.regs.f),
0x15 => rl(&mut cpu.regs.l, &mut cpu.regs.f),
0x16 => func_hl(cpu, mmu, rl),
0x17 => rl(&mut cpu.regs.a, &mut cpu.regs.f),
0x18 => rr(&mut cpu.regs.b, &mut cpu.regs.f),
0x19 => rr(&mut cpu.regs.c, &mut cpu.regs.f),
0x1a => rr(&mut cpu.regs.d, &mut cpu.regs.f),
0x1b => rr(&mut cpu.regs.e, &mut cpu.regs.f),
0x1c => rr(&mut cpu.regs.h, &mut cpu.regs.f),
0x1d => rr(&mut cpu.regs.l, &mut cpu.regs.f),
0x1e => func_hl(cpu, mmu, rr),
0x1f => rr(&mut cpu.regs.a, &mut cpu.regs.f),
0x20 => sla(&mut cpu.regs.b, &mut cpu.regs.f),
0x21 => sla(&mut cpu.regs.c, &mut cpu.regs.f),
0x22 => sla(&mut cpu.regs.d, &mut cpu.regs.f),
0x23 => sla(&mut cpu.regs.e, &mut cpu.regs.f),
0x24 => sla(&mut cpu.regs.h, &mut cpu.regs.f),
0x25 => sla(&mut cpu.regs.l, &mut cpu.regs.f),
0x26 => func_hl(cpu, mmu, sla),
0x27 => sla(&mut cpu.regs.a, &mut cpu.regs.f),
0x28 => sra(&mut cpu.regs.b, &mut cpu.regs.f),
0x29 => sra(&mut cpu.regs.c, &mut cpu.regs.f),
0x2a => sra(&mut cpu.regs.d, &mut cpu.regs.f),
0x2b => sra(&mut cpu.regs.e, &mut cpu.regs.f),
0x2c => sra(&mut cpu.regs.h, &mut cpu.regs.f),
0x2d => sra(&mut cpu.regs.l, &mut cpu.regs.f),
0x2e => func_hl(cpu, mmu, sra),
0x2f => sra(&mut cpu.regs.a, &mut cpu.regs.f),
0x30 => swap(&mut cpu.regs.b, &mut cpu.regs.f),
0x31 => swap(&mut cpu.regs.c, &mut cpu.regs.f),
0x32 => swap(&mut cpu.regs.d, &mut cpu.regs.f),
0x33 => swap(&mut cpu.regs.e, &mut cpu.regs.f),
0x34 => swap(&mut cpu.regs.h, &mut cpu.regs.f),
0x35 => swap(&mut cpu.regs.l, &mut cpu.regs.f),
0x36 => func_hl(cpu, mmu, swap),
0x37 => swap(&mut cpu.regs.a, &mut cpu.regs.f),
0x38 => srl(&mut cpu.regs.b, &mut cpu.regs.f),
0x39 => srl(&mut cpu.regs.c, &mut cpu.regs.f),
0x3a => srl(&mut cpu.regs.d, &mut cpu.regs.f),
0x3b => srl(&mut cpu.regs.e, &mut cpu.regs.f),
0x3c => srl(&mut cpu.regs.h, &mut cpu.regs.f),
0x3d => srl(&mut cpu.regs.l, &mut cpu.regs.f),
0x3e => func_hl(cpu, mmu, srl),
0x3f => srl(&mut cpu.regs.a, &mut cpu.regs.f),
0x40 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 0),
0x41 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 0),
0x42 => bit(&mut cpu.regs.d, &mut cpu.regs.f, 0),
0x43 => bit(&mut cpu.regs.e, &mut cpu.regs.f, 0),
0x44 => bit(&mut cpu.regs.h, &mut cpu.regs.f, 0),
0x45 => bit(&mut cpu.regs.l, &mut cpu.regs.f, 0),
0x46 => bit_hl(cpu, mmu, 0),
0x47 => bit(&mut cpu.regs.a, &mut cpu.regs.f, 0),
0x48 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 1),
0x49 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 1),
0x4a => bit(&mut cpu.regs.d, &mut cpu.regs.f, 1),
0x4b => bit(&mut cpu.regs.e, &mut cpu.regs.f, 1),
0x4c => bit(&mut cpu.regs.h, &mut cpu.regs.f, 1),
0x4d => bit(&mut cpu.regs.l, &mut cpu.regs.f, 1),
0x4e => bit_hl(cpu, mmu, 1),
0x4f => bit(&mut cpu.regs.a, &mut cpu.regs.f, 1),
0x50 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 2),
0x51 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 2),
0x52 => bit(&mut cpu.regs.d, &mut cpu.regs.f, 2),
0x53 => bit(&mut cpu.regs.e, &mut cpu.regs.f, 2),
0x54 => bit(&mut cpu.regs.h, &mut cpu.regs.f, 2),
0x55 => bit(&mut cpu.regs.l, &mut cpu.regs.f, 2),
0x56 => bit_hl(cpu, mmu, 2),
0x57 => bit(&mut cpu.regs.a, &mut cpu.regs.f, 2),
0x58 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 3),
0x59 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 3),
0x5a => bit(&mut cpu.regs.d, &mut cpu.regs.f, 3),
0x5b => bit(&mut cpu.regs.e, &mut cpu.regs.f, 3),
0x5c => bit(&mut cpu.regs.h, &mut cpu.regs.f, 3),
0x5d => bit(&mut cpu.regs.l, &mut cpu.regs.f, 3),
0x5e => bit_hl(cpu, mmu, 3),
0x5f => bit(&mut cpu.regs.a, &mut cpu.regs.f, 3),
0x60 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 4),
0x61 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 4),
0x62 => bit(&mut cpu.regs.d, &mut cpu.regs.f, 4),
0x63 => bit(&mut cpu.regs.e, &mut cpu.regs.f, 4),
0x64 => bit(&mut cpu.regs.h, &mut cpu.regs.f, 4),
0x65 => bit(&mut cpu.regs.l, &mut cpu.regs.f, 4),
0x66 => bit_hl(cpu, mmu, 4),
0x67 => bit(&mut cpu.regs.a, &mut cpu.regs.f, 4),
0x68 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 5),
0x69 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 5),
0x6a => bit(&mut cpu.regs.d, &mut cpu.regs.f, 5),
0x6b => bit(&mut cpu.regs.e, &mut cpu.regs.f, 5),
0x6c => bit(&mut cpu.regs.h, &mut cpu.regs.f, 5),
0x6d => bit(&mut cpu.regs.l, &mut cpu.regs.f, 5),
0x6e => bit_hl(cpu, mmu, 5),
0x6f => bit(&mut cpu.regs.a, &mut cpu.regs.f, 5),
0x70 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 6),
0x71 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 6),
0x72 => bit(&mut cpu.regs.d, &mut cpu.regs.f, 6),
0x73 => bit(&mut cpu.regs.e, &mut cpu.regs.f, 6),
0x74 => bit(&mut cpu.regs.h, &mut cpu.regs.f, 6),
0x75 => bit(&mut cpu.regs.l, &mut cpu.regs.f, 6),
0x76 => bit_hl(cpu, mmu, 6),
0x77 => bit(&mut cpu.regs.a, &mut cpu.regs.f, 6),
0x78 => bit(&mut cpu.regs.b, &mut cpu.regs.f, 7),
0x79 => bit(&mut cpu.regs.c, &mut cpu.regs.f, 7),
0x7a => bit(&mut cpu.regs.d, &mut cpu.regs.f, 7),
0x7b => bit(&mut cpu.regs.e, &mut cpu.regs.f, 7),
0x7c => bit(&mut cpu.regs.h, &mut cpu.regs.f, 7),
0x7d => bit(&mut cpu.regs.l, &mut cpu.regs.f, 7),
0x7e => bit_hl(cpu, mmu, 7),
0x7f => bit(&mut cpu.regs.a, &mut cpu.regs.f, 7),
0x80 => res(&mut cpu.regs.b, 0),
0x81 => res(&mut cpu.regs.c, 0),
0x82 => res(&mut cpu.regs.d, 0),
0x83 => res(&mut cpu.regs.e, 0),
0x84 => res(&mut cpu.regs.h, 0),
0x85 => res(&mut cpu.regs.l, 0),
0x86 => bitfunc_hl(cpu, mmu, 0, res),
0x87 => res(&mut cpu.regs.a, 0),
0x88 => res(&mut cpu.regs.b, 1),
0x89 => res(&mut cpu.regs.c, 1),
0x8a => res(&mut cpu.regs.d, 1),
0x8b => res(&mut cpu.regs.e, 1),
0x8c => res(&mut cpu.regs.h, 1),
0x8d => res(&mut cpu.regs.l, 1),
0x8e => bitfunc_hl(cpu, mmu, 1, res),
0x8f => res(&mut cpu.regs.a, 1),
0x90 => res(&mut cpu.regs.b, 2),
0x91 => res(&mut cpu.regs.c, 2),
0x92 => res(&mut cpu.regs.d, 2),
0x93 => res(&mut cpu.regs.e, 2),
0x94 => res(&mut cpu.regs.h, 2),
0x95 => res(&mut cpu.regs.l, 2),
0x96 => bitfunc_hl(cpu, mmu, 2, res),
0x97 => res(&mut cpu.regs.a, 2),
0x98 => res(&mut cpu.regs.b, 3),
0x99 => res(&mut cpu.regs.c, 3),
0x9a => res(&mut cpu.regs.d, 3),
0x9b => res(&mut cpu.regs.e, 3),
0x9c => res(&mut cpu.regs.h, 3),
0x9d => res(&mut cpu.regs.l, 3),
0x9e => bitfunc_hl(cpu, mmu, 3, res),
0x9f => res(&mut cpu.regs.a, 3),
0xa0 => res(&mut cpu.regs.b, 4),
0xa1 => res(&mut cpu.regs.c, 4),
0xa2 => res(&mut cpu.regs.d, 4),
0xa3 => res(&mut cpu.regs.e, 4),
0xa4 => res(&mut cpu.regs.h, 4),
0xa5 => res(&mut cpu.regs.l, 4),
0xa6 => bitfunc_hl(cpu, mmu, 4, res),
0xa7 => res(&mut cpu.regs.a, 4),
0xa8 => res(&mut cpu.regs.b, 5),
0xa9 => res(&mut cpu.regs.c, 5),
0xaa => res(&mut cpu.regs.d, 5),
0xab => res(&mut cpu.regs.e, 5),
0xac => res(&mut cpu.regs.h, 5),
0xad => res(&mut cpu.regs.l, 5),
0xae => bitfunc_hl(cpu, mmu, 5, res),
0xaf => res(&mut cpu.regs.a, 5),
0xb0 => res(&mut cpu.regs.b, 6),
0xb1 => res(&mut cpu.regs.c, 6),
0xb2 => res(&mut cpu.regs.d, 6),
0xb3 => res(&mut cpu.regs.e, 6),
0xb4 => res(&mut cpu.regs.h, 6),
0xb5 => res(&mut cpu.regs.l, 6),
0xb6 => bitfunc_hl(cpu, mmu, 6, res),
0xb7 => res(&mut cpu.regs.a, 6),
0xb8 => res(&mut cpu.regs.b, 7),
0xb9 => res(&mut cpu.regs.c, 7),
0xba => res(&mut cpu.regs.d, 7),
0xbb => res(&mut cpu.regs.e, 7),
0xbc => res(&mut cpu.regs.h, 7),
0xbd => res(&mut cpu.regs.l, 7),
0xbe => bitfunc_hl(cpu, mmu, 7, res),
0xbf => res(&mut cpu.regs.a, 7),
0xc0 => set(&mut cpu.regs.b, 0),
0xc1 => set(&mut cpu.regs.c, 0),
0xc2 => set(&mut cpu.regs.d, 0),
0xc3 => set(&mut cpu.regs.e, 0),
0xc4 => set(&mut cpu.regs.h, 0),
0xc5 => set(&mut cpu.regs.l, 0),
0xc6 => bitfunc_hl(cpu, mmu, 0, set),
0xc7 => set(&mut cpu.regs.a, 0),
0xc8 => set(&mut cpu.regs.b, 1),
0xc9 => set(&mut cpu.regs.c, 1),
0xca => set(&mut cpu.regs.d, 1),
0xcb => set(&mut cpu.regs.e, 1),
0xcc => set(&mut cpu.regs.h, 1),
0xcd => set(&mut cpu.regs.l, 1),
0xce => bitfunc_hl(cpu, mmu, 1, set),
0xcf => set(&mut cpu.regs.a, 1),
0xd0 => set(&mut cpu.regs.b, 2),
0xd1 => set(&mut cpu.regs.c, 2),
0xd2 => set(&mut cpu.regs.d, 2),
0xd3 => set(&mut cpu.regs.e, 2),
0xd4 => set(&mut cpu.regs.h, 2),
0xd5 => set(&mut cpu.regs.l, 2),
0xd6 => bitfunc_hl(cpu, mmu, 2, set),
0xd7 => set(&mut cpu.regs.a, 2),
0xd8 => set(&mut cpu.regs.b, 3),
0xd9 => set(&mut cpu.regs.c, 3),
0xda => set(&mut cpu.regs.d, 3),
0xdb => set(&mut cpu.regs.e, 3),
0xdc => set(&mut cpu.regs.h, 3),
0xdd => set(&mut cpu.regs.l, 3),
0xde => bitfunc_hl(cpu, mmu, 3, set),
0xdf => set(&mut cpu.regs.a, 3),
0xe0 => set(&mut cpu.regs.b, 4),
0xe1 => set(&mut cpu.regs.c, 4),
0xe2 => set(&mut cpu.regs.d, 4),
0xe3 => set(&mut cpu.regs.e, 4),
0xe4 => set(&mut cpu.regs.h, 4),
0xe5 => set(&mut cpu.regs.l, 4),
0xe6 => bitfunc_hl(cpu, mmu, 4, set),
0xe7 => set(&mut cpu.regs.a, 4),
0xe8 => set(&mut cpu.regs.b, 5),
0xe9 => set(&mut cpu.regs.c, 5),
0xea => set(&mut cpu.regs.d, 5),
0xeb => set(&mut cpu.regs.e, 5),
0xec => set(&mut cpu.regs.h, 5),
0xed => set(&mut cpu.regs.l, 5),
0xee => bitfunc_hl(cpu, mmu, 5, set),
0xef => set(&mut cpu.regs.a, 5),
0xf0 => set(&mut cpu.regs.b, 6),
0xf1 => set(&mut cpu.regs.c, 6),
0xf2 => set(&mut cpu.regs.d, 6),
0xf3 => set(&mut cpu.regs.e, 6),
0xf4 => set(&mut cpu.regs.h, 6),
0xf5 => set(&mut cpu.regs.l, 6),
0xf6 => bitfunc_hl(cpu, mmu, 6, set),
0xf7 => set(&mut cpu.regs.a, 6),
0xf8 => set(&mut cpu.regs.b, 7),
0xf9 => set(&mut cpu.regs.c, 7),
0xfa => set(&mut cpu.regs.d, 7),
0xfb => set(&mut cpu.regs.e, 7),
0xfc => set(&mut cpu.regs.h, 7),
0xfd => set(&mut cpu.regs.l, 7),
0xfe => bitfunc_hl(cpu, mmu, 7, set),
0xff => set(&mut cpu.regs.a, 7),
v => {
println!("Impossible cb instruction: {}", v);
0
}
}
}
/// Applies `func` to the byte at address HL and writes the result back.
/// Returns the instruction's total cycle count (the memory access adds 8
/// cycles on top of the register-form cycle count returned by `func`).
fn func_hl(cpu: &mut CPU, mmu: &mut MMU, func: fn(&mut u8, &mut u8) -> u8) -> u8 {
    let addr = cpu.regs.get_hl();
    let mut byte = mmu.read_byte(addr);
    let cycles = func(&mut byte, &mut cpu.regs.f);
    mmu.write_byte(addr, byte);
    cycles + 8
}
/// Applies the bit-manipulation `func` (RES/SET) with bit number `n` to the
/// byte at address HL and writes the result back. The memory access adds 8
/// cycles to the cycle count returned by `func`.
fn bitfunc_hl(cpu: &mut CPU, mmu: &mut MMU, n: u8, func: fn(&mut u8, u8) -> u8) -> u8 {
    let addr = cpu.regs.get_hl();
    let mut byte = mmu.read_byte(addr);
    let cycles = func(&mut byte, n);
    mmu.write_byte(addr, byte);
    cycles + 8
}
/// Executes BIT n, (HL): tests bit `n` of the byte at address HL (12 cycles).
///
/// BIT only inspects the operand, so nothing is written back to memory.
/// The previous version wrote the unchanged byte back, which can trigger
/// write side effects on memory-mapped I/O registers (e.g. resetting DIV).
fn bit_hl(cpu: &mut CPU, mmu: &mut MMU, n: u8) -> u8 {
    let mut value = mmu.read_byte(cpu.regs.get_hl());
    let cycles = bit(&mut value, &mut cpu.regs.f, n);
    4 + cycles
}
/// RLC: rotate left circular — bit 7 wraps around to bit 0 and also lands
/// in the carry flag. ZERO is set if the result is 0; other flags cleared.
fn rlc(register: &mut u8, f: &mut u8) -> u8 {
    let carried = *register & 0x80 != 0;
    *register = register.rotate_left(1);
    let mut flags = if carried { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// RRC: rotate right circular — bit 0 wraps around to bit 7 and also lands
/// in the carry flag. ZERO is set if the result is 0; other flags cleared.
fn rrc(register: &mut u8, f: &mut u8) -> u8 {
    let carried = *register & 0x01 != 0;
    *register = register.rotate_right(1);
    let mut flags = if carried { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// RL: rotate left through carry — the old carry flag enters bit 0 and the
/// old bit 7 becomes the new carry. ZERO is set if the result is 0.
fn rl(register: &mut u8, f: &mut u8) -> u8 {
    let incoming = if *f & CARRY == CARRY { 1 } else { 0 };
    let outgoing = *register & 0x80 != 0;
    *register = (*register << 1) | incoming;
    let mut flags = if outgoing { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// RR: rotate right through carry — the old carry flag enters bit 7 and the
/// old bit 0 becomes the new carry. ZERO is set if the result is 0.
fn rr(register: &mut u8, f: &mut u8) -> u8 {
    let incoming = if *f & CARRY == CARRY { 0x80 } else { 0 };
    let outgoing = *register & 0x01 != 0;
    *register = (*register >> 1) | incoming;
    let mut flags = if outgoing { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// SLA: arithmetic shift left — bit 7 goes to carry, bit 0 is filled with 0.
/// ZERO is set if the result is 0; other flags cleared.
fn sla(register: &mut u8, f: &mut u8) -> u8 {
    let outgoing = *register & 0x80 != 0;
    *register <<= 1;
    let mut flags = if outgoing { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// SRA: arithmetic shift right — bit 0 goes to carry while bit 7 (the sign
/// bit) is preserved. ZERO is set if the result is 0; other flags cleared.
fn sra(register: &mut u8, f: &mut u8) -> u8 {
    let outgoing = *register & 0x01 != 0;
    let sign = *register & 0x80;
    *register = (*register >> 1) | sign;
    let mut flags = if outgoing { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// SWAP: exchanges the high and low nibbles of the register. Only the ZERO
/// flag can end up set; carry and half-carry are always cleared.
fn swap(register: &mut u8, f: &mut u8) -> u8 {
    // Rotating an 8-bit value by 4 swaps its nibbles.
    *register = register.rotate_left(4);
    *f = if *register == 0 { ZERO } else { 0 };
    8
}
/// SRL: logical shift right — bit 0 goes to carry, bit 7 is filled with 0.
/// ZERO is set if the result is 0; other flags cleared.
fn srl(register: &mut u8, f: &mut u8) -> u8 {
    let outgoing = *register & 0x01 != 0;
    *register >>= 1;
    let mut flags = if outgoing { CARRY } else { 0 };
    if *register == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// BIT: tests bit `n` of the register — ZERO is set if the bit is clear,
/// HALF_CARRY is always set, CARRY is preserved, and the register itself is
/// never modified.
fn bit(register: &mut u8, f: &mut u8, n: u8) -> u8 {
    let mask = 1u8 << n;
    let mut flags = (*f & CARRY) | HALF_CARRY;
    if *register & mask == 0 {
        flags |= ZERO;
    }
    *f = flags;
    8
}
/// RES: clears bit `n` of the register. Flags are unaffected.
fn res(register: &mut u8, n: u8) -> u8 {
    let mask = !(1u8 << n);
    *register &= mask;
    8
}
/// SET: sets bit `n` of the register. Flags are unaffected.
fn set(register: &mut u8, n: u8) -> u8 {
    let mask = 1u8 << n;
    *register |= mask;
    8
}
|
use std::fs::File;
use std::io::{self, BufRead};
/// Reads `filename` and returns its lines as a vector of strings.
///
/// Any I/O error from opening or reading the file is propagated to the
/// caller.
fn load_dictionary(filename: &str) -> std::io::Result<Vec<String>> {
    let reader = io::BufReader::new(File::open(filename)?);
    // Collecting an iterator of io::Result<String> into io::Result<Vec<_>>
    // short-circuits on the first read error.
    reader.lines().collect()
}
/// Computes the Jaro-Winkler distance between two strings.
///
/// Returns a value in [0, 1]: 0.0 for identical strings, 1.0 for completely
/// dissimilar ones (this is 1 minus the Jaro-Winkler similarity).
fn jaro_winkler_distance(string1: &str, string2: &str) -> f64 {
    let mut st1 = string1;
    let mut st2 = string2;
    let mut len1 = st1.chars().count();
    let mut len2 = st2.chars().count();
    // Ensure st1 is the longer of the two strings.
    if len1 < len2 {
        std::mem::swap(&mut st1, &mut st2);
        std::mem::swap(&mut len1, &mut len2);
    }
    if len2 == 0 {
        return if len1 == 0 { 0.0 } else { 1.0 };
    }
    // Two characters "match" if equal and within `delta` positions.
    let delta = std::cmp::max(1, len1 / 2) - 1;
    let mut flag = vec![false; len2];
    let mut ch1_match = vec![];
    for (idx1, ch1) in st1.chars().enumerate() {
        for (idx2, ch2) in st2.chars().enumerate() {
            if idx2 <= idx1 + delta && idx2 + delta >= idx1 && ch1 == ch2 && !flag[idx2] {
                flag[idx2] = true;
                ch1_match.push(ch1);
                break;
            }
        }
    }
    let matches = ch1_match.len();
    if matches == 0 {
        return 1.0;
    }
    // Count half-transpositions between the two matched sequences.
    let mut transpositions = 0;
    let mut idx1 = 0;
    for (idx2, ch2) in st2.chars().enumerate() {
        if flag[idx2] {
            transpositions += (ch2 != ch1_match[idx1]) as i32;
            idx1 += 1;
        }
    }
    let m = matches as f64;
    let jaro =
        (m / (len1 as f64) + m / (len2 as f64) + (m - (transpositions as f64) / 2.0) / m) / 3.0;
    // Winkler prefix bonus: length of the common prefix, capped at 4.
    // Fixed: the previous version counted equal characters anywhere within
    // the first four positions instead of stopping at the first mismatch,
    // overstating the bonus for strings like "abxd"/"abyd".
    let commonprefix = st1
        .chars()
        .zip(st2.chars())
        .take(4)
        .take_while(|(c1, c2)| c1 == c2)
        .count();
    1.0 - (jaro + commonprefix as f64 * 0.1 * (1.0 - jaro))
}
/// Returns up to `max_to_return` dictionary words whose Jaro-Winkler
/// distance to `stri` is at most `max_distance`, sorted closest first.
fn within_distance<'a>(
    dict: &'a [String],
    max_distance: f64,
    stri: &str,
    max_to_return: usize,
) -> Vec<(&'a String, f64)> {
    let mut arr: Vec<(&String, f64)> = dict
        .iter()
        .map(|w| (w, jaro_winkler_distance(stri, w)))
        .filter(|x| x.1 <= max_distance)
        .collect();
    // f64 is not Ord (NaN), so sort with partial_cmp; the distances produced
    // here are always real numbers, so Equal is a safe fallback.
    arr.sort_by(|x, y| x.1.partial_cmp(&y.1).unwrap_or(std::cmp::Ordering::Equal));
    // Keep only the closest entries; truncate avoids the slice-and-copy the
    // previous version performed.
    arr.truncate(max_to_return);
    arr
}
fn main() {
match load_dictionary("linuxwords.txt") {
Ok(dict) => {
for word in &[
"accomodate",
"definately",
"goverment",
"occured",
"publically",
"recieve",
"seperate",
"untill",
"wich",
] {
println!("Close dictionary words (distance < 0.15 using Jaro-Winkler distance) to '{}' are:", word);
println!(" Word | Distance");
for (w, dist) in within_distance(&dict, 0.15, word, 5) {
println!("{:>14} | {:6.4}", w, dist)
}
println!();
}
}
Err(error) => eprintln!("{}", error),
}
} |
use bevy::{prelude::*,};
use bevy::asset::{AssetLoader, LoadContext, LoadedAsset};
use bevy::reflect::{TypeUuid};
use bevy::utils::{BoxedFuture};
use serde::Deserialize;
use crate::{level_collision, cutscene, enemy};
// this is for hot reloading
// Asset loader that deserializes `.lvl` files (RON) into `LevelInfo` assets.
#[derive(Default)]
pub struct LevelsAssetLoader;
impl AssetLoader for LevelsAssetLoader {
    // Parses the raw file bytes as RON into a `LevelInfo` and registers it
    // as the default asset for this load context. A parse failure is
    // propagated via `?` as an anyhow error.
    fn load<'a>(
        &'a self,
        bytes: &'a [u8],
        load_context: &'a mut LoadContext,
    ) -> BoxedFuture<'a, Result<(), anyhow::Error>> {
        Box::pin(async move {
            println!("Level asset reloaded");
            let lvl_asset = ron::de::from_bytes::<LevelInfo>(bytes)?;
            load_context.set_default_asset(LoadedAsset::new(lvl_asset));
            Ok(())
        })
    }
    // Only files with the `.lvl` extension are routed to this loader.
    fn extensions(&self) -> &[&str] {
        &["lvl", ]
    }
}
// Resource tracking the handles of assets that must finish loading before
// the app can leave the loading state.
#[derive(Default)]
pub struct AssetsLoading {
    pub asset_handles: Vec<HandleUntyped>
}
// Polls the load state of every tracked asset handle and, once none are
// still pending, transitions the app into the MainMenu state.
pub fn check_assets_ready(
    mut state: ResMut<State<crate::AppState>>,
    server: Res<AssetServer>,
    loading: Res<AssetsLoading>,
) {
    println!("Loading...");
    use bevy::asset::LoadState;
    let mut ready = true;
    for handle in loading.asset_handles.iter() {
        match server.get_load_state(handle) {
            LoadState::Failed => {
                // one of our assets had an error
                // NOTE(review): a failed asset leaves `ready` true, so the
                // state still transitions as if everything loaded — confirm
                // this is intended (or record/log the failure here).
            }
            LoadState::Loaded => {
            }
            _ => {
                // Still loading (or not started): hold the transition.
                ready = false;
            }
        }
    }
    if ready {
        state.set(crate::AppState::MainMenu).unwrap();
    }
}
// Resource holding the handle of the currently loaded level asset.
#[derive(Default)]
pub struct LevelInfoState {
    pub handle: Handle<LevelInfo>,
}
// Deserialized contents of a `.lvl` (RON) file: collision geometry,
// cutscenes and enemy spawn points for one level.
#[derive(Debug, Clone, Deserialize, TypeUuid)]
#[uuid = "39cadc56-aa9c-4543-8640-a018b74b5052"]
pub struct LevelInfo {
    pub collision_info: level_collision::LevelCollisionInfo,
    pub cutscenes: cutscene::Cutscenes,
    pub enemies: Vec::<enemy::EnemySpawnPoint>,
}
|
use alloc::string::String;
use alloc::vec::Vec;
use crate::Client;
use chain::names::{AccountName, PermissionName};
use chain::permission_level::PermissionLevel;
use rpc_codegen::Fetch;
use serde::{Deserialize, Serialize};
/// Request body for the `v1/chain/get_account` RPC endpoint.
#[derive(Fetch, Debug, Clone, Serialize)]
#[api(path="v1/chain/get_account", http_method="POST", returns="GetAccount")]
pub struct GetAccountParams {
    account_name: AccountName,
}
/// Builds the request parameters for fetching `account_name` via
/// `v1/chain/get_account`.
pub const fn get_account(account_name: AccountName) -> GetAccountParams {
    GetAccountParams { account_name }
}
/// Response payload of `v1/chain/get_account`.
/// Optional fields are omitted by nodes for accounts without the
/// corresponding state (e.g. no core balance or voter info).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccount {
    pub account_name: AccountName,
    pub head_block_num: i64,
    pub head_block_time: String,
    pub privileged: bool,
    pub last_code_update: String,
    pub created: String,
    pub core_liquid_balance: Option<String>,
    pub ram_quota: i64,
    pub net_weight: i64,
    pub cpu_weight: i64,
    pub net_limit: Limit,
    pub cpu_limit: Limit,
    pub ram_usage: i64,
    pub permissions: Vec<Permission>,
    pub total_resources: Option<TotalResources>,
    pub self_delegated_bandwidth: Option<SelfDelegatedBandwidth>,
    pub refund_request: Option<RefundRequest>,
    pub voter_info: Option<VoterInfo>,
}
/// Resource limit triple (used/available/max) for NET or CPU.
#[derive(Serialize, Deserialize, Debug)]
pub struct Limit {
    pub used: i64,
    pub available: i64,
    pub max: i64,
}
/// One named permission of an account, with its parent and authority.
#[derive(Serialize, Deserialize, Debug)]
pub struct Permission {
    pub perm_name: PermissionName,
    pub parent: PermissionName,
    pub required_auth: RequiredAuth,
}
/// Weighted authority: keys, accounts and wait conditions whose combined
/// weight must reach `threshold` to satisfy the permission.
#[derive(Serialize, Deserialize, Debug)]
pub struct RequiredAuth {
    pub threshold: u32,
    pub keys: Vec<KeyWeight>,
    pub accounts: Vec<PermissionLevelWeight>,
    pub waits: Vec<WaitWeight>,
}
/// An account@permission entry with its contribution weight.
#[derive(Serialize, Deserialize, Debug)]
pub struct PermissionLevelWeight {
    pub permission: PermissionLevel,
    pub weight: u16,
}
/// A time-delay condition (in seconds) with its contribution weight.
#[derive(Serialize, Deserialize, Debug)]
pub struct WaitWeight {
    pub wait_sec: u32,
    pub weight: u16,
}
/// A public key with its contribution weight.
/// NOTE(review): the sibling weight fields are u16 while this one is u32 —
/// confirm against the node's JSON schema.
#[derive(Serialize, Deserialize, Debug)]
pub struct KeyWeight {
    pub key: String,
    pub weight: u32,
}
/// Total staked resources and RAM owned by an account.
#[derive(Serialize, Deserialize, Debug)]
pub struct TotalResources {
    pub owner: AccountName,
    pub net_weight: String,
    pub cpu_weight: String,
    pub ram_bytes: u64,
}
/// Bandwidth an account has staked to itself (or another account).
#[derive(Serialize, Deserialize, Debug)]
pub struct SelfDelegatedBandwidth {
    pub from: AccountName,
    pub to: AccountName,
    pub net_weight: String,
    pub cpu_weight: String,
}
/// A pending unstake refund (amounts still in the refund queue).
#[derive(Serialize, Deserialize, Debug)]
pub struct RefundRequest {
    pub owner: String,
    pub request_time: String,
    pub net_amount: String,
    pub cpu_amount: String,
}
/// Producer-voting state of an account.
#[derive(Serialize, Deserialize, Debug)]
pub struct VoterInfo {
    pub owner: AccountName,
    pub proxy: AccountName,
    pub producers: Vec<AccountName>,
    pub staked: u64,
    pub last_vote_weight: String,
    pub proxied_vote_weight: String,
    // Serialized as 0/1 by the node rather than a JSON bool.
    pub is_proxy: u8,
}
#[cfg(feature = "use-hyper")]
#[cfg(test)]
mod test {
    use super::*;
    use crate::HyperClient;
    use std::str::FromStr;
    // NOTE: these tests hit a live public node and therefore depend on
    // network availability.
    #[test]
    fn get_account_from_str_should_work() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        let account_name = AccountName::from_str("eosio").unwrap();
        let response = get_account(account_name).fetch(&hyper_client);
        assert!(response.is_ok());
    }
    #[test]
    fn get_account_from_str_invalid_account() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        let account_name = AccountName::from_str("eosio1").unwrap();
        let response = get_account(account_name).fetch(&hyper_client);
        // Fixed: the else branch used `assert!(true)`, which can never fail
        // and silently accepted any unexpected outcome.
        if let Err(crate::Error::EosError{ ref eos_err }) = response {
            assert_eq!(eos_err.code, 500);
            assert_eq!(eos_err.message, "Internal Service Error");
        } else {
            panic!("expected an EosError response for an invalid account");
        }
    }
    #[test]
    fn get_account_by_n_should_work() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        let account_name: AccountName = AccountName::from_str("eosio").unwrap();
        let response = get_account(account_name).fetch(&hyper_client);
        assert!(response.is_ok())
    }
    #[test]
    fn get_account_by_n_invalid_account() {
        let node: &'static str = "https://eos.greymass.com/";
        let hyper_client = HyperClient::new(node);
        let account_name: AccountName = AccountName::from_str("eosio2").unwrap();
        let response = get_account(account_name).fetch(&hyper_client);
        // Fixed: same dead `assert!(true)` as above.
        if let Err(crate::Error::EosError{ ref eos_err }) = response {
            assert_eq!(eos_err.code, 500);
            assert_eq!(eos_err.message, "Internal Service Error");
        } else {
            panic!("expected an EosError response for an invalid account");
        }
    }
}
|
use sys::PAGE_SIZE;
use core::ops::Range;
use alloc::alloc::Layout;
use crate::uses::*;
// must be power of 2 for correct results
// Rounds `addr` up to the nearest multiple of `align`.
pub const fn align_up(addr: usize, align: usize) -> usize
{
    let mask = align - 1;
    (addr + mask) & !mask
}
// must be power of 2 for correct results
// Rounds `addr` down to the nearest multiple of `align`.
pub const fn align_down(addr: usize, align: usize) -> usize
{
    let mask = align - 1;
    addr & !mask
}
// Returns the largest power-of-two alignment of `addr`, i.e. the value of
// its lowest set bit. `align_of(0)` is defined as 1 << 63, matching the
// previous special case for zero.
pub const fn align_of(addr: usize) -> usize
{
    if addr == 0 {
        return 1 << 63;
    }
    // `trailing_zeros` gives the index of the lowest set bit, replacing the
    // non-portable inline `bsf` assembly (and making the function const).
    1 << addr.trailing_zeros()
}
// Returns true if `addr` lies on a page boundary, i.e. its natural
// alignment is at least PAGE_SIZE (PAGE_SIZE is presumably a power of two —
// required for this comparison to mean divisibility).
pub fn page_aligned(addr: usize) -> bool
{
    align_of(addr) >= PAGE_SIZE
}
// Extracts the bits of `n` in the half-open range `bits`, shifted down so
// the lowest requested bit lands at bit 0. Out-of-range endpoints are
// clamped to the 64-bit word; an empty or inverted range yields 0.
pub const fn get_bits(n: usize, bits: Range<usize>) -> usize
{
    if bits.end == 0 {
        return 0;
    }
    // Inclusive low/high bit indices, clamped to [0, 63].
    let l = if bits.start > 63 { 63 } else { bits.start };
    let h = if bits.end > 64 { 63 } else { bits.end - 1 };
    if l > h {
        return 0;
    }
    // Mask covering bits 0..=h; all ones when h is the top bit (the shift
    // would otherwise overflow).
    let temp = if h == 63 {
        usize::MAX
    } else {
        (1 << (h + 1)) - 1
    };
    (temp & n).wrapping_shr(l as _)
}
// Like `get_bits`, but keeps the extracted field at its original bit
// position (bits below `bits.start` are cleared instead of shifted away).
// NOTE(review): the edge handling differs from `get_bits` (`end` is clamped
// to 63 and treated as inclusive, and `l >= h` returns 0) — confirm the
// asymmetry is intentional.
pub const fn get_bits_raw(n: usize, bits: Range<usize>) -> usize
{
    let l = if bits.start > 63 { 63 } else { bits.start };
    let h = if bits.end > 63 { 63 } else { bits.end };
    if l >= h {
        return 0;
    }
    let temp = if h == 63 {
        usize::MAX
    } else {
        (1 << (h + 1)) - 1
    };
    (temp & n).wrapping_shr(l as _) << l
}
// Fills `len` bytes starting at `mem` with `data`.
//
// SAFETY: the caller must guarantee `mem` is valid for writes of `len`
// bytes.
pub unsafe fn memset(mem: *mut u8, len: usize, data: u8)
{
    let mut offset = 0;
    while offset < len {
        mem.add(offset).write(data);
        offset += 1;
    }
}
// rounds down
// Returns floor(log2(n)); log2(0) is defined as 0.
#[inline]
pub fn log2(n: usize) -> usize
{
    if n == 0 {
        return 0;
    }
    // Index of the highest set bit, replacing the non-portable inline `bsr`
    // assembly.
    (usize::BITS - 1 - n.leading_zeros()) as usize
}
// rounds up
// TODO: make faster
// NOTE(review): log2_up(1) returns 1 although ceil(log2(1)) == 0; this
// matches log2_up_const and may be deliberate (e.g. minimum allocation
// order) — confirm before changing.
pub fn log2_up(n: usize) -> usize
{
    if n == 1 {
        1
    } else {
        // Round n up to the next multiple of its floor power of two, then
        // take the floor log of the result.
        log2(align_up(n, 1 << log2(n)))
    }
}
// Const-evaluable floor(log2(n)); log2_const(0) is defined as 0.
// `leading_zeros` (const since Rust 1.32) replaces the previous O(64) loop
// over `get_bits` while producing identical results.
pub const fn log2_const(n: usize) -> usize
{
    if n == 0 {
        return 0;
    }
    (usize::BITS - 1 - n.leading_zeros()) as usize
}
// Const-evaluable version of `log2_up`.
// NOTE(review): shares the n == 1 -> 1 quirk with `log2_up` (ceil(log2(1))
// is mathematically 0) — keep the two in sync if this is ever changed.
pub const fn log2_up_const(n: usize) -> usize
{
    if n == 1 {
        1
    } else {
        log2_const(align_up(n, 1 << log2_const(n)))
    }
}
// Detaches a shared reference from its original lifetime, returning one
// with an arbitrary caller-chosen lifetime.
//
// SAFETY: the caller must guarantee the referent outlives the returned
// reference; otherwise the result dangles.
pub unsafe fn unbound<'a, 'b, T>(r: &'a T) -> &'b T
{
    (r as *const T).as_ref().unwrap()
}
// Mutable counterpart of `unbound`.
//
// SAFETY: in addition to the lifetime requirement, the caller must ensure
// no other reference to the referent is used while the returned exclusive
// reference is alive (aliasing a &mut is UB).
pub unsafe fn unbound_mut<'a, 'b, T>(r: &'a mut T) -> &'b mut T
{
    (r as *mut T).as_mut().unwrap()
}
// Applies predicate `f` to the contained value, treating `None` as false
// ("option-and-check"). Equivalent to the previous match, expressed with
// the standard `map_or` combinator.
pub fn optac<T, F>(opt: Option<T>, f: F) -> bool
where
    F: FnOnce(T) -> bool,
{
    opt.map_or(false, f)
}
// Applies predicate `f` to the contained value, treating `None` as true
// (vacuously satisfied). Equivalent to the previous match, expressed with
// the standard `map_or` combinator.
pub fn optnac<T, F>(opt: Option<T>, f: F) -> bool
where
    F: FnOnce(T) -> bool,
{
    opt.map_or(true, f)
}
// Returns true if `ptr` is non-null and sufficiently aligned for `T`.
//
// Fixed: the previous check compared the address's exact power-of-two
// alignment against align_of::<T>() for equality, so over-aligned pointers
// (e.g. address 8 for a type requiring alignment 4) were wrongly reported
// as unaligned. Divisibility by the required alignment is the correct test.
pub fn aligned_nonnull<T>(ptr: *const T) -> bool
{
    !ptr.is_null() && (ptr as usize) % core::mem::align_of::<T>() == 0
}
// Moves `object` onto the heap and leaks it, returning the raw pointer.
// The caller is responsible for eventually reclaiming it (e.g. via
// `from_heap`).
pub fn to_heap<V>(object: V) -> *mut V
{
    let boxed = Box::new(object);
    Box::into_raw(boxed)
}
// Reclaims a value previously leaked with `to_heap`, freeing the heap
// allocation and returning the value by move.
//
// SAFETY: `ptr` must have come from `Box::into_raw` (e.g. `to_heap`) and
// must not be used again afterwards.
pub unsafe fn from_heap<V>(ptr: *const V) -> V
{
    let boxed = Box::from_raw(ptr as *mut V);
    *boxed
}
// Copies `slice` into a freshly allocated Vec.
// The previous version required `T: Default` only to pre-fill the buffer
// before `copy_from_slice`; `to_vec` removes that bound (resolving the
// old TODO) while still performing a single allocation.
pub fn copy_to_heap<T: Copy>(slice: &[T]) -> Vec<T>
{
    slice.to_vec()
}
// Returns the memory layout (size and alignment) of `T`.
// `Layout::new` performs the same computation safely — the compiler-derived
// size/align of a sized type always satisfy Layout's invariants — so the
// unsafe `from_size_align_unchecked` call is unnecessary.
pub const fn mlayout_of<T>() -> Layout
{
    Layout::new::<T>()
}
|
#![deny(missing_docs, warnings, clippy::all, clippy::pedantic)]
//! Scalable concurrent containers.
//!
//! # [`EBR`](ebr)
//!
//! The [`ebr`] module implements epoch-based reclamation for [`LinkedList`], [`HashMap`],
//! [`HashIndex`], and [`TreeIndex`].
//!
//! # [`LinkedList`]
//! [`LinkedList`] is a type trait that implements wait-free list modification operations for
//! a generic concurrent list.
//!
//! # [`HashMap`]
//! [`HashMap`] is a concurrent hash map that dynamically grows and shrinks without blocking
//! other operations.
//!
//! # [`HashIndex`]
//! [`HashIndex`] is a read-optimized concurrent hash index that is similar to [`HashMap`].
//!
//! # [`TreeIndex`]
//! [`TreeIndex`] is a read-optimized concurrent B+ tree index.
pub mod ebr;
mod linked_list;
pub use linked_list::LinkedList;
pub mod hash_map;
pub use hash_map::HashMap;
pub mod hash_index;
pub use hash_index::HashIndex;
pub mod hash_set;
pub use hash_set::HashSet;
pub mod tree_index;
pub use tree_index::TreeIndex;
mod hash_table;
mod tests;
|
// svd2rust-generated reader/writer proxies for the CM4_CLOCK_CTL register.
#[doc = "Reader of register CM4_CLOCK_CTL"]
pub type R = crate::R<u32, super::CM4_CLOCK_CTL>;
#[doc = "Writer for register CM4_CLOCK_CTL"]
pub type W = crate::W<u32, super::CM4_CLOCK_CTL>;
#[doc = "Register CM4_CLOCK_CTL `reset()`'s with value 0"]
impl crate::ResetValue for super::CM4_CLOCK_CTL {
    type Type = u32;
    // Hardware reset value of the whole register.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `FAST_INT_DIV`"]
pub type FAST_INT_DIV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `FAST_INT_DIV`"]
// Borrows the register writer so field writes can be chained.
pub struct FAST_INT_DIV_W<'a> {
    w: &'a mut W,
}
impl<'a> FAST_INT_DIV_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // Clears bits 8..=15 and inserts the (masked) 8-bit value there.
    // Marked unsafe per svd2rust convention for raw-bits field writers.
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8);
        self.w
    }
}
impl R {
    #[doc = "Bits 8:15 - Specifies the fast clock divider (from the high frequency clock 'clk_hf' to the peripheral clock 'clk_fast'). Integer division by (1+FAST_INT_DIV). Allows for integer divisions in the range \\[1, 256\\] (FAST_INT_DIV is in the range \\[0, 255\\]). Note that this field is retained. However, the counter that is used to implement the division is not and will be initialized by HW to '0' when transitioning from DeepSleep to Active power mode."]
    // Extracts bits 8..=15 of the register value.
    #[inline(always)]
    pub fn fast_int_div(&self) -> FAST_INT_DIV_R {
        FAST_INT_DIV_R::new(((self.bits >> 8) & 0xff) as u8)
    }
}
impl W {
    #[doc = "Bits 8:15 - Specifies the fast clock divider (from the high frequency clock 'clk_hf' to the peripheral clock 'clk_fast'). Integer division by (1+FAST_INT_DIV). Allows for integer divisions in the range \\[1, 256\\] (FAST_INT_DIV is in the range \\[0, 255\\]). Note that this field is retained. However, the counter that is used to implement the division is not and will be initialized by HW to '0' when transitioning from DeepSleep to Active power mode."]
    // Returns a write proxy for the FAST_INT_DIV field.
    #[inline(always)]
    pub fn fast_int_div(&mut self) -> FAST_INT_DIV_W {
        FAST_INT_DIV_W { w: self }
    }
}
|
/*
irremocon <https://github.com/ak1211/irremocon>
Copyright 2019 Akihiro Yamamoto
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use crate::infrared_codes::InfraredCodes;
use rppal::gpio::{InputPin, Level};
use std::error::Error;
use std::time::Duration;
use timerfd::{SetTimeFlags, TimerFd, TimerState};
// The IR receiver module outputs an active-low signal.
const ASSERT_IR: Level = Level::Low;
const NEGATE_IR: Level = Level::High;
// Carrier frequency [kHz]:
// 38 kHz
const CARRIER_FREQ_KHZ: u16 = 38;
// Carrier period [us]:
// 1/38,000 * 1,000,000 = 26 us
const CARRIER_PERIOD_MICROS: u16 = 1000 / CARRIER_FREQ_KHZ;
//
// A Raspberry Pi Zero cannot keep in sync with the carrier period and
// timing violations occur, so the clock is divided down.
//
// Prescaler ratio:
// divide by 2
const N_OF_PRESCALER: u16 = 2;
// Clock counter increment per (divided) timer tick.
const COUNT_PACE: u16 = N_OF_PRESCALER;
// Timer period [us]:
// carrier period * prescaler ratio
const TIMER_INTERVAL_MICROS: u16 = CARRIER_PERIOD_MICROS * N_OF_PRESCALER;
// If the signal does not change for this long (34 ms), stop reading the
// IR remote-control signal.
const TIMEOUT_COUNTS: u16 = 34 * 1000 / TIMER_INTERVAL_MICROS;
///
/// Reads an infrared remote-control signal into `ircodes_buffer`.
///
/// Paced by a periodic timerfd tick, waits for the pin to go active, then
/// records the duration of each level run as a tick count; returns once the
/// signal has been idle for `TIMEOUT_COUNTS` ticks.
///
pub fn receive_ir_codes(
    pin: &InputPin,
    ircodes_buffer: &mut InfraredCodes,
) -> Result<(), Box<dyn Error>> {
    // Periodic timer at the prescaled carrier period.
    let state = TimerState::Periodic {
        current: Duration::from_micros(1),
        interval: Duration::from_micros(TIMER_INTERVAL_MICROS as u64),
    };
    let mut timerfd = TimerFd::new()?;
    timerfd.set_state(state, SetTimeFlags::Default);
    // Wait for the remote-control signal to start.
    while pin.read() == NEGATE_IR {
        timerfd.read(); // wait for the next timer tick
    }
    // Signal detected — start counting.
    let mut previous: Level = ASSERT_IR;
    let mut count: u16 = 0;
    while count < TIMEOUT_COUNTS {
        if previous == pin.read() {
            // Signal unchanged: keep counting.
            count += COUNT_PACE;
        } else {
            // Signal edge: push the elapsed count into the buffer and
            // reset the counter.
            ircodes_buffer.push(count);
            previous = !previous;
            count = 0;
        }
        timerfd.read(); // wait for the next timer tick
    }
    // Push the final (timeout) count into the buffer.
    ircodes_buffer.push(count);
    Ok(())
}
|
use proconio::{fastout, input};
// Converts a 1-based index n read from stdin into its bijective base-26
// (Excel-column style) name: 1 -> "a", 26 -> "z", 27 -> "aa", ...
#[fastout]
fn main() {
    input! {
        mut n: i64,
    };
    // Shift to 0-based so the least-significant digit falls in 0..26.
    n -= 1;
    let mut ans: Vec<u8> = Vec::new();
    loop {
        // Current least-significant bijective base-26 digit.
        let remainder = n % 26;
        ans.push(b'a' + remainder as u8);
        if n < 26 {
            break;
        }
        // Bijective numeration: subtract 1 after dividing because there is
        // no zero digit.
        n = n / 26 - 1;
    }
    // Digits were produced least-significant first; reverse for printing.
    println!(
        "{}",
        ans.into_iter().rev().map(|c| c as char).collect::<String>()
    );
}
|
use crate::main::{same_necklace, rotate_n};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashSet;
mod main {
    /// Rotates `s` right by `n` bytes: the last `n` bytes move to the front.
    ///
    /// NOTE(review): uses byte slicing, so this panics if `n > s.len()` or
    /// if the cut falls inside a multi-byte UTF-8 character; the word list
    /// used here is ASCII, where it is safe.
    pub fn rotate_n(s: &str, n: usize) -> String {
        let length = s.len();
        let new_start: &str = &s[length - n..length];
        let new_end: &str = &s[0..length - n];
        let mut out = String::from(new_start);
        out += new_end;
        out
    }
    /// Returns true if `a` and `b` are rotations of one another, i.e.
    /// represent the same "necklace".
    pub fn same_necklace(a: &str, b: &str) -> bool {
        if a.len() == 0 && b.len() == 0 {
            return true;
        } else if a.len() != b.len() {
            return false;
        }
        let achars: Vec<_> = a.chars().collect();
        let bchars: Vec<_> = b.chars().collect();
        // Cheap last-character comparison filters out most candidate
        // rotations before the rotated string is actually built.
        for how_many in 0..b.len() {
            if achars[a.len() - 1] == bchars[b.len() - how_many - 1] && a == rotate_n(b, how_many) {
                return true;
            }
        }
        false
    }
    /// Counts the rotations of `a` (including the identity) that equal `a`,
    /// i.e. how many times its smallest repeating unit repeats.
    pub fn repeats(a: &str) -> usize {
        if a.len() == 0 {
            1
        } else {
            (0..a.len())
                .map(|n| rotate_n(a, n))
                .filter(|s| s == a)
                .count()
        }
    }
    #[test]
    fn simple_rotate_test() {
        assert_eq!(rotate_n("apple", 1), "eappl");
        assert_eq!(rotate_n("apple", 2), "leapp");
        assert_eq!(rotate_n("apple", 0), "apple");
    }
    #[test]
    fn test_same_necklace() {
        assert!(same_necklace("nicole", "icolen"));
        assert!(same_necklace("nicole", "lenico"));
        assert!(!same_necklace("nicole", "coneli"));
        assert!(same_necklace("aabaaaaabaab", "aabaabaabaaa"));
        assert!(!same_necklace("abc", "cba"));
        assert!(!same_necklace("xxyyy", "xxxyy"));
        assert!(!same_necklace("xyxxz", "xxyxz"));
        assert!(same_necklace("x", "x"));
        assert!(!same_necklace("x", "xx"));
        assert!(!same_necklace("x", ""));
        assert!(same_necklace("", ""));
    }
    #[test]
    fn test_repeats() {
        assert_eq!(repeats("abc"), 1);
        assert_eq!(repeats("abcabcabc"), 3);
        assert_eq!(repeats("abcabcabcx"), 1);
        assert_eq!(repeats("aaaaaa"), 6);
        assert_eq!(repeats("a"), 1);
        assert_eq!(repeats(""), 1);
    }
}
// Finds a word in the dictionary that belongs to a rotation group of at
// least `group_size` dictionary words, then prints all of the group's
// members.
fn main() {
    let group_size = 4;
    let file = File::open("enable1.txt").unwrap();
    let buf_reader = BufReader::new(file);
    let words: HashSet<String> = buf_reader.lines().map(|r| r.unwrap()).collect();
    // `find` replaces the previous `filter(..).nth(0)` chain (clippy:
    // iter_nth_zero / filter_next) and stops at the first qualifying word.
    let out = words
        .iter()
        .find(|w| {
            (0..w.len())
                .filter(|i| words.contains(&rotate_n(w, *i)))
                .count()
                >= group_size
        })
        .unwrap();
    // Collect every rotation of the found word that is itself in the list.
    let good_words: Vec<String> = (0..out.len())
        .map(|i| rotate_n(&out, i))
        .filter(|w| words.contains(w))
        .collect();
    println!("{:?}", good_words);
}
// Criterion benchmark harness for `same_necklace`.
// NOTE(review): `Criterion`, `black_box` and `same_necklace` are not imported
// at the top of this file — this only compiles with
// `use criterion::{black_box, criterion_group, Criterion};` and the necklace
// helpers in scope. Confirm against the crate's bench target configuration.
fn criterion_benchmark(c: &mut Criterion) {
    // Matching pair: forces the full rotation search to succeed.
    c.bench_function("same_necklace success test", |b| {
        b.iter(|| {
            same_necklace(
                black_box("abcdefghijklmnopqrstuvwxyz"),
                "pqrstuvwxyzabcdefghijklmno",
            )
        })
    });
    // Length mismatch: rejected by the cheap length check.
    c.bench_function("same_necklace easy fail test", |b| {
        b.iter(|| same_necklace(black_box("abcdefghijklmnopqrstuvwxyz"), "rocks"))
    });
    // Same length but not a rotation: the worst case for the search.
    c.bench_function("same_necklace hard fail test", |b| {
        b.iter(|| {
            same_necklace(
                black_box("abcdefghijklmnopqrstuvwxyz"),
                "pqrrstuwxyzabcdefghijklmno",
            )
        })
    });
}
criterion_group!(benches, criterion_benchmark);
// criterion_main!(benches);
|
//! Data structures for tracking cell/value possibilities.
use std::fmt;
use std::io::Result as IOResult;
use std::iter;
use std::ops;
use std::slice;
/// Input/output for grids
///
/// Re-exports the crate-level `io` module so grid I/O helpers are reachable
/// as `grid::io::*` (used by `Grid::read` and the `Display` impl below).
pub mod io {
    pub use super::super::io::*;
}
/// Cell value index.
///
/// Represents a possible value of a cell, typically 1-9.
/// Stored zero-based: `ValueId(0)` is the cell value `1`
/// (see the `FromIndex<usize>` conversion).
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ValueId(pub usize);
/// Cell position index.
///
/// This is used to address a particular cell on a grid.
/// Cells are numbered in row-major order: index = `size*y + x`.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct CellId(pub usize);
/// Cell/value combination index.
///
/// This is used to address the possibility that a particular cell is a
/// particular value. Packed as `size*cell_id + value_id`.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct CaseId(pub usize);
/// Utility trait to define grid-size-dependent conversions.
///
/// Very similar to the `std::convert::From` trait, except that conversions also need to know an
/// appropriate grid size. Generally used by the `HasGridSize` trait.
pub trait FromIndex<T> {
    /// Converts `other` to `Self`
    ///
    /// Note that the conversion is made under the assumption the puzzle is `size` cells wide.
    fn convert(other: T, size: usize) -> Self;
}
/// Identity conversion: every index type converts to itself regardless of
/// the grid size.
impl<T> FromIndex<T> for T {
    fn convert(other: T, _size: usize) -> Self {
        other
    }
}
/// Convert between `usize` and `ValueId`
///
/// A `usize` of at least `1` and at most the size of the puzzle grid may be
/// converted to/from a `ValueId`; the stored id is zero-based.
///
/// # Panics
///
/// In debug builds, this will panic if `value` is outside of `1..size+1`.
impl FromIndex<usize> for ValueId {
    fn convert(value: usize, size: usize) -> Self {
        debug_assert!(value >= 1 && value <= size, "value in 1..size+1");
        ValueId(value - 1)
    }
}
impl FromIndex<ValueId> for usize {
    /// Maps the zero-based id back to the one-based cell value.
    fn convert(value_id: ValueId, _: usize) -> Self {
        value_id.0 + 1
    }
}
/// Convert between `(usize, usize)` and `CellId`
///
/// A `(usize, usize)` representing an (x,y) coordinate (with each element
/// being in the range `0..size`) may be converted to/from a `CellId`;
/// cells are numbered in row-major order.
///
/// # Panics
///
/// In debug builds, this will panic if `x` or `y` is outside of `0..size`.
impl FromIndex<(usize, usize)> for CellId {
    fn convert(coords: (usize, usize), size: usize) -> Self {
        let (x, y) = coords;
        debug_assert!(x < size, "x in 0..size");
        debug_assert!(y < size, "y in 0..size");
        CellId(y * size + x)
    }
}
impl FromIndex<CellId> for (usize, usize) {
    /// Recovers the (x, y) coordinate from a row-major cell index.
    fn convert(cell: CellId, size: usize) -> Self {
        let CellId(cell_id) = cell;
        (cell_id % size, cell_id / size)
    }
}
/// Convert between `(CellId, ValueId)` and `CaseId`
impl FromIndex<(CellId, ValueId)> for CaseId {
fn convert((CellId(cell_id), ValueId(value_id)): (CellId, ValueId), size: usize) -> Self {
CaseId(size*cell_id + value_id)
}
}
/// Convert between `(ValueId, CellId)` and `CaseId`
impl FromIndex<(ValueId, CellId)> for CaseId {
fn convert((ValueId(value_id), CellId(cell_id)): (ValueId, CellId), size: usize) -> Self {
CaseId(size*cell_id + value_id)
}
}
impl FromIndex<CaseId> for (CellId, ValueId) {
fn convert(CaseId(case_id): CaseId, size: usize) -> Self {
(CellId(case_id / size), ValueId(case_id % size))
}
}
impl FromIndex<CaseId> for (ValueId, CellId) {
fn convert(CaseId(case_id): CaseId, size: usize) -> Self {
(ValueId(case_id % size), CellId(case_id / size))
}
}
/// Utility trait to store grid-size-dependent index ranges.
pub trait RangedIndex: Sized {
    // Concrete iterator type yielded by `range_iter`.
    type RangeIter: Iterator<Item=Self>;
    /// Returns iterator of all valid ids for a particular puzzle size.
    // NOTE: I'd prefer to return a Range instead, but I'm not aware of a way to
    // make Range<MyType> iterable.
    fn range_iter(size: usize) -> Self::RangeIter;
    /// Returns the range of valid indexes for a particular index type.
    fn range(size: usize) -> ops::Range<Self>;
}
// Per size `s` there are `s` values, `s*s` cells and `s*s*s` cases.
// The tuple-struct constructors (`ValueId`, `CellId`, `CaseId`) coerce to the
// `fn(usize) -> Self` pointer named in each `RangeIter` associated type.
impl RangedIndex for ValueId {
    type RangeIter = iter::Map<ops::Range<usize>, fn(usize) -> Self>;
    fn range_iter(size: usize) -> Self::RangeIter { (0..size).map(ValueId) }
    fn range(size: usize) -> ops::Range<Self> { ValueId(0)..ValueId(size) }
}
impl RangedIndex for CellId {
    type RangeIter = iter::Map<ops::Range<usize>, fn(usize) -> Self>;
    fn range_iter(size: usize) -> Self::RangeIter { (0..size*size).map(CellId) }
    fn range(size: usize) -> ops::Range<Self> { CellId(0)..CellId(size*size) }
}
impl RangedIndex for CaseId {
    type RangeIter = iter::Map<ops::Range<usize>, fn(usize) -> Self>;
    fn range_iter(size: usize) -> Self::RangeIter { (0..size*size*size).map(CaseId) }
    fn range(size: usize) -> ops::Range<Self> { CaseId(0)..CaseId(size*size*size) }
}
/// Indicates ability to perform puzzle-size-dependent index operations.
///
/// This trait indicates that something has a particular puzzle size, allowing it to perform
/// conversions between types such as `ValueId`, `CellId` and `CaseId`.
///
/// # Examples
///
/// ```rust
/// # use rusudoku::grid::*;
/// let c = SimpleIndexUtil(9);
/// let cell_id: CellId = c.convert((4, 5));
/// let value_id: ValueId = c.convert(6);
/// let case_id: CaseId = c.convert((cell_id, value_id));
/// let (value_id, cell_id): (ValueId, CellId) = c.convert(case_id);
/// let (x, y): (usize, usize) = c.convert(cell_id);
/// let value: usize = c.convert(value_id);
/// assert_eq!((x, y, value), (4, 5, 6))
/// ```
pub trait HasGridSize {
    /// Returns puzzle width (in cells) that conversions assume.
    fn grid_size(&self) -> usize;
    /// Convert `T` into `U`, for a puzzle `conversion_size()` cells wide.
    ///
    /// The target type `U` is picked by inference (or turbofish), as in the
    /// example above.
    fn convert<T, U>(&self, t: T) -> U
        where U: FromIndex<T> {
        U::convert(t, self.grid_size())
    }
    /// Return iterator of all valid indexes of a particular type.
    fn range_iter<T>(&self) -> <T as RangedIndex>::RangeIter
        where T: RangedIndex {
        <T as RangedIndex>::range_iter(self.grid_size())
    }
    /// Return a `Range` of valid indexes of a particular type.
    fn range<T>(&self) -> ops::Range<T>
        where T: RangedIndex {
        <T as RangedIndex>::range(self.grid_size())
    }
}
/// Simple light-weight index utility.
///
/// Allows for converting between `ValueId`, `CellId` and `CaseId` without needing to store
/// a full puzzle `Grid`. The contained `usize` represents the puzzle size (width in cells).
#[derive(Clone, Copy, Debug)]
pub struct SimpleIndexUtil(pub usize);
impl HasGridSize for SimpleIndexUtil {
    // The wrapped usize is the grid width itself.
    fn grid_size(&self) -> usize { self.0 }
}
/// Stores the possible values for each cell of a puzzle.
///
/// A grid contains cells arranged in a square (with `size` cells per side). Each cell
/// has `size` different `bool`s, each one indicating whether the cell may be a particular
/// value.
#[derive(Clone)]
pub struct Grid {
    // Width of the puzzle in cells (also the number of candidate values per cell).
    size: usize,
    // Flat possibility flags, cell-major: the slice for cell `c` is
    // `cases[c*size .. (c+1)*size]` (see `cell_range`), one flag per value.
    cases: Vec<bool>, // TODO: Memory-efficient implementation?
}
impl Grid {
    /// Creates a new `Grid` for a puzzle that is `size` cells wide.
    ///
    /// Every case starts `true`: with no constraints yet, any cell may be any value.
    pub fn new(size: usize) -> Grid {
        Grid {
            size: size,
            // size*size cells, each with size per-value flags.
            cases: vec![true; size * size * size]
        }
    }
    /// Creates a new `Grid` from a slice representing initial conditions.
    ///
    /// Primarily intended to ease setup of tests and examples, so it will panic if the slice isn't
    /// setup properly. The slice must be a square where each element corresponds to a cell. A value
    /// of `0` indicates that the cell is unconstrained/unknown.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let grid = Grid::literal(&[
    ///     1, 2, 3, 4,
    ///     3, 0, 0, 2,
    ///     2, 0, 0, 3,
    ///     4, 3, 2, 1,
    /// ]);
    /// assert_eq!(grid[CellId(4)], [false, false, true, false]);
    /// assert_eq!(grid[CellId(5)], [true, true, true, true ]);
    /// ```
    pub fn literal(values: &[usize]) -> Grid {
        // This makes me a little uncomfortable, but at least
        // it shouldn't be a problem for reasonable sizes.
        // (The assert below catches any float rounding mishap.)
        let size = (values.len() as f64).sqrt().ceil() as usize;
        assert_eq!(size*size, values.len());
        let mut grid = Grid::new(size);
        for ((_, dst_cell), &value) in grid.cells_mut().zip(values) {
            if value > 0 {
                // A known value: clear every flag, then set just that value's flag.
                for possibility in dst_cell.iter_mut() {
                    *possibility = false;
                }
                dst_cell[value-1] = true;
            }
        }
        grid
    }
    /// Reads new `Grid` from lines iterator.
    pub fn read<I>(lines: &mut I) -> Result<Grid, io::Error>
        where I: Iterator<Item=IOResult<String>> {
        io::GridReader::new().read(lines)
    }
    /// Returns the width of the grid in cells.
    pub fn size(&self) -> usize { self.size }
    /// Returns iterator over all cell positions/values.
    ///
    /// Iterates over all cells in row-major order, yielding items of type `(CellId, &[bool])`,
    /// representing a cell's position, whether it can be each value.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let grid = Grid::new(9);
    /// let (cell, values) = grid.cells().nth(5).unwrap();
    /// assert_eq!(cell, grid.convert((5, 0)));
    /// assert!(values[0]); // The cell could be the value 1.
    /// ```
    pub fn cells(&self) -> CellsIter {
        // The fn-pointer binding makes the closure type match CellsIter's signature.
        let cell_id: fn(_) -> _ = CellId;
        // chunks(size) yields exactly one size-long slice per cell.
        (0..).map(cell_id).zip(self.cases.chunks(self.size))
    }
    /// Returns mutable iterator over all cell positions/values.
    ///
    /// Identical to `cells()`, except that the boolean array references are mutable.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let mut grid = Grid::new(9);
    /// let (cell, values) = grid.cells_mut().nth(5).unwrap();
    /// values[0] = false; // Prevent the cell from being the value 1.
    /// ```
    pub fn cells_mut(&mut self) -> CellsIterMut {
        let cell_id: fn(_) -> _ = CellId;
        (0..).map(cell_id).zip(self.cases.chunks_mut(self.size))
    }
    // Maps a cell to its half-open slot range within `cases`.
    fn cell_range(&self, cell_id: CellId) -> ops::Range<usize> {
        let size = self.size;
        cell_id.0 * size .. cell_id.0 * size + size
    }
    /// Returns iterator over cases/values of a cell.
    ///
    /// Given a CellId (or something that can be converted to one), returns an iterator over the
    /// cases that make up a particular cell, yielding items of type `(CaseId, &bool)`,
    /// representing a particular case (i.e. position and value) and whether or not that case is
    /// possible.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let mut grid = Grid::new(9);
    /// let (case, value) = grid.cell((5, 0)).nth(3).unwrap();
    /// assert!(*value); // The cell at 5,0 could be the value 4.
    /// ```
    pub fn cell<T>(&self, cell_id: T) -> CasesIter
        where CellId: FromIndex<T> {
        let case_id: fn(_) -> _ = CaseId;
        // Case ids are the positions within `cases`, so the same range serves
        // both as the id source and the slice bounds.
        let range = self.cell_range(self.convert(cell_id));
        range.clone().map(case_id).zip(self.cases[range].iter())
    }
    /// Returns mutable iterator over cases/values of a cell.
    ///
    /// Identical to `cell(...)`, except that the boolean references are mutable.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let mut grid = Grid::new(9);
    /// let (case, value) = grid.cell_mut((5, 0)).nth(3).unwrap();
    /// *value = false; // Prevent the cell at 5,0 from being the value 4.
    /// ```
    pub fn cell_mut<T>(&mut self, cell_id: T) -> CasesIterMut
        where CellId: FromIndex<T> {
        let case_id: fn(_) -> _ = CaseId;
        let range = self.cell_range(self.convert(cell_id));
        range.clone().map(case_id).zip(self.cases[range].iter_mut())
    }
    /// Returns iterator over all cases/values.
    ///
    /// Iterates over the cases of the puzzle, yielding items of type `(CaseId, &bool)`,
    /// representing a particular case (i.e. position and value) and whether or not that
    /// case is possible.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let mut grid = Grid::new(9);
    /// let (case, value) = grid.cases().nth(100).unwrap();
    /// assert!(*value); // The cell at 2,1 could be the value 2.
    /// ```
    pub fn cases(&self) -> CasesIter {
        let case_id: fn(_) -> _ = CaseId;
        (0..self.cases.len()).map(case_id).zip(self.cases.iter())
    }
    /// Returns mutable iterator over all cases/values.
    ///
    /// Identical to `cases()`, except that the boolean references are mutable.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// let mut grid = Grid::new(9);
    /// let (case, value) = grid.cases_mut().nth(100).unwrap();
    /// *value = false; // Prevent the cell at 2,1 from being the value 2.
    /// ```
    pub fn cases_mut(&mut self) -> CasesIterMut {
        let case_id: fn(_) -> _ = CaseId;
        (0..self.cases.len()).map(case_id).zip(self.cases.iter_mut())
    }
    /// Marks supplied `CaseId`s as impossible
    ///
    /// This is a convenience method for interacting with solvers. It's equivalent to:
    ///
    /// ```rust
    /// # use rusudoku::grid::*;
    /// # let mut grid = Grid::new(1);
    /// # let vetoes = vec![CaseId(0)];
    /// for veto in vetoes {
    ///     grid[veto] = false;
    /// }
    /// ```
    pub fn veto<T>(&mut self, vetoes: T)
        where T: iter::Iterator<Item=CaseId> {
        for veto in vetoes {
            self[veto] = false;
        }
    }
    /// Returns `CaseId`s that are impossible.
    ///
    /// This is a convenience method for interacting with solvers. It returns an iterator of
    /// `CaseId`s that have been marked as impossible.
    pub fn vetoes(&self) -> VetoesIter {
        // Named fn (not a closure) so the return type matches VetoesIter's fn pointer.
        fn to_case_id_if_vetoed((case_id, allowed): (usize, &bool)) -> Option<CaseId> {
            if *allowed { None } else { Some(CaseId(case_id)) }
        }
        self.cases.iter().enumerate().filter_map(to_case_id_if_vetoed)
    }
}
// Named iterator types returned by Grid's accessors. They are spelled out
// concretely (using fn pointers rather than closures) so they can appear in
// public signatures.
pub type CellsIter<'a> = iter::Zip<iter::Map<ops::RangeFrom<usize>, fn(usize) -> CellId>, slice::Chunks<'a, bool>>;
pub type CellsIterMut<'a> = iter::Zip<iter::Map<ops::RangeFrom<usize>, fn(usize) -> CellId>, slice::ChunksMut<'a, bool>>;
pub type CasesIter<'a> = iter::Zip<iter::Map<ops::Range<usize>, fn(usize) -> CaseId>, slice::Iter<'a, bool>>;
pub type CasesIterMut<'a> = iter::Zip<iter::Map<ops::Range<usize>, fn(usize) -> CaseId>, slice::IterMut<'a, bool>>;
pub type VetoesIter<'a> = iter::FilterMap<iter::Enumerate<slice::Iter<'a, bool>>, fn((usize, &bool)) -> Option<CaseId>>;
// A Grid knows its own width, so it gets all the index-conversion helpers.
impl HasGridSize for Grid {
    fn grid_size(&self) -> usize { self.size }
}
// Indexing by CellId yields that cell's per-value possibility slice;
// indexing by CaseId yields the single possibility flag.
impl ops::Index<CellId> for Grid {
    type Output = [bool];
    fn index(&self, cell_id: CellId) -> &[bool] {
        let range = self.cell_range(cell_id);
        &self.cases[range]
    }
}
impl ops::IndexMut<CellId> for Grid {
    fn index_mut(&mut self, cell_id: CellId) -> &mut [bool] {
        // The range is computed first so the immutable borrow for
        // `cell_range` ends before `cases` is borrowed mutably.
        let range = self.cell_range(cell_id);
        &mut self.cases[range]
    }
}
impl ops::Index<CaseId> for Grid {
    type Output = bool;
    fn index(&self, CaseId(case_id): CaseId) -> &bool {
        &self.cases[case_id]
    }
}
impl ops::IndexMut<CaseId> for Grid {
    fn index_mut(&mut self, CaseId(case_id): CaseId) -> &mut bool {
        &mut self.cases[case_id]
    }
}
impl fmt::Display for Grid {
    // Delegates formatting to the io module's writer.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        io::GridWriter::new().write(f, &self)?;
        Ok(())
    }
}
// Wish I could figure out how to get Index<FromIndex<T>> to work...
#[cfg(test)]
mod tests {
    // Unit tests for the index conversions and the Grid accessors. Many
    // tests poke `grid.cases` directly; that works because this module is a
    // child of the module defining `Grid`.
    use super::*;
    #[test]
    fn test_value_id_conversions() {
        let c = SimpleIndexUtil(9);
        // (one-based value, zero-based internal id)
        let cases = vec![(1, 0), (5,4), (9, 8)];
        for (initial, expected_internal) in cases {
            let ValueId(value_id) = c.convert(initial);
            assert_eq!(value_id, expected_internal);
            // Round-trip back to the one-based value.
            let value: usize = c.convert(ValueId(value_id));
            assert_eq!(value, initial);
        }
    }
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)]
    fn test_value_id_cannot_be_zero() {
        let c = SimpleIndexUtil(9);
        let _: ValueId = c.convert(0);
    }
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)]
    fn test_value_id_cannot_be_too_large() {
        let c = SimpleIndexUtil(9);
        let _: ValueId = c.convert(10);
    }
    #[test]
    fn test_cell_id_conversions() {
        let c = SimpleIndexUtil(9);
        // (x, y, expected row-major index)
        let cases = vec![(0, 0, 0), (5, 0, 5), (8, 0, 8),
                         (0, 1, 9), (2, 3, 29), (8, 4, 44),
                         (0, 8, 72), (4, 8, 76), (8, 8, 80)];
        for (initial_x, initial_y, expected_internal) in cases {
            let CellId(cell_id) = c.convert((initial_x, initial_y));
            assert_eq!(cell_id, expected_internal);
            let coords: (usize, usize) = c.convert(CellId(cell_id));
            assert_eq!(coords, (initial_x, initial_y));
        }
    }
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)]
    fn test_cell_id_x_cannot_be_too_large() {
        let c = SimpleIndexUtil(9);
        let _: CellId = c.convert((9,0));
    }
    #[test]
    #[should_panic]
    #[cfg(debug_assertions)]
    fn test_cell_id_y_cannot_be_too_large() {
        let c = SimpleIndexUtil(9);
        let _: CellId = c.convert((0,9));
    }
    #[test]
    fn test_case_id_conversions() {
        let c = SimpleIndexUtil(9);
        // (cell id, value id, expected packed case id = cell*9 + value)
        let cases = vec![( 0, 0, 0), ( 0, 4, 4), ( 0, 8, 8),
                         ( 1, 0, 9), ( 2, 3, 21), ( 8, 4, 76),
                         (79, 8, 719), (80, 0, 720), (80, 8, 728)];
        for (initial_cell_id, initial_value_id, expected_internal) in cases {
            let cell = CellId(initial_cell_id);
            let value = ValueId(initial_value_id);
            // Both tuple orders pack to the same case id.
            let CaseId(case_id) = c.convert((cell, value));
            assert_eq!(case_id, expected_internal);
            let CaseId(case_id) = c.convert((value, cell));
            assert_eq!(case_id, expected_internal);
            // And unpack in either order too.
            let (CellId(cell_id), ValueId(value_id)) = c.convert(CaseId(case_id));
            assert_eq!((cell_id, value_id), (initial_cell_id, initial_value_id));
            let (ValueId(value_id), CellId(cell_id)) = c.convert(CaseId(case_id));
            assert_eq!((value_id, cell_id), (initial_value_id, initial_cell_id));
        }
    }
    #[test]
    fn test_alternate_size_conversions() {
        // Nothing is hard-wired to 9: a 16-wide grid packs the same way.
        let c = SimpleIndexUtil(16);
        let CellId(cell_id) = c.convert((5,12));
        assert_eq!(cell_id, 197);
        let ValueId(value_id) = c.convert(9);
        assert_eq!(value_id, 8);
        let CaseId(case_id) = c.convert((CellId(cell_id), ValueId(value_id)));
        assert_eq!(case_id, 3160);
    }
    #[test]
    fn test_index_ranges() {
        let c = SimpleIndexUtil(4);
        assert_eq!(c.range(), ValueId(0)..ValueId(4));
        assert_eq!(c.range(), CellId(0)..CellId(16));
        assert_eq!(c.range(), CaseId(0)..CaseId(64));
        let c = SimpleIndexUtil(9);
        assert_eq!(c.range(), ValueId(0)..ValueId(9));
        assert_eq!(c.range(), CellId(0)..CellId(81));
        assert_eq!(c.range(), CaseId(0)..CaseId(729));
    }
    #[test]
    fn test_index_range_iters() {
        let c = SimpleIndexUtil(4);
        assert_eq!(c.range_iter::<ValueId>().last(), Some(ValueId(3)));
        assert_eq!(c.range_iter::<ValueId>().count(), 4);
        assert_eq!(c.range_iter::<CellId>().last(), Some(CellId(15)));
        assert_eq!(c.range_iter::<CellId>().count(), 16);
        assert_eq!(c.range_iter::<CaseId>().last(), Some(CaseId(63)));
        assert_eq!(c.range_iter::<CaseId>().count(), 64);
        let c = SimpleIndexUtil(9);
        assert_eq!(c.range_iter::<ValueId>().count(), 9);
        assert_eq!(c.range_iter::<CellId>().count(), 81);
        assert_eq!(c.range_iter::<CaseId>().count(), 729);
    }
    #[test]
    fn test_new_grid() {
        let grid = Grid::new(9);
        assert_eq!(grid.size(), 9);
        // 81 cells times 9 candidate values.
        assert_eq!(grid.cases.len(), 729);
    }
    #[test]
    fn test_grid_literal() {
        let grid = Grid::literal(&[
            1, 2, 3, 4,
            3, 0, 0, 2,
            2, 0, 0, 3,
            4, 3, 2, 1,
        ]);
        // Known cells have exactly one flag set; 0-cells keep all flags.
        assert_eq!(grid[CellId( 0)], [true, false, false, false]);
        assert_eq!(grid[CellId( 1)], [false, true, false, false]);
        assert_eq!(grid[CellId( 2)], [false, false, true, false]);
        assert_eq!(grid[CellId( 3)], [false, false, false, true ]);
        assert_eq!(grid[CellId( 4)], [false, false, true, false]);
        assert_eq!(grid[CellId( 5)], [true, true, true, true ]);
        assert_eq!(grid[CellId( 6)], [true, true, true, true ]);
        assert_eq!(grid[CellId( 7)], [false, true, false, false]);
        assert_eq!(grid[CellId( 8)], [false, true, false, false]);
        assert_eq!(grid[CellId( 9)], [true, true, true, true ]);
        assert_eq!(grid[CellId(10)], [true, true, true, true ]);
        assert_eq!(grid[CellId(11)], [false, false, true, false]);
        assert_eq!(grid[CellId(12)], [false, false, false, true ]);
        assert_eq!(grid[CellId(13)], [false, false, true, false]);
        assert_eq!(grid[CellId(14)], [false, true, false, false]);
        assert_eq!(grid[CellId(15)], [true, false, false, false]);
    }
    #[test]
    fn test_grid_cells_iter() {
        let mut grid = Grid::new(9);
        // Indices 9..18 are cell 1; knock out everything but values 3 and 7.
        for i in vec![9,10,12,13,14,16,17] {
            grid.cases[i] = false;
        }
        assert_eq!(grid.cells().count(), 81);
        let cells: Vec<_> = grid.cells().take(3).collect();
        assert_eq!(cells[0].0, grid.convert((0,0)));
        assert_eq!(cells[1].0, grid.convert((1,0)));
        assert_eq!(cells[2].0, grid.convert((2,0)));
        assert_eq!(cells[0].1, [true; 9]);
        assert_eq!(cells[1].1, [false, false, true, false, false, false, true, false, false]);
        assert_eq!(cells[2].1, [true; 9]);
    }
    #[test]
    fn test_grid_cells_mut_iter() {
        let mut grid = Grid::new(9);
        let cell = {
            let (cell, possibilities) = grid.cells_mut().nth(3).unwrap();
            possibilities[0] = false;
            possibilities[3] = false;
            possibilities[8] = false;
            cell
        };
        assert_eq!((3,0), grid.convert(cell));
        // Cell 3 occupies raw indices 27..36.
        assert!(grid.cases[27..36] == [false, true, true, false, true, true, true, true, false]);
    }
    #[test]
    fn test_grid_cell_iter() {
        let mut grid = Grid::new(9);
        for i in vec![9,10,12,13,14,16,17] {
            grid.cases[i] = false;
        }
        let (cases, possibilities): (Vec<_>, Vec<_>) = grid.cell((1,0)).map(|(case, &ok)| (case, ok)).unzip();
        assert_eq!(possibilities, [false, false, true, false, false, false, true, false, false]);
        // Every yielded case belongs to cell (1,0) and covers values 1..=9 in order.
        let (cells, values): (Vec<_>, Vec<_>) = cases.iter()
            .map(|&case| grid.convert::<_, (CellId, ValueId)>(case))
            .unzip();
        assert!(cells.iter().all(|&cell| cell == grid.convert((1,0))));
        assert_eq!(values[0], grid.convert(1));
        assert_eq!(values[8], grid.convert(9));
    }
    #[test]
    fn test_grid_cell_mut_iter() {
        let mut grid = Grid::new(9);
        let c = SimpleIndexUtil(9);
        for (i, (case, possibility)) in grid.cell_mut((1,0)).enumerate() {
            let (cell, value): (CellId, ValueId) = c.convert(case);
            assert_eq!((1,0), c.convert(cell));
            assert_eq!(i+1, c.convert(value));
            assert!(*possibility);
            *possibility = false;
        }
        // All of cell 1's flags were cleared through the iterator.
        assert!(grid.cases[9..18] == [false; 9]);
    }
    #[test]
    fn test_grid_cases_iter() {
        let mut grid = Grid::new(9);
        grid.cases[8] = false;
        grid.cases[9] = false;
        let cases: Vec<_> = grid.cases().skip(7).take(4).collect();
        assert_eq!(cases[0].0, CaseId(7));
        assert_eq!(cases[1].0, CaseId(8));
        assert_eq!(cases[2].0, CaseId(9));
        assert_eq!(cases[3].0, CaseId(10));
        assert!(cases[0].1);
        assert!(!cases[1].1);
        assert!(!cases[2].1);
        assert!(cases[3].1);
    }
    #[test]
    fn test_grid_cases_mut_iter() {
        let mut grid = Grid::new(9);
        {
            let (case, possibility) = grid.cases_mut().nth(10).unwrap();
            *possibility = false;
            assert_eq!(case, CaseId(10));
        }
        // Only the targeted flag changed.
        assert!(grid.cases[9]);
        assert!(!grid.cases[10]);
        assert!(grid.cases[11]);
    }
    #[test]
    fn test_grid_veto() {
        let mut grid = Grid::new(9);
        let vetoes = [7, 42, 53];
        grid.veto(vetoes.iter().cloned().map(CaseId));
        assert!(!grid[CaseId(7)]);
        assert!(!grid[CaseId(42)]);
        assert!(!grid[CaseId(53)]);
    }
    #[test]
    fn test_grid_vetoes() {
        let mut grid = Grid::new(9);
        grid[CaseId(7)] = false;
        grid[CaseId(42)] = false;
        grid[CaseId(53)] = false;
        // vetoes() reports exactly the cleared cases, in index order.
        let vetoes: Vec<_> = grid.vetoes().collect();
        assert_eq!(vetoes, [CaseId(7), CaseId(42), CaseId(53)]);
    }
    #[test]
    fn test_grid_index_by_cell() {
        let mut grid = Grid::new(9);
        for i in vec![0,1,3,4,5,7,8] {
            grid[CellId(4)][i] = false;
        }
        // Neighbouring cells are untouched.
        assert!(grid[CellId(3)] == [true; 9]);
        assert!(grid[CellId(4)] == [false, false, true, false, false, false, true, false, false]);
        assert!(grid[CellId(5)] == [true; 9]);
    }
    #[test]
    fn test_grid_index_by_case() {
        let mut grid = Grid::new(9);
        for i in vec![0,1,5,20,50,80] {
            assert!(grid[CaseId(i)]);
            grid[CaseId(i)] = false;
            assert!(!grid[CaseId(i)]);
        }
        assert!(grid[CaseId(2)]);
        assert!(grid[CaseId(6)]);
        assert!(grid[CaseId(25)]);
        assert!(grid[CaseId(40)]);
        assert!(grid[CaseId(79)]);
    }
}
|
/// A triangle described by three positive integer side lengths.
pub struct Triangle {
    side_1: u64,
    side_2: u64,
    side_3: u64
}

impl Triangle {
    /// Builds a `Triangle` from three side lengths.
    ///
    /// Returns `None` unless every side is strictly positive and the triangle
    /// inequality holds. Degenerate triangles — where two sides sum exactly
    /// to the third — are accepted.
    pub fn build(sides: [u64; 3]) -> Option<Triangle> {
        let (a, b, c) = (sides[0], sides[1], sides[2]);
        // Every side must be strictly positive.
        let all_positive = a > 0 && b > 0 && c > 0;
        // Triangle inequality (non-strict, so degenerate triangles pass).
        let inequality_holds = a + b >= c && b + c >= a && a + c >= b;
        if all_positive && inequality_holds {
            Some(Triangle { side_1: a, side_2: b, side_3: c })
        } else {
            None
        }
    }
    /// True when all three sides are equal.
    pub fn is_equilateral(&self) -> bool {
        self.side_1 == self.side_2 && self.side_1 == self.side_3
    }
    /// True when no two sides are equal — exactly the opposite of isosceles.
    pub fn is_scalene(&self) -> bool {
        !self.is_isosceles()
    }
    /// True when at least two sides are equal (equilateral included).
    pub fn is_isosceles(&self) -> bool {
        self.side_1 == self.side_2 || self.side_1 == self.side_3 || self.side_2 == self.side_3
    }
}
|
use tokio::process::Command;
use anyhow::{Result, Context, Error};
use async_trait::async_trait;
use crate::{helpers::{self, ExitStatusIntoUnit}, services::model::{Nameable, Ensurable, Removable}};
// Display name reported by the `Nameable` impl for this service.
static NAME: &str = "k3d cluster";

/// Builder-style description of a k3d-managed Kubernetes cluster.
#[derive(Default)]
pub struct K3dService {
    // Cluster name passed to `k3d cluster create`/`delete` and matched in `docker ps`.
    k3d_cluster_name: String,
    // Node image handed to `k3d --image`.
    k3d_image: String,
    // Address part of `--api-port <addr>:<port>`.
    k3d_api_address: String,
    // Port part of `--api-port <addr>:<port>`.
    k3d_api_port: String,
}

impl K3dService {
    /// Sets the cluster name and returns the builder.
    pub fn with_k3d_cluster_name(mut self, name: &str) -> Self {
        self.k3d_cluster_name = name.to_owned();
        self
    }
    /// Sets the node image and returns the builder.
    pub fn with_k3d_image(mut self, image: &str) -> Self {
        self.k3d_image = image.to_owned();
        self
    }
    /// Sets the API server port and returns the builder.
    pub fn with_k3d_api_port(mut self, port: &str) -> Self {
        self.k3d_api_port = port.to_owned();
        self
    }
    /// Sets the API server address and returns the builder.
    pub fn with_k3d_api_address(mut self, address: &str) -> Self {
        self.k3d_api_address = address.to_owned();
        self
    }
}
impl Nameable for K3dService {
    /// Static human-readable name for this service ("k3d cluster").
    fn name(&self) -> &'static str {
        NAME
    }
}
#[async_trait]
impl Ensurable for K3dService {
    /// Reports whether a container whose name matches the cluster name shows
    /// up in `docker ps` output.
    async fn is_present(&self) -> Result<bool> {
        let ps_out = Command::new("docker")
            .arg("ps")
            .arg("--filter")
            .arg(format!("name={}", self.k3d_cluster_name))
            .output().await?.stdout;
        let ps_out_str = std::str::from_utf8(&ps_out)?;
        // TODO: This should probably overwrite the kubeconfig?
        // Or, do kubeconfig management throughout this?
        Ok(ps_out_str.contains(&self.k3d_cluster_name))
    }
    /// Creates the k3d cluster, patches the kubeconfig when running inside a
    /// container, waits for the traefik ingress, then re-verifies presence.
    async fn make_present(&self) -> Result<()> {
        Command::new("k3d")
            .arg("cluster")
            .arg("create")
            .arg(&self.k3d_cluster_name)
            .args(&["--image", &self.k3d_image])
            .args(&["--api-port", &format!("{}:{}", self.k3d_api_address, self.k3d_api_port)])
            //.args(&["-p", "5443:443@loadbalancer"])
            //.args(&["-p", "5080:80@loadbalancer"])
            .status().await
            .status_to_unit()
            .context("Unable to start the k3d k8s cluster.")?;
        // Fixed settle time before poking the new cluster.
        // (`delay_for` is the tokio 0.2-era sleep API.)
        tokio::time::delay_for(tokio::time::Duration::from_secs(10)).await;
        println!("Checking if we are inside a container ...");
        if helpers::is_docker().await? {
            // On Mac and Windows, we should replace with `host.docker.internal`. On Linux, people can just run this executable
            // anyway, so bleh.
            println!("Overwriting the kubeconfig since we are inside a container ...");
            Command::new("sed")
                .arg("-i")
                .arg("s/0.0.0.0/host.docker.internal/g")
                .arg("/root/.kube/config")
                .status().await
                .status_to_unit()
                .context("Unable to overwrite the kubeconfig.")?;
        }
        println!("Waiting for traefik deployment to complete ...");
        Command::new("kubectl")
            .arg("wait")
            .arg("--for=condition=complete")
            .arg("--timeout=600s")
            .arg("job/helm-install-traefik")
            .arg("-n")
            .arg("kube-system")
            .status().await
            .status_to_unit()
            .context("Unable to wait for the traefik deployment to complete.")?;
        println!("Waiting for traefik deployment to come up ...");
        Command::new("kubectl")
            .arg("wait")
            .arg("--for=condition=available")
            .arg("--timeout=600s")
            .arg("deploy/traefik")
            .arg("-n")
            .arg("kube-system")
            .status().await
            .status_to_unit()
            .context("Unable to wait for the traefik deployment to come up.")?;
        // Re-check via `docker ps` so callers get a hard failure if creation
        // silently produced no running containers.
        if self.is_present().await? {
            Ok(())
        } else {
            Err(Error::msg("Unable to verify that the k3d cluster is running."))
        }
    }
}
#[async_trait]
impl Removable for K3dService {
    /// Deletes the cluster via `k3d cluster delete`; succeeds once the
    /// command exits cleanly (no confirmation re-check is done here).
    async fn make_not_present(&self) -> Result<()> {
        Command::new("k3d")
            .arg("cluster")
            .arg("delete")
            .arg(&self.k3d_cluster_name)
            .status().await
            .status_to_unit()
            .context("Unable to stop the k3d k8s cluster.")?;
        Ok(())
    }
}
/// Newtype wrapper around a raw OpenGL error code (`GLenum`).
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct GlError(pub GLenum);
/// Errors that can occur during initialization.
#[derive(Debug)]
pub enum InitError {
    // An underlying OpenGL call failed.
    GlError(GlError),
    // Shader compilation failed; payload is (&str, String) — presumably the
    // shader identifier and the compiler log; confirm at construction sites.
    CompileFailed(&'static str, String),
    // Program linking failed; the String is the link log (TODO confirm).
    LinkFailed(String),
    // Error surfaced by the compute_shader backend.
    ComputeError(compute_shader::error::Error),
    // A provided setting value was rejected.
    InvalidSetting,
}
/// Errors that can occur while rasterizing.
#[derive(Debug)]
pub enum RasterError {
    GlError(GlError),
    ComputeError(compute_shader::error::Error),
    // The input image format is not handled by this code path.
    UnsupportedImageFormat,
}
use gotham::middleware::{Middleware, NewMiddleware};
use gotham::state::State;
use gotham::handler::HandlerFuture;
use std::io;
use std::env;
use state::AppConfig;
use futures::{future, Future};
/// Zero-sized middleware factory: Gotham asks it for a fresh `Ware` instance.
pub struct New {}
impl NewMiddleware for New {
    type Instance = Ware;
    // Construction is infallible; always hands back an empty `Ware`.
    fn new_middleware(&self) -> io::Result<Self::Instance> {
        Ok(Ware {})
    }
}
/// Middleware that assembles an `AppConfig` from environment variables and
/// stores it in the request `State` before the rest of the chain runs.
pub struct Ware {}
impl Middleware for Ware {
    fn call<Chain>(self, mut state: State, chain: Chain) -> Box<HandlerFuture>
    where
        Chain: FnOnce(State) -> Box<HandlerFuture> + 'static,
        Self: Sized,
    {
        // Every optional variable falls back to the empty string.
        let env_or_empty = |key: &str| env::var(key).unwrap_or_default();
        let cfg = AppConfig {
            canonical_url: env_or_empty("CANONICAL_URL"),
            oauth_path: env_or_empty("OAUTH_PATH"),
            api_key: env_or_empty("API_KEY"),
            client_id: env_or_empty("CLIENT_ID"),
            client_secret: env_or_empty("CLIENT_SECRET"),
            // Tokens start out empty.
            access_token: String::new(),
            refresh_token: String::new(),
        };
        debug!("AppConfig: putting config in state");
        state.put(cfg);
        // `chain` already has the right FnOnce(State) shape for and_then.
        Box::new(future::ok(state).and_then(chain))
    }
}
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{Block, Crate, Ident, Mac_, PatKind};
use ast::{MacStmtStyle, Stmt, StmtKind, ItemKind};
use ast;
use ext::hygiene::Mark;
use attr;
use codemap::{dummy_spanned, ExpnInfo, NameAndSpan, MacroBang};
use syntax_pos::{self, Span, ExpnId};
use config::StripUnconfigured;
use ext::base::*;
use ext::decorator::expand_annotatable;
use feature_gate::{self, Features};
use fold;
use fold::*;
use parse::token::keywords;
use ptr::P;
use tokenstream::TokenTree;
use util::small_vector::SmallVector;
use visit;
use visit::Visitor;
use std_inject;
// A trait for AST nodes and AST node lists into which macro invocations may expand.
trait MacroGenerable: Sized {
    // Expand the given MacResult using its appropriate `make_*` method.
    fn make_with<'a>(result: Box<MacResult + 'a>) -> Option<Self>;
    // Fold this node or list of nodes using the given folder.
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self;
    // Walk this node or list of nodes with the given visitor.
    fn visit_with<V: Visitor>(&self, visitor: &mut V);
    // The user-friendly name of the node type (e.g. "expression", "item", etc.) for diagnostics.
    fn kind_name() -> &'static str;
    // Return a placeholder expansion to allow compilation to continue after an erroring expansion.
    // NOTE(review): assumes `DummyResult::any` can produce every implementing
    // node kind — the unwrap would panic otherwise; confirm in ext::base.
    fn dummy(span: Span) -> Self {
        Self::make_with(DummyResult::any(span)).unwrap()
    }
}
// Generates MacroGenerable impls. Per type the caller supplies: the kind
// name, the MacResult `make_*` method, and either a direct folder/visitor
// method (`.fold_x`/`.visit_x`) or, prefixed with `lift`, per-element methods
// to be mapped over a node list.
macro_rules! impl_macro_generable {
    ($($ty:ty: $kind_name:expr, .$make:ident,
       $(.$fold:ident)*  $(lift .$fold_elt:ident)*,
       $(.$visit:ident)*  $(lift .$visit_elt:ident)*;)*) => { $(
        impl MacroGenerable for $ty {
            fn kind_name() -> &'static str { $kind_name }
            fn make_with<'a>(result: Box<MacResult + 'a>) -> Option<Self> { result.$make() }
            fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
                // Exactly one of the two repetitions expands per type.
                $( folder.$fold(self) )*
                $( self.into_iter().flat_map(|item| folder. $fold_elt (item)).collect() )*
            }
            fn visit_with<V: Visitor>(&self, visitor: &mut V) {
                $( visitor.$visit(self) )*
                $( for item in self.as_slice() { visitor. $visit_elt (item) } )*
            }
        }
    )* }
}
// Single nodes use direct fold/visit methods; SmallVector lists `lift`
// per-element methods (see the macro definition above).
impl_macro_generable! {
    P<ast::Expr>: "expression", .make_expr, .fold_expr, .visit_expr;
    P<ast::Pat>:  "pattern",    .make_pat,  .fold_pat,  .visit_pat;
    P<ast::Ty>:   "type",       .make_ty,   .fold_ty,   .visit_ty;
    SmallVector<ast::Stmt>: "statement", .make_stmts, lift .fold_stmt, lift .visit_stmt;
    SmallVector<P<ast::Item>>: "item",   .make_items, lift .fold_item, lift .visit_item;
    SmallVector<ast::TraitItem>:
        "trait item", .make_trait_items, lift .fold_trait_item, lift .visit_trait_item;
    SmallVector<ast::ImplItem>:
        "impl item", .make_impl_items, lift .fold_impl_item, lift .visit_impl_item;
}
// Hand-written impl: optional expressions need `fold_opt_expr` (which may
// drop the expression), so the macro above can't cover this case.
impl MacroGenerable for Option<P<ast::Expr>> {
    fn kind_name() -> &'static str { "expression" }
    fn make_with<'a>(result: Box<MacResult + 'a>) -> Option<Self> {
        result.make_expr().map(Some)
    }
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        self.and_then(|expr| folder.fold_opt_expr(expr))
    }
    fn visit_with<V: Visitor>(&self, visitor: &mut V) {
        self.as_ref().map(|expr| visitor.visit_expr(expr));
    }
}
// Expand a single expression: macro invocations are dispatched to
// `expand_mac_invoc`, everything else is folded normally.
//
// NOTE(review): `expr.clone()` copies the whole expression just to take
// ownership of `node` while keeping `expr` available as the fallback value
// for unknown macros — presumably a syntex-specific cost; confirm before
// "optimizing" it away.
pub fn expand_expr(expr: ast::Expr, fld: &mut MacroExpander) -> P<ast::Expr> {
    match expr.clone().node {
        // expr_mac should really be expr_ext or something; it's the
        // entry-point for all syntax extensions.
        ast::ExprKind::Mac(mac) => {
            return expand_mac_invoc(mac, None, expr.attrs.clone().into(), expr.span, fld, P(expr));
        }
        _ => P(noop_fold_expr(expr, fld)),
    }
}
// A `MacResult` that expands to a placeholder item: a `Mac` item with an
// *empty* path. `expand_multi_modified` detects that empty path and leaves
// the item alone, so the macro's scope survives without generating code.
struct MacroScopePlaceholder;
impl MacResult for MacroScopePlaceholder {
    fn make_items(self: Box<Self>) -> Option<SmallVector<P<ast::Item>>> {
        Some(SmallVector::one(P(ast::Item {
            ident: keywords::Invalid.ident(),
            attrs: Vec::new(),
            id: ast::DUMMY_NODE_ID,
            node: ast::ItemKind::Mac(dummy_spanned(ast::Mac_ {
                // Empty segments is the marker checked by expand_multi_modified.
                path: ast::Path { span: syntax_pos::DUMMY_SP, global: false, segments: Vec::new() },
                tts: Vec::new(),
            })),
            vis: ast::Visibility::Inherited,
            span: syntax_pos::DUMMY_SP,
        })))
    }
}
/// Expand a macro invocation. Returns the result of expansion.
///
/// `original_value` is returned unchanged when the macro cannot be resolved
/// (a syntex-specific behavior — see the FIXME below); on other failures a
/// kind-appropriate dummy fragment is returned so compilation can continue.
fn expand_mac_invoc<T>(mac: ast::Mac, ident: Option<Ident>, attrs: Vec<ast::Attribute>, span: Span,
                       fld: &mut MacroExpander,
                       // FIXME(syntex): ignore unknown results
                       original_value: T
                       ) -> T
    where T: MacroGenerable,
{
    // FIXME(syntex): Ignore unknown results
    // Tri-state outcome: expanded result, hard error (already reported), or
    // a macro name we simply don't know about.
    enum ExpandResult<T> {
        Some(T),
        None,
        UnknownMacro,
    }
    // It would almost certainly be cleaner to pass the whole macro invocation in,
    // rather than pulling it apart and marking the tts and the ctxt separately.
    let Mac_ { path, tts, .. } = mac.node;
    let mark = Mark::fresh();
    // Resolve the macro name and run the matching extension kind, producing
    // a raw `MacResult` (not yet converted to the target fragment type).
    fn mac_result<'a>(path: &ast::Path, ident: Option<Ident>, tts: Vec<TokenTree>, mark: Mark,
                      attrs: Vec<ast::Attribute>, call_site: Span, fld: &'a mut MacroExpander)
                      -> ExpandResult<Box<MacResult + 'a>> {
        // Detect use of feature-gated or invalid attributes on macro invoations
        // since they will not be detected after macro expansion.
        for attr in attrs.iter() {
            feature_gate::check_attribute(&attr, &fld.cx.parse_sess.span_diagnostic,
                                          &fld.cx.parse_sess.codemap(),
                                          &fld.cx.ecfg.features.unwrap());
        }
        // Only plain single-segment names can be macro invocations here.
        if path.segments.len() > 1 || path.global || !path.segments[0].parameters.is_empty() {
            fld.cx.span_err(path.span, "expected macro name without module separators");
            return ExpandResult::None;
        }
        let extname = path.segments[0].identifier.name;
        let extension = if let Some(extension) = fld.cx.syntax_env.find(extname) {
            extension
        } else {
            // SYNTEX: Ignore unknown macros.
            /*
            let mut err = fld.cx.struct_span_err(path.span,
                                                 &format!("macro undefined: '{}!'", &extname));
            fld.cx.suggest_macro_name(&extname.as_str(), &mut err);
            err.emit();
            */
            return ExpandResult::UnknownMacro;
        };
        let ident = ident.unwrap_or(keywords::Invalid.ident());
        // Apply the fresh hygiene mark to the argument tokens before expansion.
        let marked_tts = mark_tts(&tts, mark);
        match *extension {
            // Ordinary `name!(...)` macro: must NOT have an ident argument.
            NormalTT(ref expandfun, exp_span, allow_internal_unstable) => {
                if ident.name != keywords::Invalid.name() {
                    let msg =
                        format!("macro {}! expects no ident argument, given '{}'", extname, ident);
                    fld.cx.span_err(path.span, &msg);
                    return ExpandResult::None;
                }
                fld.cx.bt_push(ExpnInfo {
                    call_site: call_site,
                    callee: NameAndSpan {
                        format: MacroBang(extname),
                        span: exp_span,
                        allow_internal_unstable: allow_internal_unstable,
                    },
                });
                ExpandResult::Some(expandfun.expand(fld.cx, call_site, &marked_tts))
            }
            // `name! ident (...)` style macro: REQUIRES an ident argument.
            IdentTT(ref expander, tt_span, allow_internal_unstable) => {
                if ident.name == keywords::Invalid.name() {
                    fld.cx.span_err(path.span,
                                    &format!("macro {}! expects an ident argument", extname));
                    return ExpandResult::None;
                };
                fld.cx.bt_push(ExpnInfo {
                    call_site: call_site,
                    callee: NameAndSpan {
                        format: MacroBang(extname),
                        span: tt_span,
                        allow_internal_unstable: allow_internal_unstable,
                    }
                });
                ExpandResult::Some(expander.expand(fld.cx, call_site, ident, marked_tts))
            }
            // `macro_rules! name { ... }`: registers a definition as a side
            // effect and normally expands to a scope placeholder.
            MacroRulesTT => {
                if ident.name == keywords::Invalid.name() {
                    fld.cx.span_err(path.span,
                                    &format!("macro {}! expects an ident argument", extname));
                    return ExpandResult::None;
                };
                fld.cx.bt_push(ExpnInfo {
                    call_site: call_site,
                    callee: NameAndSpan {
                        format: MacroBang(extname),
                        span: None,
                        // `macro_rules!` doesn't directly allow unstable
                        // (this is orthogonal to whether the macro it creates allows it)
                        allow_internal_unstable: false,
                    }
                });
                let def = ast::MacroDef {
                    ident: ident,
                    id: ast::DUMMY_NODE_ID,
                    span: call_site,
                    imported_from: None,
                    use_locally: true,
                    body: marked_tts,
                    export: attr::contains_name(&attrs, "macro_export"),
                    allow_internal_unstable: attr::contains_name(&attrs, "allow_internal_unstable"),
                    attrs: attrs,
                };
                fld.cx.insert_macro(def.clone());
                // macro_rules! has a side effect, but expands to nothing.
                // If keep_macs is true, expands to a MacEager::items instead.
                if fld.keep_macs {
                    ExpandResult::Some(MacEager::items(SmallVector::one(P(ast::Item {
                        ident: def.ident,
                        attrs: def.attrs.clone(),
                        id: ast::DUMMY_NODE_ID,
                        node: ast::ItemKind::Mac(ast::Mac {
                            span: def.span,
                            node: ast::Mac_ {
                                path: path.clone(),
                                tts: def.body.clone(),
                            }
                        }),
                        vis: ast::Visibility::Inherited,
                        span: def.span,
                    }))))
                } else {
                    ExpandResult::Some(Box::new(MacroScopePlaceholder))
                }
            }
            MultiDecorator(..) | MultiModifier(..) => {
                fld.cx.span_err(path.span,
                                &format!("`{}` can only be used in attributes", extname));
                ExpandResult::None
            }
        }
    }
    // Convert the raw MacResult into the fragment kind `T` the caller needs.
    let opt_expanded = T::make_with(match mac_result(&path, ident, tts, mark, attrs, span, fld) {
        ExpandResult::Some(result) => result,
        ExpandResult::None => return T::dummy(span),
        ExpandResult::UnknownMacro => return original_value,
    });
    let expanded = if let Some(expanded) = opt_expanded {
        expanded
    } else {
        // The macro produced a fragment of the wrong kind for this position.
        let msg = format!("non-{kind} macro in {kind} position: {name}",
                          name = path.segments[0].identifier.name, kind = T::kind_name());
        fld.cx.span_err(path.span, &msg);
        return T::dummy(span);
    };
    // Mark the output for hygiene, strip cfg'd-out code, harvest macro
    // definitions, then (unless single-stepping) recursively expand.
    let marked = expanded.fold_with(&mut Marker { mark: mark, expn_id: Some(fld.cx.backtrace()) });
    let configured = marked.fold_with(&mut fld.strip_unconfigured());
    fld.load_macros(&configured);
    let fully_expanded = if fld.single_step {
        configured
    } else {
        configured.fold_with(fld)
    };
    fld.cx.bt_pop();
    fully_expanded
}
// eval $e with a new exts frame.
// must be a macro so that $e isn't evaluated too early.
//
// Pushes a syntax-environment frame (with the given macros_escape flag),
// evaluates $e inside it, pops the frame, and yields $e's value.
macro_rules! with_exts_frame {
    ($extsboxexpr:expr,$macros_escape:expr,$e:expr) =>
    ({$extsboxexpr.push_frame();
      $extsboxexpr.info().macros_escape = $macros_escape;
      let result = $e;
      $extsboxexpr.pop_frame();
      result
     })
}
// When we enter a module, record it, for the sake of `module!`
//
// Thin wrapper: routes the item through annotatable expansion and unwraps
// the results back into plain items.
pub fn expand_item(it: P<ast::Item>, fld: &mut MacroExpander)
                   -> SmallVector<P<ast::Item>> {
    expand_annotatable(Annotatable::Item(it), fld)
        .into_iter().map(|i| i.expect_item()).collect()
}
// Does this attribute list contain `#[macro_use]` (or its deprecated
// synonym `#[macro_escape]`, which additionally triggers a warning)?
fn contains_macro_use(fld: &mut MacroExpander, attrs: &[ast::Attribute]) -> bool {
    for attr in attrs {
        // Check both spellings unconditionally (check_name also marks the
        // attribute as used).
        let named_use = attr.check_name("macro_use");
        let named_escape = attr.check_name("macro_escape");
        if named_escape {
            let mut err =
                fld.cx.struct_span_warn(attr.span,
                                        "macro_escape is a deprecated synonym for macro_use");
            match attr.node.style {
                // Inner attributes get an extra hint about the outer form.
                ast::AttrStyle::Inner => {
                    err.help("consider an outer attribute, \
                              #[macro_use] mod ...").emit()
                }
                _ => err.emit(),
            }
        }
        if named_use || named_escape {
            // Arguments like `#[macro_use(foo)]` are rejected here, but the
            // attribute still counts as a use.
            if !attr.is_word() {
                fld.cx.span_err(attr.span, "arguments to macro_use are not allowed here");
            }
            return true;
        }
    }
    false
}
/// Expand a stmt
///
/// Non-macro statements fold normally; macro statements are expanded, with
/// the original statement kept as the fallback for unknown macros.
fn expand_stmt(stmt: Stmt, fld: &mut MacroExpander) -> SmallVector<Stmt> {
    // Clone so we can destructure the node while still owning `stmt` as the
    // unknown-macro fallback below.
    let (mac, style, attrs) = match stmt.clone().node {
        StmtKind::Mac(mac) => mac.unwrap(),
        _ => return noop_fold_stmt(stmt, fld)
    };
    let mut fully_expanded: SmallVector<ast::Stmt> =
        expand_mac_invoc(mac, None, attrs.into(), stmt.span, fld, SmallVector::one(stmt));
    // If this is a macro invocation with a semicolon, then apply that
    // semicolon to the final statement produced by expansion.
    if style == MacStmtStyle::Semicolon {
        if let Some(stmt) = fully_expanded.pop() {
            fully_expanded.push(stmt.add_trailing_semicolon());
        }
    }
    fully_expanded
}
// Expand a pattern: macro patterns go through expand_mac_invoc (with the
// original pattern as unknown-macro fallback), all others fold normally.
fn expand_pat(p: P<ast::Pat>, fld: &mut MacroExpander) -> P<ast::Pat> {
    match p.node {
        PatKind::Mac(_) => {}
        _ => return noop_fold_pat(p, fld)
    }
    // Clone so `p` survives as the fallback while the clone is destructured.
    p.clone().and_then(|ast::Pat {node, span, ..}| {
        match node {
            PatKind::Mac(mac) => expand_mac_invoc(mac, None, Vec::new(), span, fld, p),
            _ => unreachable!()
        }
    })
}
// Expand an annotatable whose attributes have already been processed:
// dispatches on item/trait-item/impl-item and handles macro items, module
// scoping, and the scope-placeholder special case.
pub fn expand_multi_modified(a: Annotatable, fld: &mut MacroExpander) -> SmallVector<Annotatable> {
    match a {
        Annotatable::Item(it) => match it.node {
            ast::ItemKind::Mac(..) => {
                // An empty macro path marks a MacroScopePlaceholder item
                // (see its make_items) — keep it as-is, don't re-expand.
                if match it.node {
                    ItemKind::Mac(ref mac) => mac.node.path.segments.is_empty(),
                    _ => unreachable!(),
                } {
                    return SmallVector::one(Annotatable::Item(it));
                }
                it.and_then(|it| match it.clone().node {
                    ItemKind::Mac(mac) =>
                        expand_mac_invoc(mac, Some(it.ident), it.attrs.clone(), it.span, fld, SmallVector::one(P(it))),
                    _ => unreachable!(),
                })
            }
            ast::ItemKind::Mod(_) | ast::ItemKind::ForeignMod(_) => {
                // Track the module path (unless the module is unnamed) and
                // fold the contents inside a fresh syntax-env frame whose
                // escape flag comes from #[macro_use].
                let valid_ident =
                    it.ident.name != keywords::Invalid.name();
                if valid_ident {
                    fld.cx.mod_push(it.ident);
                }
                let macro_use = contains_macro_use(fld, &it.attrs);
                let result = with_exts_frame!(fld.cx.syntax_env,
                                              macro_use,
                                              noop_fold_item(it, fld));
                if valid_ident {
                    fld.cx.mod_pop();
                }
                result
            },
            _ => noop_fold_item(it, fld),
        }.into_iter().map(|i| Annotatable::Item(i)).collect(),
        Annotatable::TraitItem(it) => {
            expand_trait_item(it.unwrap(), fld).into_iter().
                map(|it| Annotatable::TraitItem(P(it))).collect()
        }
        Annotatable::ImplItem(ii) => {
            expand_impl_item(ii.unwrap(), fld).into_iter().
                map(|ii| Annotatable::ImplItem(P(ii))).collect()
        }
    }
}
// Expand an impl item: macro items go through expand_mac_invoc (original
// item as unknown-macro fallback), everything else folds normally.
fn expand_impl_item(ii: ast::ImplItem, fld: &mut MacroExpander)
                    -> SmallVector<ast::ImplItem> {
    match ii.clone().node {
        ast::ImplItemKind::Macro(mac) => {
            expand_mac_invoc(mac, None, ii.attrs.clone(), ii.span, fld, SmallVector::one(ii))
        }
        _ => fold::noop_fold_impl_item(ii, fld)
    }
}
// Expand a trait item: mirror image of expand_impl_item.
fn expand_trait_item(ti: ast::TraitItem, fld: &mut MacroExpander)
                     -> SmallVector<ast::TraitItem> {
    match ti.clone().node {
        ast::TraitItemKind::Macro(mac) => {
            expand_mac_invoc(mac, None, ti.attrs.clone(), ti.span, fld, SmallVector::one(ti))
        }
        _ => fold::noop_fold_trait_item(ti, fld)
    }
}
// Expand a type: macro types are expanded first (original type as the
// unknown-macro fallback), then the result is folded normally.
pub fn expand_type(t: P<ast::Ty>, fld: &mut MacroExpander) -> P<ast::Ty> {
    let t = match t.node.clone() {
        ast::TyKind::Mac(mac) => {
            expand_mac_invoc(mac, None, Vec::new(), t.span, fld, t)
        }
        _ => t
    };
    fold::noop_fold_ty(t, fld)
}
/// A tree-folder that performs macro expansion
pub struct MacroExpander<'a, 'b:'a> {
    // Extension context: diagnostics, syntax environment, module paths.
    pub cx: &'a mut ExtCtxt<'b>,
    // If true, expand each invocation exactly one level instead of recursing.
    pub single_step: bool,
    // If true, macro_rules! definitions expand to items instead of vanishing.
    pub keep_macs: bool,
}
impl<'a, 'b> MacroExpander<'a, 'b> {
    // Construct an expander over the given extension context.
    pub fn new(cx: &'a mut ExtCtxt<'b>,
               single_step: bool,
               keep_macs: bool) -> MacroExpander<'a, 'b> {
        MacroExpander {
            cx: cx,
            single_step: single_step,
            keep_macs: keep_macs
        }
    }
    // Folder that removes cfg'd-out code, borrowing config from the context.
    fn strip_unconfigured(&mut self) -> StripUnconfigured {
        StripUnconfigured {
            config: &self.cx.cfg,
            should_test: self.cx.ecfg.should_test,
            sess: self.cx.parse_sess,
            features: self.cx.ecfg.features,
        }
    }
    // Walk freshly expanded code and register macros imported via
    // `#[macro_use] extern crate`, tracking whether we are at crate root.
    fn load_macros<T: MacroGenerable>(&mut self, node: &T) {
        struct MacroLoadingVisitor<'a, 'b: 'a>{
            cx: &'a mut ExtCtxt<'b>,
            at_crate_root: bool,
        }
        impl<'a, 'b> Visitor for MacroLoadingVisitor<'a, 'b> {
            // Don't descend into macro invocations.
            fn visit_mac(&mut self, _: &ast::Mac) {}
            fn visit_item(&mut self, item: &ast::Item) {
                if let ast::ItemKind::ExternCrate(..) = item.node {
                    // We need to error on `#[macro_use] extern crate` when it isn't at the
                    // crate root, because `$crate` won't work properly.
                    for def in self.cx.loader.load_crate(item, self.at_crate_root) {
                        self.cx.insert_macro(def);
                    }
                } else {
                    // Anything nested inside another item is no longer root.
                    let at_crate_root = ::std::mem::replace(&mut self.at_crate_root, false);
                    visit::walk_item(self, item);
                    self.at_crate_root = at_crate_root;
                }
            }
            fn visit_block(&mut self, block: &ast::Block) {
                let at_crate_root = ::std::mem::replace(&mut self.at_crate_root, false);
                visit::walk_block(self, block);
                self.at_crate_root = at_crate_root;
            }
        }
        node.visit_with(&mut MacroLoadingVisitor {
            at_crate_root: self.cx.syntax_env.is_crate_root(),
            cx: self.cx,
        });
    }
}
// The Folder impl: routes each AST node kind to its expansion helper.
impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
    fn fold_crate(&mut self, c: Crate) -> Crate {
        // Remember the crate's filename for nested-module path resolution.
        self.cx.filename = Some(self.cx.parse_sess.codemap().span_to_filename(c.span));
        noop_fold_crate(c, self)
    }
    fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
        expr.and_then(|expr| expand_expr(expr, self))
    }
    fn fold_opt_expr(&mut self, pexpr: P<ast::Expr>) -> Option<P<ast::Expr>> {
        // In Option position a macro may expand to nothing, so handle Mac
        // here rather than delegating to expand_expr.
        pexpr.clone().and_then(|expr| match expr.node {
            ast::ExprKind::Mac(mac) =>
                expand_mac_invoc(mac, None, expr.attrs.into(), expr.span, self, Some(pexpr)),
            _ => Some(expand_expr(expr, self)),
        })
    }
    fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
        expand_pat(pat, self)
    }
    fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
        use std::mem::replace;
        let result;
        if let ast::ItemKind::Mod(ast::Mod { inner, .. }) = item.node {
            if item.span.contains(inner) {
                // Inline `mod m { ... }`: extend the current module path.
                self.push_mod_path(item.ident, &item.attrs);
                result = expand_item(item, self);
                self.pop_mod_path();
            } else {
                // Out-of-line `mod m;`: switch filename/path context to the
                // module's own file for the duration of its expansion.
                let filename = if inner != syntax_pos::DUMMY_SP {
                    Some(self.cx.parse_sess.codemap().span_to_filename(inner))
                } else { None };
                let orig_filename = replace(&mut self.cx.filename, filename);
                let orig_mod_path_stack = replace(&mut self.cx.mod_path_stack, Vec::new());
                result = expand_item(item, self);
                self.cx.filename = orig_filename;
                self.cx.mod_path_stack = orig_mod_path_stack;
            }
        } else {
            result = expand_item(item, self);
        }
        result
    }
    fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
        expand_stmt(stmt, self)
    }
    fn fold_block(&mut self, block: P<Block>) -> P<Block> {
        // Blocks get their own syntax-env frame (macros never escape blocks).
        let was_in_block = ::std::mem::replace(&mut self.cx.in_block, true);
        let result = with_exts_frame!(self.cx.syntax_env, false, noop_fold_block(block, self));
        self.cx.in_block = was_in_block;
        result
    }
    fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVector<ast::TraitItem> {
        expand_annotatable(Annotatable::TraitItem(P(i)), self)
            .into_iter().map(|i| i.expect_trait_item()).collect()
    }
    fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVector<ast::ImplItem> {
        expand_annotatable(Annotatable::ImplItem(P(i)), self)
            .into_iter().map(|i| i.expect_impl_item()).collect()
    }
    fn fold_ty(&mut self, ty: P<ast::Ty>) -> P<ast::Ty> {
        expand_type(ty, self)
    }
}
impl<'a, 'b> MacroExpander<'a, 'b> {
    // Push a module path segment: the module's name, unless overridden by a
    // `#[path = "..."]` attribute.
    fn push_mod_path(&mut self, id: Ident, attrs: &[ast::Attribute]) {
        let default_path = id.name.as_str();
        let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") {
            Some(d) => d,
            None => default_path,
        };
        self.cx.mod_path_stack.push(file_path)
    }
    // Pop the segment pushed above; panics if the stack is empty (a bug).
    fn pop_mod_path(&mut self) {
        self.cx.mod_path_stack.pop().unwrap();
    }
}
/// Configuration for a macro-expansion run.
pub struct ExpansionConfig<'feat> {
    pub crate_name: String,
    // Enabled feature gates; None means no features available.
    pub features: Option<&'feat Features>,
    // Maximum macro recursion depth.
    pub recursion_limit: usize,
    // If true, trace macro expansion for debugging.
    pub trace_mac: bool,
    pub should_test: bool, // If false, strip `#[test]` nodes
}
// Generates boolean getters that report whether a named feature gate is
// enabled in `self.features` (false when features is None).
macro_rules! feature_tests {
    ($( fn $getter:ident = $field:ident, )*) => {
        $(
            pub fn $getter(&self) -> bool {
                match self.features {
                    Some(&Features { $field: true, .. }) => true,
                    _ => false,
                }
            }
        )*
    }
}
impl<'feat> ExpansionConfig<'feat> {
    // Default configuration: no features, recursion limit 64, tests stripped.
    pub fn default(crate_name: String) -> ExpansionConfig<'static> {
        ExpansionConfig {
            crate_name: crate_name,
            features: None,
            recursion_limit: 64,
            trace_mac: false,
            should_test: false,
        }
    }
    // One getter per feature gate consulted during expansion.
    feature_tests! {
        fn enable_quotes = quote,
        fn enable_asm = asm,
        fn enable_log_syntax = log_syntax,
        fn enable_concat_idents = concat_idents,
        fn enable_trace_macros = trace_macros,
        fn enable_allow_internal_unstable = allow_internal_unstable,
        fn enable_custom_derive = custom_derive,
        fn enable_pushpop_unsafe = pushpop_unsafe,
    }
}
// Expand a whole crate with the default (recursive, macro-discarding)
// expander settings.
pub fn expand_crate(cx: &mut ExtCtxt,
                    user_exts: Vec<NamedSyntaxExtension>,
                    c: Crate) -> Crate {
    let mut expander = MacroExpander::new(cx, false, false);
    expand_crate_with_expander(&mut expander, user_exts, c)
}
// Expands crate using supplied MacroExpander - allows for
// non-standard expansion behaviour (e.g. step-wise).
pub fn expand_crate_with_expander(expander: &mut MacroExpander,
                                  user_exts: Vec<NamedSyntaxExtension>,
                                  mut c: Crate) -> Crate {
    // Pick the crate-root injection target based on no_core/no_std attributes.
    if std_inject::no_core(&c) {
        expander.cx.crate_root = None;
    } else if std_inject::no_std(&c) {
        expander.cx.crate_root = Some("core");
    } else {
        expander.cx.crate_root = Some("std");
    }
    // User extensions must be added before expander.load_macros is called,
    // so that macros from external crates shadow user defined extensions.
    for (name, extension) in user_exts {
        expander.cx.syntax_env.insert(name, extension);
    }
    // Harvest `#[macro_use] extern crate` macros from the top-level items
    // before folding the crate itself.
    let items = SmallVector::many(c.module.items);
    expander.load_macros(&items);
    c.module.items = items.into();
    // Abort only if expansion itself introduced new errors.
    let err_count = expander.cx.parse_sess.span_diagnostic.err_count();
    let mut ret = expander.fold_crate(c);
    ret.exported_macros = expander.cx.exported_macros.clone();
    if expander.cx.parse_sess.span_diagnostic.err_count() > err_count {
        expander.cx.parse_sess.span_diagnostic.abort_if_errors();
    }
    ret
}
// A Marker adds the given mark to the syntax context and
// sets spans' `expn_id` to the given expn_id (unless it is `None`).
struct Marker { mark: Mark, expn_id: Option<ExpnId> }
impl Folder for Marker {
    // Hygiene: stamp every identifier's context with the mark.
    fn fold_ident(&mut self, mut ident: Ident) -> Ident {
        ident.ctxt = ident.ctxt.apply_mark(self.mark);
        ident
    }
    fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
        noop_fold_mac(mac, self)
    }
    // Attribute spans to the expansion they came from, when known.
    fn new_span(&mut self, mut span: Span) -> Span {
        if let Some(expn_id) = self.expn_id {
            span.expn_id = expn_id;
        }
        span
    }
}
// apply a given mark to the given token trees. Used prior to expansion of a macro.
// (expn_id is None because these spans predate any expansion output.)
fn mark_tts(tts: &[TokenTree], m: Mark) -> Vec<TokenTree> {
    noop_fold_tts(tts, &mut Marker{mark:m, expn_id: None})
}
#[cfg(test)]
mod tests {
    use super::{expand_crate, ExpansionConfig};
    use ast;
    use ext::base::{ExtCtxt, DummyResolver};
    use parse;
    use util::parser_testing::{string_to_parser};
    use visit;
    use visit::Visitor;
    // a visitor that extracts the paths
    // from a given thingy and puts them in a mutable
    // array (passed in to the traversal)
    #[derive(Clone)]
    struct PathExprFinderContext {
        path_accumulator: Vec<ast::Path> ,
    }
    impl Visitor for PathExprFinderContext {
        fn visit_expr(&mut self, expr: &ast::Expr) {
            // Collect only bare (non-qualified) path expressions.
            if let ast::ExprKind::Path(None, ref p) = expr.node {
                self.path_accumulator.push(p.clone());
            }
            visit::walk_expr(self, expr);
        }
    }
    // these following tests are quite fragile, in that they don't test what
    // *kind* of failure occurs.
    fn test_ecfg() -> ExpansionConfig<'static> {
        ExpansionConfig::default("test".to_string())
    }
    // make sure that macros can't escape fns
    #[should_panic]
    #[test] fn macros_cant_escape_fns_test () {
        let src = "fn bogus() {macro_rules! z (() => (3+4));}\
                   fn inty() -> i32 { z!() }".to_string();
        let sess = parse::ParseSess::new();
        let crate_ast = parse::parse_crate_from_source_str(
            "<test>".to_string(),
            src,
            Vec::new(), &sess).unwrap();
        // should fail:
        let mut loader = DummyResolver;
        let mut ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut loader);
        expand_crate(&mut ecx, vec![], crate_ast);
    }
    // make sure that macros can't escape modules
    #[should_panic]
    #[test] fn macros_cant_escape_mods_test () {
        let src = "mod foo {macro_rules! z (() => (3+4));}\
                   fn inty() -> i32 { z!() }".to_string();
        let sess = parse::ParseSess::new();
        let crate_ast = parse::parse_crate_from_source_str(
            "<test>".to_string(),
            src,
            Vec::new(), &sess).unwrap();
        let mut loader = DummyResolver;
        let mut ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut loader);
        expand_crate(&mut ecx, vec![], crate_ast);
    }
    // macro_use modules should allow macros to escape
    #[test] fn macros_can_escape_flattened_mods_test () {
        let src = "#[macro_use] mod foo {macro_rules! z (() => (3+4));}\
                   fn inty() -> i32 { z!() }".to_string();
        let sess = parse::ParseSess::new();
        let crate_ast = parse::parse_crate_from_source_str(
            "<test>".to_string(),
            src,
            Vec::new(), &sess).unwrap();
        let mut loader = DummyResolver;
        let mut ecx = ExtCtxt::new(&sess, vec![], test_ecfg(), &mut loader);
        expand_crate(&mut ecx, vec![], crate_ast);
    }
    // Parse the given source and expand it, panicking on parse errors.
    fn expand_crate_str(crate_str: String) -> ast::Crate {
        let ps = parse::ParseSess::new();
        let crate_ast = panictry!(string_to_parser(&ps, crate_str).parse_crate_mod());
        // the cfg argument actually does matter, here...
        let mut loader = DummyResolver;
        let mut ecx = ExtCtxt::new(&ps, vec![], test_ecfg(), &mut loader);
        expand_crate(&mut ecx, vec![], crate_ast)
    }
    #[test] fn macro_tokens_should_match(){
        expand_crate_str(
            "macro_rules! m((a)=>(13)) ;fn main(){m!(a);}".to_string());
    }
    // should be able to use a bound identifier as a literal in a macro definition:
    #[test] fn self_macro_parsing(){
        expand_crate_str(
            "macro_rules! foo ((zz) => (287;));
            fn f(zz: i32) {foo!(zz);}".to_string()
            );
    }
    // create a really evil test case where a $x appears inside a binding of $x
    // but *shouldn't* bind because it was inserted by a different macro....
    // can't write this test case until we have macro-generating macros.
}
|
$NetBSD: patch-.._vendor_serial-unix-0.4.0_src_error.rs,v 1.1 2023/08/30 08:30:17 pin Exp $
Add bsiegert@ unmerged pull request
https://github.com/dcuddeback/serial-rs/pull/63
--- ../vendor/serial-unix-0.4.0/src/error.rs.orig 2017-07-02 01:20:06.000000000 +0000
+++ ../vendor/serial-unix-0.4.0/src/error.rs
@@ -64,7 +64,7 @@ pub fn errno() -> i32 {
__dfly_error()
}
- #[cfg(target_os = "openbsd")]
+ #[cfg(any(target_os = "openbsd", target_os = "netbsd"))]
unsafe fn errno_location() -> *const c_int {
extern { fn __errno() -> *const c_int; }
__errno()
|
use crate::{backend::SchemaBuilder, prepare::*, types::*, SchemaStatementBuilder};
/// Drop a table
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let table = Table::drop()
///     .table(Glyph::Table)
///     .table(Char::Table)
///     .to_owned();
///
/// assert_eq!(
///     table.to_string(MysqlQueryBuilder),
///     r#"DROP TABLE `glyph`, `character`"#
/// );
/// assert_eq!(
///     table.to_string(PostgresQueryBuilder),
///     r#"DROP TABLE "glyph", "character""#
/// );
/// assert_eq!(
///     table.to_string(SqliteQueryBuilder),
///     r#"DROP TABLE `glyph`, `character`"#
/// );
/// ```
#[derive(Debug, Clone)]
pub struct TableDropStatement {
    /// Tables to drop; rendered as a comma-separated list (see example above).
    pub(crate) tables: Vec<DynIden>,
    /// Trailing drop options pushed by [`Self::restrict`] / [`Self::cascade`].
    pub(crate) options: Vec<TableDropOpt>,
    /// Set by [`Self::if_exists`]; consumed by the schema builder.
    pub(crate) if_exists: bool,
}
/// All available table drop options
#[derive(Debug, Clone)]
pub enum TableDropOpt {
    /// `RESTRICT` option
    Restrict,
    /// `CASCADE` option
    Cascade,
}
impl Default for TableDropStatement {
    // Delegates to `new()`: no tables, no options, no IF EXISTS.
    fn default() -> Self {
        Self::new()
    }
}
impl TableDropStatement {
    /// Construct drop table statement
    pub fn new() -> Self {
        Self {
            tables: Vec::new(),
            options: Vec::new(),
            if_exists: false,
        }
    }
    /// Set table name
    ///
    /// Can be called multiple times to drop several tables in one statement.
    pub fn table<T: 'static>(mut self, table: T) -> Self
    where
        T: Iden,
    {
        self.tables.push(SeaRc::new(table));
        self
    }
    /// Drop table if exists
    pub fn if_exists(mut self) -> Self {
        self.if_exists = true;
        self
    }
    /// Drop option restrict
    pub fn restrict(mut self) -> Self {
        self.options.push(TableDropOpt::Restrict);
        self
    }
    /// Drop option cascade
    pub fn cascade(mut self) -> Self {
        self.options.push(TableDropOpt::Cascade);
        self
    }
}
impl SchemaStatementBuilder for TableDropStatement {
    // Render the statement using a statically-dispatched builder.
    fn build<T: SchemaBuilder>(&self, schema_builder: T) -> String {
        let mut sql = SqlWriter::new();
        schema_builder.prepare_table_drop_statement(self, &mut sql);
        sql.result()
    }
    // Same rendering, via dynamic dispatch.
    fn build_any(&self, schema_builder: &dyn SchemaBuilder) -> String {
        let mut sql = SqlWriter::new();
        schema_builder.prepare_table_drop_statement(self, &mut sql);
        sql.result()
    }
}
|
use anyhow::Context;
use pathfinder_lib::{
core::{Chain, StarknetBlockHash, StarknetBlockNumber},
sequencer::reply::{Block, Status},
state::block_hash::{verify_block_hash, VerifyResult},
storage::{
JournalMode, StarknetBlocksBlockId, StarknetBlocksTable, StarknetTransactionsTable, Storage,
},
};
use stark_hash::StarkHash;
/// Verify block hashes in a pathfinder database.
///
/// Iterates over all blocks in the database and verifies if the computed block hash matches
/// values we store for the block.
///
/// Usage:
/// `cargo run --release -p pathfinder --example verify_block_hashes mainnet ./mainnet.sqlite`
/// Either mainnet or goerli is accepted as the chain name.
fn main() -> anyhow::Result<()> {
let chain_name = std::env::args().nth(1).unwrap();
let chain = match chain_name.as_str() {
"mainnet" => Chain::Mainnet,
"goerli" => Chain::Goerli,
_ => panic!("Expected chain name: mainnet/goerli"),
};
let database_path = std::env::args().nth(2).unwrap();
let storage = Storage::migrate(database_path.into(), JournalMode::WAL)?;
let mut db = storage
.connection()
.context("Opening database connection")?;
let mut parent_block_hash = StarknetBlockHash(StarkHash::ZERO);
let latest_block_number = {
let tx = db.transaction().unwrap();
StarknetBlocksTable::get_latest_number(&tx)?.unwrap()
};
for block_number in 0..latest_block_number.0 {
let tx = db.transaction().unwrap();
let block_id = StarknetBlocksBlockId::Number(StarknetBlockNumber(block_number));
let block = StarknetBlocksTable::get(&tx, block_id)?.unwrap();
let transactions_and_receipts =
StarknetTransactionsTable::get_transaction_data_for_block(&tx, block_id)?;
drop(tx);
let block_hash = block.hash;
let (transactions, receipts): (Vec<_>, Vec<_>) =
transactions_and_receipts.into_iter().unzip();
let block = Block {
block_hash: block.hash,
block_number: block.number,
gas_price: Some(block.gas_price),
parent_block_hash,
sequencer_address: Some(block.sequencer_address),
state_root: block.root,
status: Status::AcceptedOnL1,
timestamp: block.timestamp,
transaction_receipts: receipts,
transactions,
starknet_version: None,
};
parent_block_hash = block_hash;
let result = verify_block_hash(&block, chain, block_hash)?;
match result {
VerifyResult::Match => {}
VerifyResult::NotVerifiable => println!(
"Block hash cannot be verified for block number {} hash {:?}",
block_number, block_hash
),
VerifyResult::Mismatch => println!(
"Block hash mismatch at block number {} hash {:?}",
block_number, block_hash
),
}
}
Ok(())
}
|
///
/// Helper trait to convert C string into Rust one.
///
/// Example:
///
/// ```rust
/// use qas::prelude::*;
///
/// qas!("tests/c/string.c");
///
/// fn main() {
///     assert_eq!(unsafe { hi().to_rust() }, "Hi there")
/// }
/// ```
///
pub unsafe trait CStringToRust {
    ///
    /// Unsafe because caller has to guarantee that `self` is a valid pointer
    /// (to a NUL-terminated buffer that outlives the returned `&str`)
    ///
    unsafe fn to_rust(&self) -> &str;
}
///
/// Helper trait to convert Rust string into C one.
///
/// WARNING: the `to_c` method <i>does</i> check if string has null-terminator,
///
/// but `to_c_unchecked` <i>doesn't</i>.
///
/// if you want to convert a <i>C string</i> that was converted into Rust, you can just call
///
/// `to_c` or even `to_c_unchecked`,
///
/// but if you want to convert a <i>native</i> Rust string into C String,
///
/// you <i>really</i> have to ensure that your string contains null-terminator - that's what `to_c` does.
///
/// Example:
///
/// ```rust
/// use qas::prelude::*;
///
/// qas!("tests/c/string.c");
///
/// fn main() {
///     // contains null-terminator!
///     assert_eq!(unsafe { return_same("Rusty\0".to_c()).to_rust() }, "Rusty");
///
///     assert_eq!(unsafe { hi().to_rust().to_c().to_rust() }, "Hi there")
/// }
/// ```
///
pub unsafe trait RustStringToC {
    ///
    /// Unsafe because caller has to guarantee that `self` contains null-terminator
    ///
    unsafe fn to_c(&self) -> *const u8;
    ///
    /// Unsafe because C string is returned may not be valid
    /// (no terminator check is performed at all)
    ///
    unsafe fn to_c_unchecked(&self) -> *const u8;
}
///
/// Helper trait to implement things such as `++` operator
///
pub trait Integer: Copy + Sized {
    /// Add x to self
    ///
    /// Example:
    /// ```rust
    /// use qas::prelude::*;
    ///
    /// let mut x: i32 = 18;
    /// x.add_one_u8(1);
    /// assert_eq!(x, 19)
    /// ```
    fn add_one_u8(&mut self, x: u8);
    /// Subtract x from self
    ///
    /// Example:
    /// ```rust
    /// use qas::prelude::*;
    ///
    /// let mut x: i32 = 18;
    /// x.sub_one_u8(1);
    /// assert_eq!(x, 17)
    /// ```
    fn sub_one_u8(&mut self, x: u8);
}
unsafe impl CStringToRust for *const u8 {
    // Borrow the pointed-to C string as `&str`.
    //
    // SAFETY: caller guarantees `*self` points at a valid NUL-terminated
    // buffer. Panics (via `unwrap`) if the bytes are not valid UTF-8.
    unsafe fn to_rust(&self) -> &str {
        let len = strlen(*self);
        let raw = core::slice::from_raw_parts(*self, len);
        core::str::from_utf8(raw).unwrap()
    }
}
unsafe impl RustStringToC for str {
    // Pointer usable as a C string, asserting a NUL terminator is present.
    unsafe fn to_c(&self) -> *const u8 {
        match self.chars().next_back() {
            // The str itself ends with NUL — nothing more to verify.
            Some(zero) if zero == '\0' => (),
            _ => {
                // SAFETY(review): this reads the byte immediately AFTER the
                // str's own bytes. That is only defined when the caller
                // guarantees a NUL actually lives there (e.g. the str was
                // carved out of a longer NUL-terminated buffer) — confirm at
                // call sites; for an arbitrary Rust str this is UB.
                let len = self.len();
                let start = self.as_ptr();
                let byte_after_address = (start as usize + len) as *const u8;
                let byte_after = *byte_after_address;
                assert_eq!(byte_after, b'\0', "string is not terminated by null")
            }
        }
        self.to_c_unchecked()
    }
    // Raw pointer to the string data; no terminator check at all.
    #[inline(always)]
    unsafe fn to_c_unchecked(&self) -> *const u8 {
        self.as_ptr()
    }
}
///
/// Calculates length of C String: the number of bytes before the first NUL.
///
/// SAFETY: `s` must point to a valid, NUL-terminated sequence of bytes.
///
unsafe fn strlen(s: *const u8) -> usize {
    // Use pointer::add instead of round-tripping through `usize` casts:
    // identical arithmetic, but keeps pointer provenance intact.
    let mut end = s;
    while *end != 0 {
        end = end.add(1);
    }
    // Byte distance between the terminator and the start of the string.
    end as usize - s as usize
}
/// Helper macro to auto-implement trait `Integer` for all integers(and not only)
macro_rules! impl_num {
    ($($ty:ident)*) => {
        $(impl Integer for $ty {
            #[inline(always)]
            fn add_one_u8(&mut self, x: u8) {
                // Widen `x` to the implementing type before the arithmetic.
                *self += x as Self;
            }
            #[inline(always)]
            fn sub_one_u8(&mut self, x: u8) {
                *self -= x as Self;
            }
        })*
    };
}
// Cover every primitive integer width, signed and unsigned.
impl_num!(u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize);
|
// svd2rust-style generated code: one single-bit reader alias per STAR
// (SDMMC status register) flag. Per-field semantics live in the `#[doc]`
// attributes below; do not hand-edit individual aliases.
#[doc = "Register `STAR` reader"]
pub type R = crate::R<STAR_SPEC>;
#[doc = "Field `CCRCFAIL` reader - Command response received (CRC check failed) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type CCRCFAIL_R = crate::BitReader;
#[doc = "Field `DCRCFAIL` reader - Data block sent/received (CRC check failed) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DCRCFAIL_R = crate::BitReader;
#[doc = "Field `CTIMEOUT` reader - Command response timeout Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR. The Command Timeout period has a fixed value of 64 SDMMC_CK clock periods."]
pub type CTIMEOUT_R = crate::BitReader;
#[doc = "Field `DTIMEOUT` reader - Data timeout Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DTIMEOUT_R = crate::BitReader;
#[doc = "Field `TXUNDERR` reader - Transmit FIFO underrun error (masked by hardware when IDMA is enabled) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type TXUNDERR_R = crate::BitReader;
#[doc = "Field `RXOVERR` reader - Received FIFO overrun error (masked by hardware when IDMA is enabled) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type RXOVERR_R = crate::BitReader;
#[doc = "Field `CMDREND` reader - Command response received (CRC check passed, or no CRC) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type CMDREND_R = crate::BitReader;
#[doc = "Field `CMDSENT` reader - Command sent (no response required) Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type CMDSENT_R = crate::BitReader;
#[doc = "Field `DATAEND` reader - Data transfer ended correctly DATAEND is set if data counter DATACOUNT is zero and no errors occur, and no transmit data transfer hold. Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DATAEND_R = crate::BitReader;
#[doc = "Field `DHOLD` reader - Data transfer Hold Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DHOLD_R = crate::BitReader;
#[doc = "Field `DBCKEND` reader - Data block sent/received DBCKEND is set when: - CRC check passed and DPSM moves to the R_W state or - IDMAEN = 0 and transmit data transfer hold and DATACOUNT >0 and DPSM moves to Wait_S. Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DBCKEND_R = crate::BitReader;
#[doc = "Field `DABORT` reader - Data transfer aborted by CMD12 Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type DABORT_R = crate::BitReader;
#[doc = "Field `DPSMACT` reader - Data path state machine active, i.e. not in Idle state This is a hardware status flag only, does not generate an interrupt."]
pub type DPSMACT_R = crate::BitReader;
#[doc = "Field `CPSMACT` reader - Command path state machine active, i.e. not in Idle state This is a hardware status flag only, does not generate an interrupt."]
pub type CPSMACT_R = crate::BitReader;
#[doc = "Field `TXFIFOHE` reader - Transmit FIFO half empty At least half the number of words can be written into the FIFO. This bit is cleared when the FIFO becomes half+1 full."]
pub type TXFIFOHE_R = crate::BitReader;
#[doc = "Field `RXFIFOHF` reader - Receive FIFO half full There are at least half the number of words in the FIFO. This bit is cleared when the FIFO becomes half+1 empty."]
pub type RXFIFOHF_R = crate::BitReader;
#[doc = "Field `TXFIFOF` reader - Transmit FIFO full This is a hardware status flag only, does not generate an interrupt. This bit is cleared when one FIFO location becomes empty."]
pub type TXFIFOF_R = crate::BitReader;
#[doc = "Field `RXFIFOF` reader - Receive FIFO full This bit is cleared when one FIFO location becomes empty."]
pub type RXFIFOF_R = crate::BitReader;
#[doc = "Field `TXFIFOE` reader - Transmit FIFO empty This bit is cleared when one FIFO location becomes full."]
pub type TXFIFOE_R = crate::BitReader;
#[doc = "Field `RXFIFOE` reader - Receive FIFO empty This is a hardware status flag only, does not generate an interrupt. This bit is cleared when one FIFO location becomes full."]
pub type RXFIFOE_R = crate::BitReader;
#[doc = "Field `BUSYD0` reader - Inverted value of SDMMC_D0 line (Busy), sampled at the end of a CMD response and a second time 2 SDMMC_CK cycles after the CMD response This bit is reset to not busy when the SDMMCD0 line changes from busy to not busy. This bit does not signal busy due to data transfer. This is a hardware status flag only, it does not generate an interrupt."]
pub type BUSYD0_R = crate::BitReader;
#[doc = "Field `BUSYD0END` reader - end of SDMMC_D0 Busy following a CMD response detected This indicates only end of busy following a CMD response. This bit does not signal busy due to data transfer. Interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type BUSYD0END_R = crate::BitReader;
#[doc = "Field `SDIOIT` reader - SDIO interrupt received The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type SDIOIT_R = crate::BitReader;
#[doc = "Field `ACKFAIL` reader - Boot acknowledgment received (boot acknowledgment check fail) The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type ACKFAIL_R = crate::BitReader;
#[doc = "Field `ACKTIMEOUT` reader - Boot acknowledgment timeout The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type ACKTIMEOUT_R = crate::BitReader;
#[doc = "Field `VSWEND` reader - Voltage switch critical timing section completion The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type VSWEND_R = crate::BitReader;
#[doc = "Field `CKSTOP` reader - SDMMC_CK stopped in Voltage switch procedure The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type CKSTOP_R = crate::BitReader;
#[doc = "Field `IDMATE` reader - IDMA transfer error The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type IDMATE_R = crate::BitReader;
#[doc = "Field `IDMABTC` reader - IDMA buffer transfer complete The interrupt flag is cleared by writing corresponding interrupt clear bit in SDMMC_ICR."]
pub type IDMABTC_R = crate::BitReader;
// Bit accessors for the SDMMC status register (STAR) snapshot held in `R`.
// Each getter extracts one flag from the cached register value. Unless a
// flag is noted below as a hardware status flag, the corresponding interrupt
// flag is cleared by writing the matching bit in SDMMC_ICR.
impl R {
    /// Extracts bit `n` of the register snapshot as a boolean.
    #[inline(always)]
    fn flag(&self, n: u8) -> bool {
        (self.bits >> n) & 1 != 0
    }
    /// Bit 0 - Command response received (CRC check failed).
    #[inline(always)]
    pub fn ccrcfail(&self) -> CCRCFAIL_R {
        CCRCFAIL_R::new(self.flag(0))
    }
    /// Bit 1 - Data block sent/received (CRC check failed).
    #[inline(always)]
    pub fn dcrcfail(&self) -> DCRCFAIL_R {
        DCRCFAIL_R::new(self.flag(1))
    }
    /// Bit 2 - Command response timeout (fixed at 64 SDMMC_CK clock periods).
    #[inline(always)]
    pub fn ctimeout(&self) -> CTIMEOUT_R {
        CTIMEOUT_R::new(self.flag(2))
    }
    /// Bit 3 - Data timeout.
    #[inline(always)]
    pub fn dtimeout(&self) -> DTIMEOUT_R {
        DTIMEOUT_R::new(self.flag(3))
    }
    /// Bit 4 - Transmit FIFO underrun error (masked by hardware when IDMA is enabled).
    #[inline(always)]
    pub fn txunderr(&self) -> TXUNDERR_R {
        TXUNDERR_R::new(self.flag(4))
    }
    /// Bit 5 - Received FIFO overrun error (masked by hardware when IDMA is enabled).
    #[inline(always)]
    pub fn rxoverr(&self) -> RXOVERR_R {
        RXOVERR_R::new(self.flag(5))
    }
    /// Bit 6 - Command response received (CRC check passed, or no CRC).
    #[inline(always)]
    pub fn cmdrend(&self) -> CMDREND_R {
        CMDREND_R::new(self.flag(6))
    }
    /// Bit 7 - Command sent (no response required).
    #[inline(always)]
    pub fn cmdsent(&self) -> CMDSENT_R {
        CMDSENT_R::new(self.flag(7))
    }
    /// Bit 8 - Data transfer ended correctly (DATACOUNT is zero, no errors, no transmit hold).
    #[inline(always)]
    pub fn dataend(&self) -> DATAEND_R {
        DATAEND_R::new(self.flag(8))
    }
    /// Bit 9 - Data transfer hold.
    #[inline(always)]
    pub fn dhold(&self) -> DHOLD_R {
        DHOLD_R::new(self.flag(9))
    }
    /// Bit 10 - Data block sent/received (CRC passed, or transmit hold with DATACOUNT > 0).
    #[inline(always)]
    pub fn dbckend(&self) -> DBCKEND_R {
        DBCKEND_R::new(self.flag(10))
    }
    /// Bit 11 - Data transfer aborted by CMD12.
    #[inline(always)]
    pub fn dabort(&self) -> DABORT_R {
        DABORT_R::new(self.flag(11))
    }
    /// Bit 12 - Data path state machine active (hardware status flag only, no interrupt).
    #[inline(always)]
    pub fn dpsmact(&self) -> DPSMACT_R {
        DPSMACT_R::new(self.flag(12))
    }
    /// Bit 13 - Command path state machine active (hardware status flag only, no interrupt).
    #[inline(always)]
    pub fn cpsmact(&self) -> CPSMACT_R {
        CPSMACT_R::new(self.flag(13))
    }
    /// Bit 14 - Transmit FIFO half empty; cleared when the FIFO becomes half+1 full.
    #[inline(always)]
    pub fn txfifohe(&self) -> TXFIFOHE_R {
        TXFIFOHE_R::new(self.flag(14))
    }
    /// Bit 15 - Receive FIFO half full; cleared when the FIFO becomes half+1 empty.
    #[inline(always)]
    pub fn rxfifohf(&self) -> RXFIFOHF_R {
        RXFIFOHF_R::new(self.flag(15))
    }
    /// Bit 16 - Transmit FIFO full (hardware status flag only, no interrupt).
    #[inline(always)]
    pub fn txfifof(&self) -> TXFIFOF_R {
        TXFIFOF_R::new(self.flag(16))
    }
    /// Bit 17 - Receive FIFO full; cleared when one FIFO location becomes empty.
    #[inline(always)]
    pub fn rxfifof(&self) -> RXFIFOF_R {
        RXFIFOF_R::new(self.flag(17))
    }
    /// Bit 18 - Transmit FIFO empty; cleared when one FIFO location becomes full.
    #[inline(always)]
    pub fn txfifoe(&self) -> TXFIFOE_R {
        TXFIFOE_R::new(self.flag(18))
    }
    /// Bit 19 - Receive FIFO empty (hardware status flag only, no interrupt).
    #[inline(always)]
    pub fn rxfifoe(&self) -> RXFIFOE_R {
        RXFIFOE_R::new(self.flag(19))
    }
    /// Bit 20 - Inverted value of the SDMMC_D0 line, i.e. busy (hardware status flag only).
    #[inline(always)]
    pub fn busyd0(&self) -> BUSYD0_R {
        BUSYD0_R::new(self.flag(20))
    }
    /// Bit 21 - End of SDMMC_D0 busy following a CMD response detected.
    #[inline(always)]
    pub fn busyd0end(&self) -> BUSYD0END_R {
        BUSYD0END_R::new(self.flag(21))
    }
    /// Bit 22 - SDIO interrupt received.
    #[inline(always)]
    pub fn sdioit(&self) -> SDIOIT_R {
        SDIOIT_R::new(self.flag(22))
    }
    /// Bit 23 - Boot acknowledgment received (boot acknowledgment check fail).
    #[inline(always)]
    pub fn ackfail(&self) -> ACKFAIL_R {
        ACKFAIL_R::new(self.flag(23))
    }
    /// Bit 24 - Boot acknowledgment timeout.
    #[inline(always)]
    pub fn acktimeout(&self) -> ACKTIMEOUT_R {
        ACKTIMEOUT_R::new(self.flag(24))
    }
    /// Bit 25 - Voltage switch critical timing section completion.
    #[inline(always)]
    pub fn vswend(&self) -> VSWEND_R {
        VSWEND_R::new(self.flag(25))
    }
    /// Bit 26 - SDMMC_CK stopped in voltage switch procedure.
    #[inline(always)]
    pub fn ckstop(&self) -> CKSTOP_R {
        CKSTOP_R::new(self.flag(26))
    }
    /// Bit 27 - IDMA transfer error.
    #[inline(always)]
    pub fn idmate(&self) -> IDMATE_R {
        IDMATE_R::new(self.flag(27))
    }
    /// Bit 28 - IDMA buffer transfer complete.
    #[inline(always)]
    pub fn idmabtc(&self) -> IDMABTC_R {
        IDMABTC_R::new(self.flag(28))
    }
}
#[doc = "SDMMC status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`star::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct STAR_SPEC;
// STAR is a 32-bit register.
impl crate::RegisterSpec for STAR_SPEC {
    type Ux = u32;
}
// Only a `Readable` impl is provided here; the register is accessed via `read()`.
#[doc = "`read()` method returns [`star::R`](R) reader structure"]
impl crate::Readable for STAR_SPEC {}
#[doc = "`reset()` method sets STAR to value 0"]
impl crate::Resettable for STAR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#![allow(dead_code)]
// Fully Qualified Syntax provides an unambiguous function-call syntax that
// lets the programmer state precisely which function is being called. It was
// formerly known as UFCS (universal function call syntax).
// This syntax can be used to call any method precisely, including both
// instance methods and associated (static) functions; every other call
// syntax is essentially a shorthand for it.
// Its concrete form is `<T as TraitName>::item`.
/// Ability to start cooking. `start` deliberately shares its name with
/// `Wash::start` to force disambiguation via fully qualified syntax.
trait Cook {
    fn start(&self);
}
/// Ability to start washing. Shares the method name `start` with `Cook`.
trait Wash {
    fn start(&self);
}
/// Unit type that implements both `Cook` and `Wash`.
struct Chef;
// `Chef` as a cook: prints which trait's `start` was invoked.
impl Cook for Chef {
    fn start(&self) {
        println!("Cook::start");
    }
}
// `Chef` as a washer: prints which trait's `start` was invoked.
impl Wash for Chef {
    fn start(&self) {
        println!("Wash::start");
    }
}
/// Demonstrates fully qualified syntax for disambiguating trait methods.
pub fn learn_call_func() {
    let chef = Chef;
    // A plain `chef.start()` would be ambiguous: both `Cook` and `Wash` are
    // in scope and each defines a `start` method, so the fully qualified
    // call syntax is required to state which trait's method is meant.
    <dyn Cook>::start(&chef);
    <Chef as Wash>::start(&chef);
}
// From this we can also see that so-called "instance methods" are nothing
// special: the only difference from an ordinary associated function is that
// the first parameter is `self`, and `self` is just a regular parameter.
// Such methods can additionally be invoked with dot syntax, which usually
// reads more simply and can be viewed as syntactic sugar.
// Note that a dot-syntax method call hides an implicit "take a reference"
// step: although the source reads `me.start()`, what is actually passed to
// `start()` is `&me`, not `me` -- the compiler inserts this automatically.
// Whatever the receiver type is (`Self`, `&Self` or `&mut Self`), the
// source uniformly reads `variable.method()`. With the fully qualified
// (UFCS) form the compiler no longer auto-references for us, so the
// reference must be written out explicitly.
/// Newtype wrapper used below to show that methods coerce to `fn` pointers.
struct T(usize);
impl T {
fn get1(&self) -> usize {
self.0
}
fn get2(&self) -> usize {
self.0
}
}
fn get3(t: &T) -> usize {
t.0
}
/// Compiles only if the argument coerces to the plain fn-pointer type `fn(&T) -> usize`.
fn check_type(_: fn(&T) -> usize) {}
pub fn learn_call_func1() {
    // `get1`/`get2` (inherent methods) and `get3` (a free function) all
    // coerce to the same fn-pointer type `fn(&T) -> usize`: a method is just
    // a function whose first parameter happens to be `self`.
    check_type(T::get1);
    check_type(T::get2);
    check_type(get3);
}
|
// This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Staking Module
//!
//! The Staking module is used to manage funds at stake by network maintainers.
//!
//! - [`staking::Trait`](./trait.Trait.html)
//! - [`Call`](./enum.Call.html)
//! - [`Module`](./struct.Module.html)
//!
//! ## Overview
//!
//! The Staking module is the means by which a set of network maintainers (known as _authorities_ in
//! some contexts and _validators_ in others) are chosen based upon those who voluntarily place
//! funds under deposit. Under deposit, those funds are rewarded under normal operation but are held
//! at pain of _slash_ (expropriation) should the staked maintainer be found not to be discharging
//! its duties properly.
//!
//! ### Terminology
//! <!-- Original author of paragraph: @gavofyork -->
//!
//! - Staking: The process of locking up funds for some time, placing them at risk of slashing
//! (loss) in order to become a rewarded maintainer of the network.
//! - Validating: The process of running a node to actively maintain the network, either by
//! producing blocks or guaranteeing finality of the chain.
//! - Nominating: The process of placing staked funds behind one or more validators in order to
//! share in any reward, and punishment, they take.
//! - Stash account: The account holding an owner's funds used for staking.
//! - Controller account: The account that controls an owner's funds for staking.
//! - Era: A (whole) number of sessions, which is the period that the validator set (and each
//! validator's active nominator set) is recalculated and where rewards are paid out.
//! - Slash: The punishment of a staker by reducing its funds.
//!
//! ### Goals
//! <!-- Original author of paragraph: @gavofyork -->
//!
//! The staking system in Substrate NPoS is designed to make the following possible:
//!
//! - Stake funds that are controlled by a cold wallet.
//! - Withdraw some, or deposit more, funds without interrupting the role of an entity.
//! - Switch between roles (nominator, validator, idle) with minimal overhead.
//!
//! ### Scenarios
//!
//! #### Staking
//!
//! Almost any interaction with the Staking module requires a process of _**bonding**_ (also known
//! as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_,
//! which holds some or all of the funds that become frozen in place as part of the staking process,
//! is paired with an active **controller** account, which issues instructions on how they shall be
//! used.
//!
//! An account pair can become bonded using the [`bond`](./enum.Call.html#variant.bond) call.
//!
//! Stash accounts can change their associated controller using the
//! [`set_controller`](./enum.Call.html#variant.set_controller) call.
//!
//! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator`
//! and `Idle` (defined in [`StakerStatus`](./enum.StakerStatus.html)). There are three
//! corresponding instructions to change between roles, namely:
//! [`validate`](./enum.Call.html#variant.validate),
//! [`nominate`](./enum.Call.html#variant.nominate), and [`chill`](./enum.Call.html#variant.chill).
//!
//! #### Validating
//!
//! A **validator** takes the role of either validating blocks or ensuring their finality,
//! maintaining the veracity of the network. A validator should avoid both any sort of malicious
//! misbehavior and going offline. Bonded accounts that state interest in being a validator do NOT
//! get immediately chosen as a validator. Instead, they are declared as a _candidate_ and they
//! _might_ get elected at the _next era_ as a validator. The result of the election is determined
//! by nominators and their votes.
//!
//! An account can become a validator candidate via the
//! [`validate`](./enum.Call.html#variant.validate) call.
//!
//! #### Nomination
//!
//! A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on
//! a set of validators to be elected. Once interest in nomination is stated by an account, it
//! takes effect at the next election round. The funds in the nominator's stash account indicate the
//! _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared
//! between the validator and its nominators. This rule incentivizes the nominators to NOT vote for
//! the misbehaving/offline validators as much as possible, simply because the nominators will also
//! lose funds if they vote poorly.
//!
//! An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call.
//!
//! #### Rewards and Slash
//!
//! The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace
//! valid behavior_ while _punishing any misbehavior or lack of availability_.
//!
//! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the
//! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the
//! validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`]
//! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each
//! nominator's account.
//!
//! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is
//! determined, a value is deducted from the balance of the validator and all the nominators who
//! voted for this validator (values are deducted from the _stash_ account of the slashed entity).
//!
//! Slashing logic is further described in the documentation of the `slashing` module.
//!
//! Similar to slashing, rewards are also shared among a validator and its associated nominators.
//! Yet, the reward funds are not always transferred to the stash account and can be configured. See
//! [Reward Calculation](#reward-calculation) for more details.
//!
//! #### Chilling
//!
//! Finally, any of the roles above can choose to step back temporarily and just chill for a while.
//! This means that if they are a nominator, they will not be considered as voters anymore and if
//! they are validators, they will no longer be a candidate for the next election.
//!
//! An account can step back via the [`chill`](enum.Call.html#variant.chill) call.
//!
//! ### Session managing
//!
//! The module implements the trait `SessionManager`, which is the only API to query the new
//! validator set and to allow these validators to be rewarded once their era has ended.
//!
//! ## Interface
//!
//! ### Dispatchable Functions
//!
//! The dispatchable functions of the Staking module enable the steps needed for entities to accept
//! and change their role, alongside some helper functions to get/set the metadata of the module.
//!
//! ### Public Functions
//!
//! The Staking module contains many public storage items and (im)mutable functions.
//!
//! ## Usage
//!
//! ### Example: Rewarding a validator by id.
//!
//! ```
//! use frame_support::{decl_module, dispatch};
//! use frame_system::ensure_signed;
//! use pallet_staking::{self as staking};
//!
//! pub trait Trait: staking::Trait {}
//!
//! decl_module! {
//! pub struct Module<T: Trait> for enum Call where origin: T::Origin {
//! /// Reward a validator.
//! #[weight = 0]
//! pub fn reward_myself(origin) -> dispatch::DispatchResult {
//! let reported = ensure_signed(origin)?;
//! <staking::Module<T>>::reward_by_ids(vec![(reported, 10)]);
//! Ok(())
//! }
//! }
//! }
//! # fn main() { }
//! ```
//!
//! ## Implementation Details
//!
//! ### Era payout
//!
//! The era payout is computed using yearly inflation curve defined at
//! [`T::RewardCurve`](./trait.Trait.html#associatedtype.RewardCurve) as such:
//!
//! ```nocompile
//! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year
//! ```
//! This payout is used to reward stakers as defined in the next section.
//!
//!
//! ```nocompile
//! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout
//! ```
//! The remaining reward is sent to the configurable end-point
//! [`T::RewardRemainder`](./trait.Trait.html#associatedtype.RewardRemainder).
//!
//! ### Reward Calculation
//!
//! Validators and nominators are rewarded at the end of each era. The total reward of an era is
//! calculated using the era duration and the staking rate (the total amount of tokens staked by
//! nominators and validators, divided by the total token supply). It aims to incentivize toward a
//! defined staking rate. The full specification can be found
//! [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model).
//!
//! Total reward is split among validators and their nominators depending on the number of points
//! they received during the era. Points are added to a validator using
//! [`reward_by_ids`](./enum.Call.html#variant.reward_by_ids) or
//! [`reward_by_indices`](./enum.Call.html#variant.reward_by_indices).
//!
//! [`Module`](./struct.Module.html) implements
//! [`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward
//! points to block producer and block producer of referenced uncles.
//!
//! The validator and its nominator split their reward as following:
//!
//! The validator can declare an amount, named
//! [`commission`](./struct.ValidatorPrefs.html#structfield.commission), that does not get shared
//! with the nominators at each reward payout through its
//! [`ValidatorPrefs`](./struct.ValidatorPrefs.html). This value gets deducted from the total reward
//! that is paid to the validator and its nominators. The remaining portion is split among the
//! validator and all of the nominators that nominated the validator, proportional to the value
//! staked behind this validator (_i.e._ dividing the
//! [`own`](./struct.Exposure.html#structfield.own) or
//! [`others`](./struct.Exposure.html#structfield.others) by
//! [`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](./struct.Exposure.html)).
//!
//! All entities who receive a reward have the option to choose their reward destination through the
//! [`Payee`](./struct.Payee.html) storage item (see
//! [`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following:
//!
//! - Controller account, (obviously) not increasing the staked value.
//! - Stash account, not increasing the staked value.
//! - Stash account, also increasing the staked value.
//!
//! ### Additional Fund Management Operations
//!
//! Any funds already placed into stash can be the target of the following operations:
//!
//! The controller account can free a portion (or all) of the funds using the
//! [`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately
//! accessible. Instead, a duration denoted by [`BondingDuration`](./struct.BondingDuration.html)
//! (in number of eras) must pass until the funds can actually be removed. Once the
//! `BondingDuration` is over, the [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded)
//! call can be used to actually withdraw the funds.
//!
//! Note that there is a limitation to the number of fund-chunks that can be scheduled to be
//! unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum
//! (`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful
//! call to `withdraw_unbonded` to remove some of the chunks.
//!
//! ### Election Algorithm
//!
//! The current election algorithm is implemented based on Phragmén. The reference implementation
//! can be found [here](https://github.com/w3f/consensus/tree/master/NPoS).
//!
//! The election algorithm, aside from electing the validators with the most stake value and votes,
//! tries to divide the nominator votes among candidates in an equal manner. To further assure this,
//! an optional post-processing can be applied that iteratively normalizes the nominator staked
//! values until the total difference among votes of a particular nominator are less than a
//! threshold.
//!
//! ## GenesisConfig
//!
//! The Staking module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). The
//! `GenesisConfig` is optional and allows setting some initial stakers.
//!
//! ## Related Modules
//!
//! - [Balances](../pallet_balances/index.html): Used to manage values at stake.
//! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new
//! validators is stored in the Session module's `Validators` at the end of each era.
#![recursion_limit = "128"]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(any(feature = "runtime-benchmarks", test))]
pub mod benchmarking;
#[cfg(test)]
mod mock;
#[cfg(any(feature = "runtime-benchmarks", test))]
pub mod testing_utils;
#[cfg(test)]
mod tests;
pub mod default_weights;
pub mod inflation;
pub mod offchain_election;
pub mod slashing;
use codec::{Decode, Encode, HasCompact};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::{
DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, IsSubType,
WithPostDispatchInfo,
},
ensure,
storage::IterableStorageMap,
traits::{
Currency, EnsureOrigin, EstimateNextNewSession, Get, Imbalance, LockIdentifier,
LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons,
},
weights::{
constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS},
Weight,
},
};
use frame_system::{
self as system, ensure_none, ensure_root, ensure_signed, offchain::SendTransactionTypes,
};
use pallet_session::historical;
use sp_npos_elections::{
build_support_map, evaluate_support, generate_solution_type, is_score_better, seq_phragmen,
Assignment, ElectionResult as PrimitiveElectionResult, ElectionScore, ExtendedBalance,
SupportMap, VoteWeight, VotingLimit,
};
use sp_runtime::{
curve::PiecewiseLinear,
traits::{
AtLeast32BitUnsigned, CheckedSub, Convert, Dispatchable, SaturatedConversion, Saturating,
StaticLookup, Zero,
},
transaction_validity::{
InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity,
TransactionValidityError, ValidTransaction,
},
DispatchError, InnerOf, PerThing, PerU16, Perbill, Percent, RuntimeDebug,
};
#[cfg(feature = "std")]
use sp_runtime::{Deserialize, Serialize};
use sp_staking::{
offence::{Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence},
SessionIndex,
};
use sp_std::{
collections::btree_map::BTreeMap,
convert::{From, TryInto},
mem::size_of,
prelude::*,
result,
};
/// Identifier of the balance lock this pallet places on stash accounts.
const STAKING_ID: LockIdentifier = *b"staking ";
/// Maximum number of unlocking chunks a staking ledger may hold at once;
/// `withdraw_unbonded` must be called before more can be scheduled.
pub const MAX_UNLOCKING_CHUNKS: usize = 32;
/// Maximum number of nominations per nominator, taken from the compact solution type.
pub const MAX_NOMINATIONS: usize = <CompactAssignments as VotingLimit>::LIMIT;
/// Log target used by this crate's `log!` macro.
// A `&str` in a `const` is implicitly `'static`; spelling the lifetime out is
// redundant (clippy::redundant_static_lifetimes).
pub(crate) const LOG_TARGET: &str = "staking";
// Syntactic sugar for logging: forwards to the `frame_support::debug` macro
// named by `$level` (e.g. `error`, `warn`, `info`), always tagging the
// message with this crate's `LOG_TARGET`.
#[macro_export]
macro_rules! log {
    ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
        frame_support::debug::$level!(
            target: crate::LOG_TARGET,
            $patter $(, $values)*
        )
    };
}
/// Data type used to index nominators in the compact type
pub type NominatorIndex = u32;
/// Data type used to index validators in the compact type.
pub type ValidatorIndex = u16;
// Ensure the size of both ValidatorIndex and NominatorIndex. They both need to be well below usize.
static_assertions::const_assert!(size_of::<ValidatorIndex>() <= size_of::<usize>());
static_assertions::const_assert!(size_of::<NominatorIndex>() <= size_of::<usize>());
static_assertions::const_assert!(size_of::<ValidatorIndex>() <= size_of::<u32>());
static_assertions::const_assert!(size_of::<NominatorIndex>() <= size_of::<u32>());
/// Maximum number of stakers that can be stored in a snapshot.
pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize;
/// Maximum number of nominators that can be stored in a snapshot.
pub(crate) const MAX_NOMINATORS: usize = NominatorIndex::max_value() as usize;
/// Counter for the number of eras that have passed.
pub type EraIndex = u32;
/// Counter for the number of "reward" points earned by a given validator.
pub type RewardPoint = u32;
// Note: Maximum nomination limit is set here -- 16.
generate_solution_type!(
    #[compact]
    pub struct CompactAssignments::<NominatorIndex, ValidatorIndex, OffchainAccuracy>(16)
);
/// Accuracy used for on-chain election.
pub type ChainAccuracy = Perbill;
/// Accuracy used for off-chain election. This better be small.
pub type OffchainAccuracy = PerU16;
/// The balance type of this module.
pub type BalanceOf<T> =
    <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
/// Shorthand for the `PositiveImbalance` type of the configured `Currency`.
type PositiveImbalanceOf<T> =
    <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::PositiveImbalance;
/// Shorthand for the `NegativeImbalance` type of the configured `Currency`.
type NegativeImbalanceOf<T> =
    <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::NegativeImbalance;
/// Information regarding the active era (the era currently in use by sessions).
#[derive(Encode, Decode, RuntimeDebug)]
pub struct ActiveEraInfo {
    /// Index of era.
    pub index: EraIndex,
    /// Moment of start expressed as millisecond from `$UNIX_EPOCH`.
    ///
    /// Start can be none if start hasn't been set for the era yet,
    /// Start is set on the first on_finalize of the era to guarantee usage of `Time`.
    start: Option<u64>,
}
/// Reward points of an era. Used to split era total payout between validators.
///
/// These points are used to reward validators and their respective nominators.
#[derive(PartialEq, Encode, Decode, Default, RuntimeDebug)]
pub struct EraRewardPoints<AccountId: Ord> {
    /// Total number of points. Equals the sum of reward points for each validator.
    total: RewardPoint,
    /// The reward points earned by a given validator.
    individual: BTreeMap<AccountId, RewardPoint>,
}
/// Indicates the initial status of the staker.
// NOTE(review): `Serialize`/`Deserialize` are derived only under `std`,
// presumably so this type can appear in genesis configuration -- confirm.
#[derive(RuntimeDebug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum StakerStatus<AccountId> {
    /// Chilling.
    Idle,
    /// Declared desire in validating or already participating in it.
    Validator,
    /// Nominating for a group of other stakers.
    Nominator(Vec<AccountId>),
}
/// A destination account for payment.
///
/// `Staked` is the default variant (see the `Default` impl below).
#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug)]
pub enum RewardDestination<AccountId> {
    /// Pay into the stash account, increasing the amount at stake accordingly.
    Staked,
    /// Pay into the stash account, not increasing the amount at stake.
    Stash,
    /// Pay into the controller account.
    Controller,
    /// Pay into a specified account.
    Account(AccountId),
}
impl<AccountId> Default for RewardDestination<AccountId> {
fn default() -> Self {
RewardDestination::Staked
}
}
/// Preference of what happens regarding validation.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct ValidatorPrefs {
    /// Reward that validator takes up-front; only the rest is split between themselves and
    /// nominators.
    // Compact-encoded to keep the on-chain footprint small.
    #[codec(compact)]
    pub commission: Perbill,
}
impl Default for ValidatorPrefs {
fn default() -> Self {
ValidatorPrefs { commission: Default::default() }
}
}
/// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked.
// NOTE(review): despite "BlockNumber" in the doc above, the second field is
// actually an `EraIndex` -- the wording looks stale.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct UnlockChunk<Balance: HasCompact> {
    /// Amount of funds to be unlocked.
    #[codec(compact)]
    value: Balance,
    /// Era number at which point it'll be unlocked.
    #[codec(compact)]
    era: EraIndex,
}
/// The ledger of a (bonded) stash.
///
/// Invariant: `total` equals `active` plus the sum of all `unlocking` chunk values.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct StakingLedger<AccountId, Balance: HasCompact> {
    /// The stash account whose balance is actually locked and at stake.
    pub stash: AccountId,
    /// The total amount of the stash's balance that we are currently accounting for.
    /// It's just `active` plus all the `unlocking` balances.
    #[codec(compact)]
    pub total: Balance,
    /// The total amount of the stash's balance that will be at stake in any forthcoming
    /// rounds.
    #[codec(compact)]
    pub active: Balance,
    /// Any balance that is becoming free, which may eventually be transferred out
    /// of the stash (assuming it doesn't get slashed first).
    pub unlocking: Vec<UnlockChunk<Balance>>,
    /// List of eras for which the stakers behind a validator have claimed rewards. Only updated
    /// for validators.
    pub claimed_rewards: Vec<EraIndex>,
}
impl<AccountId, Balance: HasCompact + Copy + Saturating + AtLeast32BitUnsigned>
    StakingLedger<AccountId, Balance>
{
    /// Remove entries from `unlocking` that are sufficiently old and reduce the
    /// total by the sum of their balances.
    fn consolidate_unlocked(self, current_era: EraIndex) -> Self {
        let mut total = self.total;
        let unlocking = self
            .unlocking
            .into_iter()
            .filter(|chunk| {
                if chunk.era > current_era {
                    // Still locked: keep the chunk untouched.
                    true
                } else {
                    // Matured: drop the chunk and release its value from `total`.
                    total = total.saturating_sub(chunk.value);
                    false
                }
            })
            .collect();
        Self {
            stash: self.stash,
            total,
            active: self.active,
            unlocking,
            claimed_rewards: self.claimed_rewards,
        }
    }
    /// Re-bond funds that were scheduled for unlocking.
    ///
    /// Moves up to `value` from the `unlocking` chunks back into `active`,
    /// consuming chunks from the back of the vector first. `total` is left
    /// unchanged: re-bonded funds were already accounted for in it.
    fn rebond(mut self, value: Balance) -> Self {
        // Running sum of how much has been re-bonded so far.
        let mut unlocking_balance: Balance = Zero::zero();
        while let Some(last) = self.unlocking.last_mut() {
            if unlocking_balance + last.value <= value {
                // The whole chunk fits within the requested amount: consume it.
                unlocking_balance += last.value;
                self.active += last.value;
                self.unlocking.pop();
            } else {
                // Only part of this chunk is needed; shrink it in place.
                let diff = value - unlocking_balance;
                unlocking_balance += diff;
                self.active += diff;
                last.value -= diff;
            }
            if unlocking_balance >= value {
                break
            }
        }
        self
    }
}
impl<AccountId, Balance> StakingLedger<AccountId, Balance>
where
    Balance: AtLeast32BitUnsigned + Saturating + Copy,
{
    /// Slash the validator for a given amount of balance. This can grow the value
    /// of the slash in the case that the validator has less than `minimum_balance`
    /// active funds. Returns the amount of funds actually slashed.
    ///
    /// Slashes from `active` funds first, and then `unlocking`, starting with the
    /// chunks that are closest to unlocking.
    fn slash(&mut self, mut value: Balance, minimum_balance: Balance) -> Balance {
        // Remember the starting total so the actually-slashed amount can be
        // computed as the difference at the end.
        let pre_total = self.total;
        let total = &mut self.total;
        let active = &mut self.active;
        // Deduct as much of `value` as possible from `target`, reducing
        // `total_remaining` by the same amount. If the deduction would leave
        // `target` at or below `minimum_balance`, the remainder is slashed as
        // well (growing the slash) so no dust stays in the staking system.
        let slash_out_of =
            |total_remaining: &mut Balance, target: &mut Balance, value: &mut Balance| {
                let mut slash_from_target = (*value).min(*target);
                if !slash_from_target.is_zero() {
                    *target -= slash_from_target;
                    // don't leave a dust balance in the staking system.
                    if *target <= minimum_balance {
                        slash_from_target += *target;
                        *value += sp_std::mem::replace(target, Zero::zero());
                    }
                    *total_remaining = total_remaining.saturating_sub(slash_from_target);
                    *value -= slash_from_target;
                }
            };
        // Slash the active bond first.
        slash_out_of(total, active, &mut value);
        // Then walk the unlocking chunks in order, counting how many were
        // fully drained so they can be removed afterwards.
        let i = self
            .unlocking
            .iter_mut()
            .map(|chunk| {
                slash_out_of(total, &mut chunk.value, &mut value);
                chunk.value
            })
            .take_while(|value| value.is_zero()) // take all fully-consumed chunks out.
            .count();
        // kill all drained chunks.
        let _ = self.unlocking.drain(..i);
        // Amount actually removed from the ledger.
        pre_total.saturating_sub(*total)
    }
}
/// A record of the nominations made by a specific account.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct Nominations<AccountId> {
    /// The targets of nomination.
    pub targets: Vec<AccountId>,
    /// The era the nominations were submitted.
    ///
    /// Except for initial nominations which are considered submitted at era 0.
    pub submitted_in: EraIndex,
    /// Whether the nominations have been suppressed. This can happen due to slashing of the
    /// validators, or other events that might invalidate the nomination.
    ///
    /// NOTE: this is for future proofing and is thus far not used.
    pub suppressed: bool,
}
/// The amount of exposure (to slashing) that an individual nominator has.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug)]
pub struct IndividualExposure<AccountId, Balance: HasCompact> {
    /// The stash account of the nominator in question.
    pub who: AccountId,
    /// Amount of funds exposed.
    #[codec(compact)]
    pub value: Balance,
}
/// A snapshot of the stake backing a single validator in the system.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, Default, RuntimeDebug)]
pub struct Exposure<AccountId, Balance: HasCompact> {
    /// The total balance backing this validator.
    #[codec(compact)]
    pub total: Balance,
    /// The validator's own stash that is exposed.
    #[codec(compact)]
    pub own: Balance,
    /// The portions of nominators' stashes that are exposed.
    pub others: Vec<IndividualExposure<AccountId, Balance>>,
}
/// A pending slash record. The value of the slash has been computed but not applied yet,
/// rather deferred for several eras.
///
/// All fields are private: records are created and consumed by the slashing logic only.
#[derive(Encode, Decode, Default, RuntimeDebug)]
pub struct UnappliedSlash<AccountId, Balance: HasCompact> {
    /// The stash ID of the offending validator.
    validator: AccountId,
    /// The validator's own slash.
    own: Balance,
    /// All other slashed stakers and amounts.
    others: Vec<(AccountId, Balance)>,
    /// Reporters of the offence; bounty payout recipients.
    reporters: Vec<AccountId>,
    /// The amount of payout.
    payout: Balance,
}
/// Indicates how an election round was computed.
#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)]
pub enum ElectionCompute {
    /// Result was forcefully computed on chain at the end of the session.
    OnChain,
    /// Result was submitted and accepted to the chain via a signed transaction.
    Signed,
    /// Result was submitted and accepted to the chain via an unsigned transaction (by an
    /// authority).
    Unsigned,
}
/// The result of an election round.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct ElectionResult<AccountId, Balance: HasCompact> {
    /// Flat list of validators who have been elected.
    elected_stashes: Vec<AccountId>,
    /// Flat list of new exposures, to be updated in the [`Exposure`] storage.
    exposures: Vec<(AccountId, Exposure<AccountId, Balance>)>,
    /// Type of the result. This is kept on chain only to track and report the best score's
    /// submission type. An optimisation could remove this.
    compute: ElectionCompute,
}
/// The status of the upcoming (offchain) election.
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub enum ElectionStatus<BlockNumber> {
    /// Nothing is happening for now; the submission window is not open.
    Closed,
    /// The submission window has been open since the contained block number.
    Open(BlockNumber),
}
/// Some indications about the size of the election. This must be submitted with the solution.
///
/// Note that these values must reflect the __total__ number, not only those that are present in the
/// solution. In short, these should be the same size as the size of the values dumped in
/// `SnapshotValidators` and `SnapshotNominators`.
#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, Default)]
pub struct ElectionSize {
    /// Number of validators in the snapshot of the current election round.
    #[codec(compact)]
    pub validators: ValidatorIndex,
    /// Number of nominators in the snapshot of the current election round.
    #[codec(compact)]
    pub nominators: NominatorIndex,
}
impl<BlockNumber: PartialEq> ElectionStatus<BlockNumber> {
    /// True exactly when the submission window opened at block `n`.
    pub fn is_open_at(&self, n: BlockNumber) -> bool {
        match self {
            Self::Open(opened_at) => *opened_at == n,
            Self::Closed => false,
        }
    }
    /// True when no submission window is currently open.
    pub fn is_closed(&self) -> bool {
        match self {
            Self::Open(_) => false,
            Self::Closed => true,
        }
    }
    /// True when a submission window is open (the complement of `is_closed`).
    pub fn is_open(&self) -> bool {
        !self.is_closed()
    }
}
impl<BlockNumber> Default for ElectionStatus<BlockNumber> {
fn default() -> Self {
Self::Closed
}
}
/// Means for interacting with a specialized version of the `session` trait.
///
/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Trait`
pub trait SessionInterface<AccountId>: frame_system::Trait {
    /// Disable a given validator by stash ID.
    ///
    /// Returns `true` if a new era should be forced at the end of this session.
    /// This allows preventing a situation where too many validators are
    /// disabled and block production stalls.
    fn disable_validator(validator: &AccountId) -> Result<bool, ()>;
    /// Get the validators from session.
    fn validators() -> Vec<AccountId>;
    /// Prune historical session tries up to but not including the given index.
    fn prune_historical_up_to(up_to: SessionIndex);
}
// Blanket implementation of `SessionInterface` for any runtime `T` that wires the session,
// historical-session, and staking pallets together with matching identity types. Every
// method simply delegates to the corresponding session module.
impl<T: Trait> SessionInterface<<T as frame_system::Trait>::AccountId> for T
where
    T: pallet_session::Trait<ValidatorId = <T as frame_system::Trait>::AccountId>,
    T: pallet_session::historical::Trait<
        FullIdentification = Exposure<<T as frame_system::Trait>::AccountId, BalanceOf<T>>,
        FullIdentificationOf = ExposureOf<T>,
    >,
    T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Trait>::AccountId>,
    T::SessionManager: pallet_session::SessionManager<<T as frame_system::Trait>::AccountId>,
    T::ValidatorIdOf: Convert<
        <T as frame_system::Trait>::AccountId,
        Option<<T as frame_system::Trait>::AccountId>,
    >,
{
    fn disable_validator(validator: &<T as frame_system::Trait>::AccountId) -> Result<bool, ()> {
        // Forwarded to the session pallet's disable-by-validator-id entry point.
        <pallet_session::Module<T>>::disable(validator)
    }
    fn validators() -> Vec<<T as frame_system::Trait>::AccountId> {
        // The current validator set as known by the session pallet.
        <pallet_session::Module<T>>::validators()
    }
    fn prune_historical_up_to(up_to: SessionIndex) {
        // Drop historical session proofs older than `up_to`.
        <pallet_session::historical::Module<T>>::prune_up_to(up_to);
    }
}
/// Weight functions for this pallet's dispatchables, normally generated by benchmarking.
/// Each method returns the weight of the extrinsic of the same name; `u32` parameters are
/// the benchmark components (e.g. number of slashing spans `s`, nominators `n`, unlock
/// chunks `l`, eras `e`, validators `v`, assignments `a`, winners `w`).
pub trait WeightInfo {
    // Bonding and unbonding.
    fn bond() -> Weight;
    fn bond_extra() -> Weight;
    fn unbond() -> Weight;
    fn withdraw_unbonded_update(s: u32) -> Weight;
    fn withdraw_unbonded_kill(s: u32) -> Weight;
    // Role management.
    fn validate() -> Weight;
    fn nominate(n: u32) -> Weight;
    fn chill() -> Weight;
    fn set_payee() -> Weight;
    fn set_controller() -> Weight;
    // Governance / root operations.
    fn set_validator_count() -> Weight;
    fn force_no_eras() -> Weight;
    fn force_new_era() -> Weight;
    fn force_new_era_always() -> Weight;
    fn set_invulnerables(v: u32) -> Weight;
    fn force_unstake(s: u32) -> Weight;
    fn cancel_deferred_slash(s: u32) -> Weight;
    // Payouts and ledger maintenance.
    fn payout_stakers_alive_staked(n: u32) -> Weight;
    fn payout_stakers_dead_controller(n: u32) -> Weight;
    fn rebond(l: u32) -> Weight;
    fn set_history_depth(e: u32) -> Weight;
    fn reap_stash(s: u32) -> Weight;
    // Elections.
    fn new_era(v: u32, n: u32) -> Weight;
    fn submit_solution_better(v: u32, n: u32, a: u32, w: u32) -> Weight;
}
/// The pallet's configuration trait: the types and constants the runtime must supply.
pub trait Trait: frame_system::Trait + SendTransactionTypes<Call<Self>> {
    /// The staking balance.
    type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber>;
    /// Time used for computing era duration.
    ///
    /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis
    /// is not used.
    type UnixTime: UnixTime;
    /// Convert a balance into a number used for election calculation. This must fit into a `u64`
    /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the
    /// [`sp_npos_elections`] crate which accepts u64 numbers and does operations in 128.
    /// Consequently, the backward conversion is used to convert the u128s from sp-elections back
    /// to a [`BalanceOf`].
    type CurrencyToVote: Convert<BalanceOf<Self>, VoteWeight> + Convert<u128, BalanceOf<Self>>;
    /// Tokens have been minted and are unused for validator-reward.
    /// See [Era payout](./index.html#era-payout).
    type RewardRemainder: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// The overarching event type.
    type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
    /// Handler for the unbalanced reduction when slashing a staker.
    type Slash: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// Handler for the unbalanced increment when rewarding a staker.
    type Reward: OnUnbalanced<PositiveImbalanceOf<Self>>;
    /// Number of sessions per era.
    type SessionsPerEra: Get<SessionIndex>;
    /// Number of eras that staked funds must remain bonded for.
    type BondingDuration: Get<EraIndex>;
    /// Number of eras that slashes are deferred by, after computation.
    ///
    /// This should be less than the bonding duration. Set to 0 if slashes
    /// should be applied immediately, without opportunity for intervention.
    type SlashDeferDuration: Get<EraIndex>;
    /// The origin which can cancel a deferred slash. Root can always do this.
    type SlashCancelOrigin: EnsureOrigin<Self::Origin>;
    /// Interface for interacting with a session module.
    type SessionInterface: self::SessionInterface<Self::AccountId>;
    /// The NPoS reward curve used to define yearly inflation.
    /// See [Era payout](./index.html#era-payout).
    type RewardCurve: Get<&'static PiecewiseLinear<'static>>;
    /// Something that can estimate the next session change, accurately or as a best effort guess.
    type NextNewSession: EstimateNextNewSession<Self::BlockNumber>;
    /// The number of blocks before the end of the era from which election submissions are allowed.
    ///
    /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will
    /// be used.
    ///
    /// This is bounded by being within the last session. Hence, setting it to a value more than the
    /// length of a session will be pointless.
    type ElectionLookahead: Get<Self::BlockNumber>;
    /// The minimum balance that `bond` accepts; bond attempts below this value are
    /// rejected with `Error::BondAmountToowLow`.
    type MinBondAmount: Get<BalanceOf<Self>>;
    /// The overarching call type.
    type Call: Dispatchable + From<Call<Self>> + IsSubType<Call<Self>> + Clone;
    /// Maximum number of balancing iterations to run in the offchain submission.
    ///
    /// If set to 0, balance_solution will not be executed at all.
    type MaxIterations: Get<u32>;
    /// The threshold of improvement that should be provided for a new solution to be accepted.
    type MinSolutionScoreBump: Get<Perbill>;
    /// The maximum number of nominators rewarded for each validator.
    ///
    /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim
    /// their reward. This used to limit the i/o cost for the nominator payout.
    type MaxNominatorRewardedPerValidator: Get<u32>;
    /// A configuration for base priority of unsigned transactions.
    ///
    /// This is exposed so that it can be tuned for particular runtime, when
    /// multiple pallets send unsigned transactions.
    type UnsignedPriority: Get<TransactionPriority>;
    /// Maximum weight that the unsigned transaction can have.
    ///
    /// Chose this value with care. On one hand, it should be as high as possible, so the solution
    /// can contain as many nominators/validators as possible. On the other hand, it should be small
    /// enough to fit in the block.
    type OffchainSolutionWeightLimit: Get<Weight>;
    /// Weight information for extrinsics in this pallet.
    type WeightInfo: WeightInfo;
}
/// Mode of era-forcing.
#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum Forcing {
    /// Not forcing anything - just let whatever happen.
    NotForcing,
    /// Force a new era, then reset to `NotForcing` as soon as it is done.
    ForceNew,
    /// Avoid a new era indefinitely.
    ForceNone,
    /// Force a new era at the end of all sessions indefinitely.
    ForceAlways,
}
impl Default for Forcing {
fn default() -> Self {
Forcing::NotForcing
}
}
// A value placed in storage that represents the current version of the Staking storage. This value
// is used by the `on_runtime_upgrade` logic to determine whether we run storage migration logic.
// This should match directly with the semantic versions of the Rust crate.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)]
enum Releases {
    V1_0_0Ancient,
    V2_0_0,
    V3_0_0,
    V4_0_0,
}
impl Default for Releases {
fn default() -> Self {
Releases::V4_0_0
}
}
decl_storage! {
    trait Store for Module<T: Trait> as Staking {
        /// Number of eras to keep in history.
        ///
        /// Information is kept for eras in `[current_era - history_depth; current_era]`.
        ///
        /// Must be more than the number of eras delayed by session otherwise. I.e. active era must
        /// always be in history. I.e. `active_era > current_era - history_depth` must be
        /// guaranteed.
        HistoryDepth get(fn history_depth) config(): u32 = 84;
        /// The ideal number of staking participants.
        pub ValidatorCount get(fn validator_count) config(): u32;
        /// Minimum number of staking participants before emergency conditions are imposed.
        pub MinimumValidatorCount get(fn minimum_validator_count) config(): u32;
        /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're
        /// easy to initialize and the performance hit is minimal (we expect no more than four
        /// invulnerables) and restricted to testnets.
        pub Invulnerables get(fn invulnerables) config(): Vec<T::AccountId>;
        /// Map from all locked "stash" accounts to the controller account.
        pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option<T::AccountId>;
        /// Map from all (unlocked) "controller" accounts to the info regarding the staking.
        pub Ledger get(fn ledger):
            map hasher(blake2_128_concat) T::AccountId
            => Option<StakingLedger<T::AccountId, BalanceOf<T>>>;
        /// Where the reward payment should be made. Keyed by stash.
        pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination<T::AccountId>;
        /// The map from (wannabe) validator stash key to the preferences of that validator.
        pub Validators get(fn validators):
            map hasher(twox_64_concat) T::AccountId => ValidatorPrefs;
        /// The map from nominator stash key to the set of stash keys of all validators to nominate.
        pub Nominators get(fn nominators):
            map hasher(twox_64_concat) T::AccountId => Option<Nominations<T::AccountId>>;
        /// The current era index.
        ///
        /// This is the latest planned era, depending on how the Session pallet queues the validator
        /// set, it might be active or not.
        pub CurrentEra get(fn current_era): Option<EraIndex>;
        /// The active era information, it holds index and start.
        ///
        /// The active era is the era currently rewarded.
        /// Validator set of this era must be equal to `SessionInterface::validators`.
        pub ActiveEra get(fn active_era): Option<ActiveEraInfo>;
        /// The session index at which the era start for the last `HISTORY_DEPTH` eras.
        pub ErasStartSessionIndex get(fn eras_start_session_index):
            map hasher(twox_64_concat) EraIndex => Option<SessionIndex>;
        /// The block number at which the current era started.
        pub EraStartBlockNumber get(fn era_start_block_number): T::BlockNumber;
        /// Exposure of validator at era.
        ///
        /// This is keyed first by the era index to allow bulk deletion and then the stash account.
        ///
        /// It is removed after `HISTORY_DEPTH` eras.
        /// If stakers hasn't been set or has been removed then empty exposure is returned.
        pub ErasStakers get(fn eras_stakers):
            double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId
            => Exposure<T::AccountId, BalanceOf<T>>;
        /// Clipped Exposure of validator at era.
        ///
        /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the
        /// `T::MaxNominatorRewardedPerValidator` biggest stakers.
        /// (Note: the field `total` and `own` of the exposure remains unchanged).
        /// This is used to limit the i/o cost for the nominator payout.
        ///
        /// This is keyed first by the era index to allow bulk deletion and then the stash account.
        ///
        /// It is removed after `HISTORY_DEPTH` eras.
        /// If stakers hasn't been set or has been removed then empty exposure is returned.
        pub ErasStakersClipped get(fn eras_stakers_clipped):
            double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId
            => Exposure<T::AccountId, BalanceOf<T>>;
        /// Similar to `ErasStakers`, this holds the preferences of validators.
        ///
        /// This is keyed first by the era index to allow bulk deletion and then the stash account.
        ///
        /// It is removed after `HISTORY_DEPTH` eras.
        // If prefs hasn't been set or has been removed then 0 commission is returned.
        pub ErasValidatorPrefs get(fn eras_validator_prefs):
            double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId
            => ValidatorPrefs;
        /// The total validator era payout for the last `HISTORY_DEPTH` eras.
        ///
        /// Eras that haven't finished yet or have been removed don't have a reward.
        pub ErasValidatorReward get(fn eras_validator_reward):
            map hasher(twox_64_concat) EraIndex => Option<BalanceOf<T>>;
        /// Rewards for the last `HISTORY_DEPTH` eras.
        /// If reward hasn't been set or has been removed then 0 reward is returned.
        pub ErasRewardPoints get(fn eras_reward_points):
            map hasher(twox_64_concat) EraIndex => EraRewardPoints<T::AccountId>;
        /// The total amount staked for the last `HISTORY_DEPTH` eras.
        /// If total hasn't been set or has been removed then 0 stake is returned.
        pub ErasTotalStake get(fn eras_total_stake):
            map hasher(twox_64_concat) EraIndex => BalanceOf<T>;
        /// Mode of era forcing.
        pub ForceEra get(fn force_era) config(): Forcing;
        /// The percentage of the slash that is distributed to reporters.
        ///
        /// The rest of the slashed value is handled by the `Slash`.
        pub SlashRewardFraction get(fn slash_reward_fraction) config(): Perbill;
        /// The amount of currency given to reporters of a slash event which was
        /// canceled by extraordinary circumstances (e.g. governance).
        pub CanceledSlashPayout get(fn canceled_payout) config(): BalanceOf<T>;
        /// All unapplied slashes that are queued for later.
        pub UnappliedSlashes:
            map hasher(twox_64_concat) EraIndex => Vec<UnappliedSlash<T::AccountId, BalanceOf<T>>>;
        /// A mapping from still-bonded eras to the first session index of that era.
        ///
        /// Must contain information for eras for the range:
        /// `[active_era - bonding_duration; active_era]`
        BondedEras: Vec<(EraIndex, SessionIndex)>;
        /// All slashing events on validators, mapped by era to the highest slash proportion
        /// and slash value of the era.
        ValidatorSlashInEra:
            double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId
            => Option<(Perbill, BalanceOf<T>)>;
        /// All slashing events on nominators, mapped by era to the highest slash value of the era.
        NominatorSlashInEra:
            double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId
            => Option<BalanceOf<T>>;
        /// Slashing spans for stash accounts.
        SlashingSpans get(fn slashing_spans): map hasher(twox_64_concat) T::AccountId => Option<slashing::SlashingSpans>;
        /// Records information about the maximum slash of a stash within a slashing span,
        /// as well as how much reward has been paid out.
        SpanSlash:
            map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex)
            => slashing::SpanRecord<BalanceOf<T>>;
        /// The earliest era for which we have a pending, unapplied slash.
        EarliestUnappliedSlash: Option<EraIndex>;
        /// Snapshot of validators at the beginning of the current election window. This should only
        /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`.
        pub SnapshotValidators get(fn snapshot_validators): Option<Vec<T::AccountId>>;
        /// Snapshot of nominators at the beginning of the current election window. This should only
        /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`.
        pub SnapshotNominators get(fn snapshot_nominators): Option<Vec<T::AccountId>>;
        /// The next validator set. At the end of an era, if this is available (potentially from the
        /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election
        /// is executed.
        pub QueuedElected get(fn queued_elected): Option<ElectionResult<T::AccountId, BalanceOf<T>>>;
        /// The score of the current [`QueuedElected`].
        pub QueuedScore get(fn queued_score): Option<ElectionScore>;
        /// Flag to control the execution of the offchain election. When `Open(_)`, we accept
        /// solutions to be submitted.
        pub EraElectionStatus get(fn era_election_status): ElectionStatus<T::BlockNumber>;
        /// True if the current **planned** session is final. Note that this does not take era
        /// forcing into account.
        pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false;
        /// Storage version of the pallet.
        ///
        /// This is set to v4.0.0 for new networks.
        StorageVersion build(|_: &GenesisConfig<T>| Releases::V4_0_0): Releases;
    }
    add_extra_genesis {
        // Initial stakers: (stash, controller, bonded balance, role).
        config(stakers):
            Vec<(T::AccountId, T::AccountId, BalanceOf<T>, StakerStatus<T::AccountId>)>;
        build(|config: &GenesisConfig<T>| {
            for &(ref stash, ref controller, balance, ref status) in &config.stakers {
                assert!(
                    T::Currency::free_balance(&stash) >= balance,
                    "Stash does not have enough balance to bond."
                );
                // Bond each genesis staker, then apply its declared role.
                let _ = <Module<T>>::bond(
                    T::Origin::from(Some(stash.clone()).into()),
                    T::Lookup::unlookup(controller.clone()),
                    balance,
                    RewardDestination::Staked,
                );
                let _ = match status {
                    StakerStatus::Validator => {
                        <Module<T>>::validate(
                            T::Origin::from(Some(controller.clone()).into()),
                            Default::default(),
                        )
                    },
                    StakerStatus::Nominator(votes) => {
                        <Module<T>>::nominate(
                            T::Origin::from(Some(controller.clone()).into()),
                            votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(),
                        )
                    }, _ => Ok(())
                };
            }
        });
    }
}
decl_event!(
    // Events deposited by this pallet; `\[..\]` annotations document the field order.
    pub enum Event<T> where Balance = BalanceOf<T>, <T as frame_system::Trait>::AccountId {
        /// The era payout has been set; the first balance is the validator-payout; the second is
        /// the remainder from the maximum amount of reward.
        /// \[era_index, validator_payout, remainder\]
        EraPayout(EraIndex, Balance, Balance),
        /// The staker has been rewarded by this amount. \[stash, amount\]
        Reward(AccountId, Balance),
        /// One validator (and its nominators) has been slashed by the given amount.
        /// \[validator, amount\]
        Slash(AccountId, Balance),
        /// An old slashing report from a prior era was discarded because it could
        /// not be processed. \[session_index\]
        OldSlashingReportDiscarded(SessionIndex),
        /// A new set of stakers was elected with the given \[compute\].
        StakingElection(ElectionCompute),
        /// A new solution for the upcoming election has been stored. \[compute\]
        SolutionStored(ElectionCompute),
        /// An account has bonded this amount. \[stash, amount\]
        ///
        /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably,
        /// it will not be emitted for staking rewards when they are added to stake.
        Bonded(AccountId, Balance),
        /// An account has unbonded this amount. \[stash, amount\]
        Unbonded(AccountId, Balance),
        /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance`
        /// from the unlocking queue. \[stash, amount\]
        Withdrawn(AccountId, Balance),
    }
);
decl_error! {
    /// Error for the staking module.
    pub enum Error for Module<T: Trait> {
        /// Not a controller account.
        NotController,
        /// Not a stash account.
        NotStash,
        /// Stash is already bonded.
        AlreadyBonded,
        /// Controller is already paired.
        AlreadyPaired,
        /// Targets cannot be empty.
        EmptyTargets,
        /// Duplicate index.
        DuplicateIndex,
        /// Slash record index out of bounds.
        InvalidSlashIndex,
        /// Can not bond with value less than minimum balance.
        InsufficientValue,
        /// Can not schedule more unlock chunks.
        NoMoreChunks,
        /// Can not rebond without unlocking chunks.
        NoUnlockChunk,
        /// Attempting to target a stash that still has funds.
        FundedTarget,
        /// Invalid era to reward.
        InvalidEraToReward,
        /// Invalid number of nominations.
        InvalidNumberOfNominations,
        /// Items are not sorted and unique.
        NotSortedAndUnique,
        /// Rewards for this era have already been claimed for this validator.
        AlreadyClaimed,
        /// The submitted result is received out of the open window.
        OffchainElectionEarlySubmission,
        /// The submitted result is not as good as the one stored on chain.
        OffchainElectionWeakSubmission,
        /// The snapshot data of the current window is missing.
        SnapshotUnavailable,
        /// Incorrect number of winners were presented.
        OffchainElectionBogusWinnerCount,
        /// One of the submitted winners is not an active candidate on chain (index is out of range
        /// in snapshot).
        OffchainElectionBogusWinner,
        /// Error while building the assignment type from the compact. This can happen if an index
        /// is invalid, or if the weights _overflow_.
        OffchainElectionBogusCompact,
        /// One of the submitted nominators is not an active nominator on chain.
        OffchainElectionBogusNominator,
        /// One of the submitted nominators has an edge to which they have not voted on chain.
        OffchainElectionBogusNomination,
        /// One of the submitted nominators has an edge which is submitted before the last non-zero
        /// slash of the target.
        OffchainElectionSlashedNomination,
        /// A self vote must only be originated from a validator to ONLY themselves.
        OffchainElectionBogusSelfVote,
        /// The submitted result has unknown edges that are not among the presented winners.
        OffchainElectionBogusEdge,
        /// The claimed score does not match with the one computed from the data.
        OffchainElectionBogusScore,
        /// The election size is invalid.
        OffchainElectionBogusElectionSize,
        /// The call is not allowed at the given time due to restrictions of election period.
        CallNotAllowed,
        /// Incorrect previous history depth input provided.
        IncorrectHistoryDepth,
        /// Incorrect number of slashing spans provided.
        IncorrectSlashingSpans,
        /// Can not bond with a value lower than `T::MinBondAmount`.
        //
        // NOTE(review): the variant name contains a typo ("Toow"); renaming it would
        // break callers and change the pallet's metadata, so it is kept as-is.
        BondAmountToowLow,
    }
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
/// Number of sessions per era.
const SessionsPerEra: SessionIndex = T::SessionsPerEra::get();
/// Number of eras that staked funds must remain bonded for.
const BondingDuration: EraIndex = T::BondingDuration::get();
const MinBondAmount: BalanceOf<T> = T::MinBondAmount::get();
/// Number of eras that slashes are deferred by, after computation.
///
/// This should be less than the bonding duration.
/// Set to 0 if slashes should be applied immediately, without opportunity for
/// intervention.
const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get();
/// The number of blocks before the end of the era from which election submissions are allowed.
///
/// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will
/// be used.
///
/// This is bounded by being within the last session. Hence, setting it to a value more than the
/// length of a session will be pointless.
const ElectionLookahead: T::BlockNumber = T::ElectionLookahead::get();
/// Maximum number of balancing iterations to run in the offchain submission.
///
/// If set to 0, balance_solution will not be executed at all.
const MaxIterations: u32 = T::MaxIterations::get();
/// The threshold of improvement that should be provided for a new solution to be accepted.
const MinSolutionScoreBump: Perbill = T::MinSolutionScoreBump::get();
/// The maximum number of nominators rewarded for each validator.
///
/// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim
/// their reward. This used to limit the i/o cost for the nominator payout.
const MaxNominatorRewardedPerValidator: u32 = T::MaxNominatorRewardedPerValidator::get();
type Error = Error<T>;
fn deposit_event() = default;
/// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the
/// election window has opened, if we are at the last session and less blocks than
/// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain
/// worker, if applicable, will execute at the end of the current block, and solutions may
/// be submitted.
fn on_initialize(now: T::BlockNumber) -> Weight {
let mut consumed_weight = 0;
let mut add_weight = |reads, writes, weight| {
consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
consumed_weight += weight;
};
if
// if we don't have any ongoing offchain compute.
Self::era_election_status().is_closed() &&
// either current session final based on the plan, or we're forcing.
(Self::is_current_session_final() || Self::will_era_be_forced())
{
if let Some(next_session_change) = T::NextNewSession::estimate_next_new_session(now) {
if let Some(remaining) = next_session_change.checked_sub(&now) {
if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() {
// create snapshot.
let (did_snapshot, snapshot_weight) = Self::create_stakers_snapshot();
add_weight(0, 0, snapshot_weight);
if did_snapshot {
// Set the flag to make sure we don't waste any compute here in the same era
// after we have triggered the offline compute.
<EraElectionStatus<T>>::put(
ElectionStatus::<T::BlockNumber>::Open(now)
);
add_weight(0, 1, 0);
log!(info, "💸 Election window is Open({:?}). Snapshot created", now);
} else {
log!(warn, "💸 Failed to create snapshot at {:?}.", now);
}
}
}
} else {
log!(warn, "💸 Estimating next session change failed.");
}
add_weight(0, 0, T::NextNewSession::weight(now))
}
// For `era_election_status`, `is_current_session_final`, `will_era_be_forced`
add_weight(3, 0, 0);
// Additional read from `on_finalize`
add_weight(1, 0, 0);
consumed_weight
}
/// Check if the current block number is the one at which the election window has been set
/// to open. If so, it runs the offchain worker code.
fn offchain_worker(now: T::BlockNumber) {
use offchain_election::{set_check_offchain_execution_status, compute_offchain_election};
if Self::era_election_status().is_open_at(now) {
let offchain_status = set_check_offchain_execution_status::<T>(now);
if let Err(why) = offchain_status {
log!(warn, "💸 skipping offchain worker in open election window due to [{}]", why);
} else {
if let Err(e) = compute_offchain_election::<T>() {
log!(error, "💸 Error in election offchain worker: {:?}", e);
} else {
log!(debug, "💸 Executed offchain worker thread without errors.");
}
}
}
}
fn on_finalize() {
// Set the start of the first era.
if let Some(mut active_era) = Self::active_era() {
if active_era.start.is_none() {
let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::<u64>();
active_era.start = Some(now_as_millis_u64);
// This write only ever happens once, we don't include it in the weight in general
ActiveEra::put(active_era);
}
}
// `on_finalize` weight is tracked in `on_initialize`
}
fn integrity_test() {
sp_io::TestExternalities::new_empty().execute_with(||
assert!(
T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0,
"As per documentation, slash defer duration ({}) should be less than bonding duration ({}).",
T::SlashDeferDuration::get(),
T::BondingDuration::get(),
)
);
use sp_runtime::UpperOf;
// see the documentation of `Assignment::try_normalize`. Now we can ensure that this
// will always return `Ok`.
// 1. Maximum sum of Vec<ChainAccuracy> must fit into `UpperOf<ChainAccuracy>`.
assert!(
<usize as TryInto<UpperOf<ChainAccuracy>>>::try_into(MAX_NOMINATIONS)
.unwrap()
.checked_mul(<ChainAccuracy>::one().deconstruct().try_into().unwrap())
.is_some()
);
// 2. Maximum sum of Vec<OffchainAccuracy> must fit into `UpperOf<OffchainAccuracy>`.
assert!(
<usize as TryInto<UpperOf<OffchainAccuracy>>>::try_into(MAX_NOMINATIONS)
.unwrap()
.checked_mul(<OffchainAccuracy>::one().deconstruct().try_into().unwrap())
.is_some()
);
}
/// Take the origin account as a stash and lock up `value` of its balance. `controller` will
/// be the account that controls it.
///
/// `value` must be more than the `minimum_balance` specified by `T::Currency`.
///
/// The dispatch origin for this call must be _Signed_ by the stash account.
///
/// Emits `Bonded`.
///
/// # <weight>
/// - Independent of the arguments. Moderate complexity.
/// - O(1).
/// - Three extra DB entries.
///
/// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned
/// unless the `origin` falls below _existential deposit_ and gets removed as dust.
/// ------------------
/// Weight: O(1)
/// DB Weight:
/// - Read: Bonded, Ledger, [Origin Account], Current Era, History Depth, Locks
/// - Write: Bonded, Payee, [Origin Account], Locks, Ledger
/// # </weight>
#[weight = T::WeightInfo::bond()]
pub fn bond(origin,
controller: <T::Lookup as StaticLookup>::Source,
#[compact] value: BalanceOf<T>,
payee: RewardDestination<T::AccountId>,
) {
let stash = ensure_signed(origin)?;
// NOTE(review): the error variant name has a typo ("Toow"); it is declared
// elsewhere in the pallet, so renaming it here would be an interface change.
ensure!(value >= T::MinBondAmount::get(), Error::<T>::BondAmountToowLow);
if <Bonded<T>>::contains_key(&stash) {
Err(Error::<T>::AlreadyBonded)?
}
let controller = T::Lookup::lookup(controller)?;
if <Ledger<T>>::contains_key(&controller) {
Err(Error::<T>::AlreadyPaired)?
}
// reject a bond which is considered to be _dust_.
if value < T::Currency::minimum_balance() {
Err(Error::<T>::InsufficientValue)?
}
// You're auto-bonded forever, here. We might improve this by only bonding when
// you actually validate/nominate and remove once you unbond __everything__.
<Bonded<T>>::insert(&stash, &controller);
<Payee<T>>::insert(&stash, payee);
// Keep the stash account alive while staking storage is attached to it.
system::Module::<T>::inc_ref(&stash);
let current_era = CurrentEra::get().unwrap_or(0);
let history_depth = Self::history_depth();
let last_reward_era = current_era.saturating_sub(history_depth);
// Clamp the bonded amount to the stash's actual free balance; the event
// carries the clamped value.
let stash_balance = T::Currency::free_balance(&stash);
let value = value.min(stash_balance);
Self::deposit_event(RawEvent::Bonded(stash.clone(), value));
let item = StakingLedger {
stash,
total: value,
active: value,
unlocking: vec![],
// Mark every era inside the history depth as already claimed, so a fresh
// bonder cannot claim payouts for eras it did not participate in.
claimed_rewards: (last_reward_era..current_era).collect(),
};
Self::update_ledger(&controller, &item);
}
/// Add some extra amount that have appeared in the stash `free_balance` into the balance up
/// for staking.
///
/// Use this if there are additional funds in your stash account that you wish to bond.
/// Unlike [`bond`] or [`unbond`] this function does not impose any limitation on the amount
/// that can be added.
///
/// The dispatch origin for this call must be _Signed_ by the stash, not the controller and
/// it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// Emits `Bonded`.
///
/// # <weight>
/// - Independent of the arguments. Insignificant complexity.
/// - O(1).
/// - One DB entry.
/// ------------
/// DB Weight:
/// - Read: Era Election Status, Bonded, Ledger, [Origin Account], Locks
/// - Write: [Origin Account], Locks, Ledger
/// # </weight>
#[weight = T::WeightInfo::bond_extra()]
fn bond_extra(origin, #[compact] max_additional: BalanceOf<T>) {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let stash = ensure_signed(origin)?;
let controller = Self::bonded(&stash).ok_or(Error::<T>::NotStash)?;
let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
let stash_balance = T::Currency::free_balance(&stash);
// `checked_sub` yields `None` when the whole free balance is already bonded,
// in which case there is nothing spare to add and the call is a silent no-op.
if let Some(extra) = stash_balance.checked_sub(&ledger.total) {
// Bond at most what the caller asked for, at most what is actually spare.
let extra = extra.min(max_additional);
ledger.total += extra;
ledger.active += extra;
Self::deposit_event(RawEvent::Bonded(stash, extra));
Self::update_ledger(&controller, &ledger);
}
}
/// Schedule a portion of the stash to be unlocked ready for transfer out after the bond
/// period ends. If this leaves an amount actively bonded less than
/// T::Currency::minimum_balance(), then it is increased to the full amount.
///
/// Once the unlock period is done, you can call `withdraw_unbonded` to actually move
/// the funds out of management ready for transfer.
///
/// No more than a limited number of unlocking chunks (see `MAX_UNLOCKING_CHUNKS`)
/// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need
/// to be called first to remove some of the chunks (if possible).
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
/// And, it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// Emits `Unbonded`.
///
/// See also [`Call::withdraw_unbonded`].
///
/// # <weight>
/// - Independent of the arguments. Limited but potentially exploitable complexity.
/// - Contains a limited number of reads.
/// - Each call (requires the remainder of the bonded balance to be above `minimum_balance`)
/// will cause a new entry to be inserted into a vector (`Ledger.unlocking`) kept in storage.
/// The only way to clean the aforementioned storage item is also user-controlled via
/// `withdraw_unbonded`.
/// - One DB entry.
/// ----------
/// Weight: O(1)
/// DB Weight:
/// - Read: EraElectionStatus, Ledger, CurrentEra, Locks, BalanceOf Stash,
/// - Write: Locks, Ledger, BalanceOf Stash,
/// </weight>
#[weight = T::WeightInfo::unbond()]
fn unbond(origin, #[compact] value: BalanceOf<T>) {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
ensure!(
ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS,
Error::<T>::NoMoreChunks,
);
// Cannot unbond more than is actively bonded.
let mut value = value.min(ledger.active);
if !value.is_zero() {
ledger.active -= value;
// Avoid there being a dust balance left in the staking system.
if ledger.active < T::Currency::minimum_balance() {
value += ledger.active;
ledger.active = Zero::zero();
}
// Note: in case there is no current era it is fine to bond one era more.
let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get();
ledger.unlocking.push(UnlockChunk { value, era });
Self::update_ledger(&controller, &ledger);
Self::deposit_event(RawEvent::Unbonded(ledger.stash, value));
}
}
/// Remove any unlocked chunks from the `unlocking` queue from our management.
///
/// This essentially frees up that balance to be used by the stash account to do
/// whatever it wants.
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
/// And, it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// Emits `Withdrawn`.
///
/// See also [`Call::unbond`].
///
/// # <weight>
/// - Could be dependent on the `origin` argument and how much `unlocking` chunks exist.
/// It implies `consolidate_unlocked` which loops over `Ledger.unlocking`, which is
/// indirectly user-controlled. See [`unbond`] for more detail.
/// - Contains a limited number of reads, yet the size of which could be large based on `ledger`.
/// - Writes are limited to the `origin` account key.
/// ---------------
/// Complexity O(S) where S is the number of slashing spans to remove
/// Update:
/// - Reads: EraElectionStatus, Ledger, Current Era, Locks, [Origin Account]
/// - Writes: [Origin Account], Locks, Ledger
/// Kill:
/// - Reads: EraElectionStatus, Ledger, Current Era, Bonded, Slashing Spans, [Origin
/// Account], Locks, BalanceOf stash
/// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators,
/// [Origin Account], Locks, BalanceOf stash.
/// - Writes Each: SpanSlash * S
/// NOTE: Weight annotation is the kill scenario, we refund otherwise.
/// # </weight>
#[weight = T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)]
fn withdraw_unbonded(origin, num_slashing_spans: u32) -> DispatchResultWithPostInfo {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
// Snapshot before consolidation so we can report how much was withdrawn.
let (stash, old_total) = (ledger.stash.clone(), ledger.total);
if let Some(current_era) = Self::current_era() {
// Fold every chunk whose era has passed back out of `unlocking`.
ledger = ledger.consolidate_unlocked(current_era)
}
let post_info_weight = if ledger.unlocking.is_empty() && ledger.active.is_zero() {
// This account must have called `unbond()` with some value that caused the active
// portion to fall below existential deposit + will have no more unlocking chunks
// left. We can now safely remove all staking-related information.
Self::kill_stash(&stash, num_slashing_spans)?;
// remove the lock.
T::Currency::remove_lock(STAKING_ID, &stash);
// This is worst case scenario, so we use the full weight and return None
None
} else {
// This was the consequence of a partial unbond. just update the ledger and move on.
Self::update_ledger(&controller, &ledger);
// This is only an update, so we use less overall weight.
Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans))
};
// `old_total` should never be less than the new total because
// `consolidate_unlocked` strictly subtracts balance.
if ledger.total < old_total {
// Already checked that this won't overflow by entry condition.
let value = old_total - ledger.total;
Self::deposit_event(RawEvent::Withdrawn(stash, value));
}
Ok(post_info_weight.into())
}
/// Declare the desire to validate for the origin controller.
///
/// Effects will be felt at the beginning of the next era.
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
/// And, it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// # <weight>
/// - Independent of the arguments. Insignificant complexity.
/// - Contains a limited number of reads.
/// - Writes are limited to the `origin` account key.
/// -----------
/// Weight: O(1)
/// DB Weight:
/// - Read: Era Election Status, Ledger
/// - Write: Nominators, Validators
/// # </weight>
#[weight = T::WeightInfo::validate()]
pub fn validate(origin, prefs: ValidatorPrefs) {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
let stash = &ledger.stash;
// Validating and nominating are mutually exclusive: drop any nomination intent.
<Nominators<T>>::remove(stash);
<Validators<T>>::insert(stash, prefs);
}
/// Declare the desire to nominate `targets` for the origin controller.
///
/// Effects will be felt at the beginning of the next era. This can only be called when
/// [`EraElectionStatus`] is `Closed`.
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
/// And, it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// # <weight>
/// - The transaction's complexity is proportional to the size of `targets` (N)
/// which is capped at CompactAssignments::LIMIT (MAX_NOMINATIONS).
/// - Both the reads and writes follow a similar pattern.
/// ---------
/// Weight: O(N)
/// where N is the number of targets
/// DB Weight:
/// - Reads: Era Election Status, Ledger, Current Era
/// - Writes: Validators, Nominators
/// # </weight>
#[weight = T::WeightInfo::nominate(targets.len() as u32)]
pub fn nominate(origin, targets: Vec<<T::Lookup as StaticLookup>::Source>) {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
let stash = &ledger.stash;
ensure!(!targets.is_empty(), Error::<T>::EmptyTargets);
// Silently truncate to the cap, then resolve every lookup source to an
// account id; a single failed lookup rejects the whole call.
let targets = targets.into_iter()
.take(MAX_NOMINATIONS)
.map(|t| T::Lookup::lookup(t))
.collect::<result::Result<Vec<T::AccountId>, _>>()?;
let nominations = Nominations {
targets,
// initial nominations are considered submitted at era 0. See `Nominations` doc
submitted_in: Self::current_era().unwrap_or(0),
suppressed: false,
};
// Nominating and validating are mutually exclusive: drop any validator intent.
<Validators<T>>::remove(stash);
<Nominators<T>>::insert(stash, &nominations);
}
/// Declare no desire to either validate or nominate.
///
/// Effects will be felt at the beginning of the next era.
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
/// And, it can be only called when [`EraElectionStatus`] is `Closed`.
///
/// # <weight>
/// - Independent of the arguments. Insignificant complexity.
/// - Contains one read.
/// - Writes are limited to the `origin` account key.
/// --------
/// Weight: O(1)
/// DB Weight:
/// - Read: EraElectionStatus, Ledger
/// - Write: Validators, Nominators
/// # </weight>
#[weight = T::WeightInfo::chill()]
fn chill(origin) {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
// Removes the stash from both `Validators` and `Nominators`.
Self::chill_stash(&ledger.stash);
}
/// (Re-)set the payment target for a controller.
///
/// Effects will be felt at the beginning of the next era.
///
/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
///
/// # <weight>
/// - Independent of the arguments. Insignificant complexity.
/// - Contains a limited number of reads.
/// - Writes are limited to the `origin` account key.
/// ---------
/// - Weight: O(1)
/// - DB Weight:
/// - Read: Ledger
/// - Write: Payee
/// # </weight>
#[weight = T::WeightInfo::set_payee()]
fn set_payee(origin, payee: RewardDestination<T::AccountId>) {
let controller = ensure_signed(origin)?;
let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
let stash = &ledger.stash;
// `Payee` is keyed by stash, not controller.
<Payee<T>>::insert(stash, payee);
}
/// (Re-)set the controller of a stash.
///
/// Effects will be felt at the beginning of the next era.
///
/// The dispatch origin for this call must be _Signed_ by the stash, not the controller.
///
/// # <weight>
/// - Independent of the arguments. Insignificant complexity.
/// - Contains a limited number of reads.
/// - Writes are limited to the `origin` account key.
/// ----------
/// Weight: O(1)
/// DB Weight:
/// - Read: Bonded, Ledger New Controller, Ledger Old Controller
/// - Write: Bonded, Ledger New Controller, Ledger Old Controller
/// # </weight>
#[weight = T::WeightInfo::set_controller()]
fn set_controller(origin, controller: <T::Lookup as StaticLookup>::Source) {
let stash = ensure_signed(origin)?;
let old_controller = Self::bonded(&stash).ok_or(Error::<T>::NotStash)?;
let controller = T::Lookup::lookup(controller)?;
// The new controller must not already control another ledger.
if <Ledger<T>>::contains_key(&controller) {
Err(Error::<T>::AlreadyPaired)?
}
if controller != old_controller {
<Bonded<T>>::insert(&stash, &controller);
// Move the ledger from the old controller key to the new one.
if let Some(l) = <Ledger<T>>::take(&old_controller) {
<Ledger<T>>::insert(&controller, l);
}
}
}
/// Sets the ideal number of validators.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// Weight: O(1)
/// Write: Validator Count
/// # </weight>
#[weight = T::WeightInfo::set_validator_count()]
fn set_validator_count(origin, #[compact] new: u32) {
ensure_root(origin)?;
ValidatorCount::put(new);
}
/// Increments the ideal number of validators.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// Same as [`set_validator_count`].
/// # </weight>
#[weight = T::WeightInfo::set_validator_count()]
fn increase_validator_count(origin, #[compact] additional: u32) {
ensure_root(origin)?;
// NOTE(review): unchecked `+=` — overflows would panic in debug / wrap in
// release. Root-only, so accepted as-is; confirm this is intentional.
ValidatorCount::mutate(|n| *n += additional);
}
/// Scale up the ideal number of validators by a factor.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// Same as [`set_validator_count`].
/// # </weight>
#[weight = T::WeightInfo::set_validator_count()]
fn scale_validator_count(origin, factor: Percent) {
ensure_root(origin)?;
// Grows the count by `factor` percent of its current value, i.e. n := n + factor*n.
ValidatorCount::mutate(|n| *n += factor * *n);
}
/// Force there to be no new eras indefinitely.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// - No arguments.
/// - Weight: O(1)
/// - Write: ForceEra
/// # </weight>
#[weight = T::WeightInfo::force_no_eras()]
fn force_no_eras(origin) {
ensure_root(origin)?;
ForceEra::put(Forcing::ForceNone);
}
/// Force there to be a new era at the end of the next session. After this, it will be
/// reset to normal (non-forced) behaviour.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// - No arguments.
/// - Weight: O(1)
/// - Write ForceEra
/// # </weight>
#[weight = T::WeightInfo::force_new_era()]
fn force_new_era(origin) {
ensure_root(origin)?;
// One-shot: `ForceNew` applies to the next session only.
ForceEra::put(Forcing::ForceNew);
}
/// Set the validators who cannot be slashed (if any).
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// - O(V)
/// - Write: Invulnerables
/// # </weight>
#[weight = T::WeightInfo::set_invulnerables(invulnerables.len() as u32)]
fn set_invulnerables(origin, invulnerables: Vec<T::AccountId>) {
ensure_root(origin)?;
// Replaces the whole set; pass an empty vec to clear it.
<Invulnerables<T>>::put(invulnerables);
}
/// Force a current staker to become completely unstaked, immediately.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// O(S) where S is the number of slashing spans to be removed
/// Reads: Bonded, Slashing Spans, Account, Locks
/// Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Account, Locks
/// Writes Each: SpanSlash * S
/// # </weight>
#[weight = T::WeightInfo::force_unstake(*num_slashing_spans)]
fn force_unstake(origin, stash: T::AccountId, num_slashing_spans: u32) {
ensure_root(origin)?;
// remove all staking-related information.
Self::kill_stash(&stash, num_slashing_spans)?;
// remove the lock.
T::Currency::remove_lock(STAKING_ID, &stash);
}
/// Force there to be a new era at the end of sessions indefinitely.
///
/// The dispatch origin must be Root.
///
/// # <weight>
/// - Weight: O(1)
/// - Write: ForceEra
/// # </weight>
#[weight = T::WeightInfo::force_new_era_always()]
fn force_new_era_always(origin) {
ensure_root(origin)?;
// Unlike `force_new_era`, this keeps forcing until explicitly reset.
ForceEra::put(Forcing::ForceAlways);
}
/// Cancel enactment of a deferred slash.
///
/// Can be called by the `T::SlashCancelOrigin`.
///
/// Parameters: era and indices of the slashes for that era to kill.
///
/// # <weight>
/// Complexity: O(U + S)
/// with U unapplied slashes weighted with U=1000
/// and S is the number of slash indices to be canceled.
/// - Read: Unapplied Slashes
/// - Write: Unapplied Slashes
/// # </weight>
#[weight = T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32)]
fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec<u32>) {
T::SlashCancelOrigin::ensure_origin(origin)?;
ensure!(!slash_indices.is_empty(), Error::<T>::EmptyTargets);
ensure!(is_sorted_and_unique(&slash_indices), Error::<T>::NotSortedAndUnique);
let mut unapplied = <Self as Store>::UnappliedSlashes::get(&era);
// Indices are sorted and unique, so bounds-checking only the last (largest)
// index covers all of them.
let last_item = slash_indices[slash_indices.len() - 1];
ensure!((last_item as usize) < unapplied.len(), Error::<T>::InvalidSlashIndex);
for (removed, index) in slash_indices.into_iter().enumerate() {
// Each earlier removal shifts later elements left by one, so adjust the
// requested index by the number of removals done so far.
let index = (index as usize) - removed;
unapplied.remove(index);
}
<Self as Store>::UnappliedSlashes::insert(&era, &unapplied);
}
/// Pay out all the stakers behind a single validator for a single era.
///
/// - `validator_stash` is the stash account of the validator. Their nominators, up to
/// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards.
/// - `era` may be any era between `[current_era - history_depth; current_era]`.
///
/// The origin of this call must be _Signed_. Any account can call this function, even if
/// it is not one of the stakers.
///
/// This can only be called when [`EraElectionStatus`] is `Closed`.
///
/// # <weight>
/// - Time complexity: at most O(MaxNominatorRewardedPerValidator).
/// - Contains a limited number of reads and writes.
/// -----------
/// N is the Number of payouts for the validator (including the validator)
/// Weight:
/// - Reward Destination Staked: O(N)
/// - Reward Destination Controller (Creating): O(N)
/// DB Weight:
/// - Read: EraElectionStatus, CurrentEra, HistoryDepth, ErasValidatorReward,
/// ErasStakersClipped, ErasRewardPoints, ErasValidatorPrefs (8 items)
/// - Read Each: Bonded, Ledger, Payee, Locks, System Account (5 items)
/// - Write Each: System Account, Locks, Ledger (3 items)
///
/// NOTE: weights are assuming that payouts are made to alive stash account (Staked).
/// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here.
/// # </weight>
#[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())]
fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
// Any signed account may trigger the payout; the signer gets nothing special.
ensure_signed(origin)?;
Self::do_payout_stakers(validator_stash, era)
}
/// Rebond a portion of the stash scheduled to be unlocked.
///
/// The dispatch origin must be signed by the controller, and it can be only called when
/// [`EraElectionStatus`] is `Closed`.
///
/// # <weight>
/// - Time complexity: O(L), where L is unlocking chunks
/// - Bounded by `MAX_UNLOCKING_CHUNKS`.
/// - Storage changes: Can't increase storage, only decrease it.
/// ---------------
/// - DB Weight:
/// - Reads: EraElectionStatus, Ledger, Locks, [Origin Account]
/// - Writes: [Origin Account], Locks, Ledger
/// # </weight>
#[weight = T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32)]
fn rebond(origin, #[compact] value: BalanceOf<T>) -> DispatchResultWithPostInfo {
ensure!(Self::era_election_status().is_closed(), Error::<T>::CallNotAllowed);
let controller = ensure_signed(origin)?;
let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
ensure!(!ledger.unlocking.is_empty(), Error::<T>::NoUnlockChunk);
// `rebond` moves up to `value` from the unlocking chunks back into `active`.
let ledger = ledger.rebond(value);
Self::update_ledger(&controller, &ledger);
// Refund: report the actual post-dispatch weight based on the number of
// chunks that remain, instead of the worst-case pre-dispatch weight.
Ok(Some(
35 * WEIGHT_PER_MICROS
+ 50 * WEIGHT_PER_NANOS * (ledger.unlocking.len() as Weight)
+ T::DbWeight::get().reads_writes(3, 2)
).into())
}
/// Set `HistoryDepth` value. This function will delete any history information
/// when `HistoryDepth` is reduced.
///
/// Parameters:
/// - `new_history_depth`: The new history depth you would like to set.
/// - `era_items_deleted`: The number of items that will be deleted by this dispatch.
/// This should report all the storage items that will be deleted by clearing old
/// era history. Needed to report an accurate weight for the dispatch. Trusted by
/// `Root` to report an accurate number.
///
/// Origin must be root.
///
/// # <weight>
/// - E: Number of history depths removed, i.e. 10 -> 7 = 3
/// - Weight: O(E)
/// - DB Weight:
/// - Reads: Current Era, History Depth
/// - Writes: History Depth
/// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs
/// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, ErasStartSessionIndex
/// # </weight>
#[weight = T::WeightInfo::set_history_depth(*_era_items_deleted)]
fn set_history_depth(origin,
#[compact] new_history_depth: EraIndex,
#[compact] _era_items_deleted: u32,
) {
ensure_root(origin)?;
if let Some(current_era) = Self::current_era() {
HistoryDepth::mutate(|history_depth| {
// Oldest era retained under the old vs. the new depth (saturating at era 0).
let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0);
let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0);
// When the depth shrinks this range is non-empty and the now-out-of-depth
// eras are pruned; when it grows the range is empty and nothing is deleted.
for era_index in last_kept..new_last_kept {
Self::clear_era_information(era_index);
}
*history_depth = new_history_depth
})
}
}
/// Remove all data structure concerning a staker/stash once its balance is zero.
/// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone
/// and the target `stash` must have no funds left.
///
/// This can be called from any origin.
///
/// - `stash`: The stash account to reap. Its balance must be zero.
///
/// # <weight>
/// Complexity: O(S) where S is the number of slashing spans on the account.
/// DB Weight:
/// - Reads: Stash Account, Bonded, Slashing Spans, Locks
/// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Stash Account, Locks
/// - Writes Each: SpanSlash * S
/// # </weight>
#[weight = T::WeightInfo::reap_stash(*num_slashing_spans)]
fn reap_stash(_origin, stash: T::AccountId, num_slashing_spans: u32) {
// The origin is deliberately unchecked: the only guard is that the stash's
// total balance must already be zero.
ensure!(T::Currency::total_balance(&stash).is_zero(), Error::<T>::FundedTarget);
Self::kill_stash(&stash, num_slashing_spans)?;
T::Currency::remove_lock(STAKING_ID, &stash);
}
/// Submit an election result to the chain. If the solution:
///
/// 1. is valid.
/// 2. has a better score than a potentially existing solution on chain.
///
/// then, it will be _put_ on chain.
///
/// A solution consists of two pieces of data:
///
/// 1. `winners`: a flat vector of all the winners of the round.
/// 2. `assignments`: the compact version of an assignment vector that encodes the edge
/// weights.
///
/// Both of which may be computed using _phragmen_, or any other algorithm.
///
/// Additionally, the submitter must provide:
///
/// - The `score` that they claim their solution has.
///
/// Both validators and nominators will be represented by indices in the solution. The
/// indices should respect the corresponding types ([`ValidatorIndex`] and
/// [`NominatorIndex`]). Moreover, they should be valid when used to index into
/// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the
/// solution to be rejected. These two storage items are set during the election window and
/// may be used to determine the indices.
///
/// A solution is valid if:
///
/// 0. It is submitted when [`EraElectionStatus`] is `Open`.
/// 1. Its claimed score is equal to the score computed on-chain.
/// 2. Presents the correct number of winners.
/// 3. All indexes must be value according to the snapshot vectors. All edge values must
/// also be correct and should not overflow the granularity of the ratio type (i.e. 256
/// or billion).
/// 4. For each edge, all targets are actually nominated by the voter.
/// 5. Has correct self-votes.
///
/// A solutions score is consisted of 3 parameters:
///
/// 1. `min { support.total }` for each support of a winner. This value should be maximized.
/// 2. `sum { support.total }` for each support of a winner. This value should be minimized.
/// 3. `sum { support.total^2 }` for each support of a winner. This value should be
/// minimized (to ensure less variance)
///
/// # <weight>
/// The transaction is assumed to be the longest path, a better solution.
/// - Initial solution is almost the same.
/// - Worse solution is retraced in pre-dispatch-checks which sets its own weight.
/// # </weight>
#[weight = T::WeightInfo::submit_solution_better(
size.validators.into(),
size.nominators.into(),
compact.len() as u32,
winners.len() as u32,
)]
pub fn submit_election_solution(
origin,
winners: Vec<ValidatorIndex>,
compact: CompactAssignments,
score: ElectionScore,
era: EraIndex,
size: ElectionSize,
) -> DispatchResultWithPostInfo {
// The signer's identity is not used beyond the signature check: all
// validation of the solution itself happens in `check_and_replace_solution`.
let _who = ensure_signed(origin)?;
Self::check_and_replace_solution(
winners,
compact,
ElectionCompute::Signed,
score,
era,
size,
)
}
/// Unsigned version of `submit_election_solution`.
///
/// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions
/// from the local node to be included. In other words, only the block author can include a
/// transaction in the block.
///
/// # <weight>
/// See [`submit_election_solution`].
/// # </weight>
#[weight = T::WeightInfo::submit_solution_better(
size.validators.into(),
size.nominators.into(),
compact.len() as u32,
winners.len() as u32,
)]
pub fn submit_election_solution_unsigned(
origin,
winners: Vec<ValidatorIndex>,
compact: CompactAssignments,
score: ElectionScore,
era: EraIndex,
size: ElectionSize,
) -> DispatchResultWithPostInfo {
ensure_none(origin)?;
// An invalid solution submitted via the unsigned path is a protocol violation by
// the block author, hence the deliberate panic (which rejects the whole block)
// rather than a recoverable dispatch error.
// Fix: the third line of the message previously lacked the `\` continuation, so
// the panic string contained a raw embedded newline.
let adjustments = Self::check_and_replace_solution(
winners,
compact,
ElectionCompute::Unsigned,
score,
era,
size,
).expect(
"An unsigned solution can only be submitted by validators; A validator should \
always produce correct solutions, else this block should not be imported, thus \
effectively depriving the validators from their authoring reward. Hence, this panic \
is expected."
);
Ok(adjustments)
}
}
}
impl<T: Trait> Module<T> {
/// The total balance that can be slashed from a stash account as of right now.
///
/// Resolves stash -> controller -> ledger and reads the `active` stake; if any
/// link in that chain is missing, the zero (default) balance is returned.
pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf<T> {
// Weight note: consider making the stake accessible through stash.
match Self::bonded(stash).and_then(Self::ledger) {
Some(ledger) => ledger.active,
None => Default::default(),
}
}
/// internal impl of [`slashable_balance_of`] that returns [`VoteWeight`].
///
/// Converts the slashable balance through `T::CurrencyToVote` so it can be fed
/// into the election machinery.
pub fn slashable_balance_of_vote_weight(stash: &T::AccountId) -> VoteWeight {
let active_stake = Self::slashable_balance_of(stash);
<T::CurrencyToVote as Convert<BalanceOf<T>, VoteWeight>>::convert(active_stake)
}
/// Dump the list of validators and nominators into vectors and keep them on-chain.
///
/// This data is used to efficiently evaluate election results. returns `true` if the operation
/// is successful.
pub fn create_stakers_snapshot() -> (bool, Weight) {
// Track the DB weight consumed by this function so the caller can account for it.
let mut consumed_weight = 0;
let mut add_db_reads_writes = |reads, writes| {
consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
};
let validators = <Validators<T>>::iter().map(|(v, _)| v).collect::<Vec<_>>();
let mut nominators = <Nominators<T>>::iter().map(|(n, _)| n).collect::<Vec<_>>();
let num_validators = validators.len();
let num_nominators = nominators.len();
// One read per iterated validator and nominator entry.
add_db_reads_writes((num_validators + num_nominators) as Weight, 0);
// Refuse to snapshot if it would exceed the hard caps; the nominator cap
// includes validators because each validator also self-nominates below.
if num_validators > MAX_VALIDATORS ||
num_nominators.saturating_add(num_validators) > MAX_NOMINATORS
{
log!(
warn,
"💸 Snapshot size too big [{} <> {}][{} <> {}].",
num_validators,
MAX_VALIDATORS,
num_nominators,
MAX_NOMINATORS,
);
(false, consumed_weight)
} else {
// all validators nominate themselves;
nominators.extend(validators.clone());
<SnapshotValidators<T>>::put(validators);
<SnapshotNominators<T>>::put(nominators);
add_db_reads_writes(0, 2);
(true, consumed_weight)
}
}
/// Clears both snapshots of stakers.
///
/// Removes `SnapshotNominators` and `SnapshotValidators` from storage; the two
/// kills are independent and leave no trace of the previous snapshot.
fn kill_stakers_snapshot() {
<SnapshotNominators<T>>::kill();
<SnapshotValidators<T>>::kill();
}
// Core payout routine: pays the validator (commission + own-stake share) and each
// clipped nominator their share of the era's reward for `validator_stash`.
fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResult {
// Validate input data
let current_era = CurrentEra::get().ok_or(Error::<T>::InvalidEraToReward)?;
ensure!(era <= current_era, Error::<T>::InvalidEraToReward);
let history_depth = Self::history_depth();
ensure!(era >= current_era.saturating_sub(history_depth), Error::<T>::InvalidEraToReward);
// Note: if era has no reward to be claimed, era may be future. better not to update
// `ledger.claimed_rewards` in this case.
let era_payout =
<ErasValidatorReward<T>>::get(&era).ok_or_else(|| Error::<T>::InvalidEraToReward)?;
let controller = Self::bonded(&validator_stash).ok_or(Error::<T>::NotStash)?;
let mut ledger = <Ledger<T>>::get(&controller).ok_or_else(|| Error::<T>::NotController)?;
// Prune claimed-reward markers that have fallen outside the history depth,
// then record this era as claimed (rejecting double claims).
ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth));
match ledger.claimed_rewards.binary_search(&era) {
Ok(_) => Err(Error::<T>::AlreadyClaimed)?,
Err(pos) => ledger.claimed_rewards.insert(pos, era),
}
let exposure = <ErasStakersClipped<T>>::get(&era, &ledger.stash);
/* Input data seems good, no errors allowed after this point */
// Persist the claimed-rewards update before doing any transfers.
<Ledger<T>>::insert(&controller, &ledger);
// Get Era reward points. It has TOTAL and INDIVIDUAL
// Find the fraction of the era reward that belongs to the validator
// Take that fraction of the eras rewards to split to nominator and validator
//
// Then look at the validator, figure out the proportion of their reward
// which goes to them and each of their nominators.
let era_reward_points = <ErasRewardPoints<T>>::get(&era);
let total_reward_points = era_reward_points.total;
let validator_reward_points = era_reward_points
.individual
.get(&ledger.stash)
.map(|points| *points)
.unwrap_or_else(|| Zero::zero());
// Nothing to do if they have no reward points.
if validator_reward_points.is_zero() {
return Ok(())
}
// This is the fraction of the total reward that the validator and the
// nominators will get.
let validator_total_reward_part =
Perbill::from_rational_approximation(validator_reward_points, total_reward_points);
// This is how much validator + nominators are entitled to.
let validator_total_payout = validator_total_reward_part * era_payout;
let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash);
// Validator first gets a cut off the top.
let validator_commission = validator_prefs.commission;
let validator_commission_payout = validator_commission * validator_total_payout;
let validator_leftover_payout = validator_total_payout - validator_commission_payout;
// Now let's calculate how this is split to the validator.
let validator_exposure_part =
Perbill::from_rational_approximation(exposure.own, exposure.total);
let validator_staking_payout = validator_exposure_part * validator_leftover_payout;
// We can now make total validator payout:
if let Some(imbalance) =
Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout)
{
Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek()));
}
// Lets now calculate how this is split to the nominators.
// Reward only the clipped exposures. Note this is not necessarily sorted.
for nominator in exposure.others.iter() {
let nominator_exposure_part =
Perbill::from_rational_approximation(nominator.value, exposure.total);
let nominator_reward: BalanceOf<T> =
nominator_exposure_part * validator_leftover_payout;
// We can now make nominator payout:
if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) {
Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek()));
}
}
Ok(())
}
/// Update the ledger for a controller.
///
/// This will also update the stash lock: the lock on the stash account is set to
/// the ledger's `total` so the bonded funds cannot be transferred away.
fn update_ledger(
controller: &T::AccountId,
ledger: &StakingLedger<T::AccountId, BalanceOf<T>>,
) {
// Lock the full bonded amount on the stash for all withdraw reasons.
T::Currency::set_lock(STAKING_ID, &ledger.stash, ledger.total, WithdrawReasons::all());
<Ledger<T>>::insert(controller, ledger);
}
/// Chill a stash account.
///
/// Removes the stash's intentions both as a validator and as a nominator, so it
/// will not take part in the next election. The bond itself is left untouched.
fn chill_stash(stash: &T::AccountId) {
<Validators<T>>::remove(stash);
<Nominators<T>>::remove(stash);
}
/// Actually make a payment to a staker. This uses the currency's reward function
/// to pay the right payee for the given staker account.
///
/// Returns the positive imbalance created by the deposit, or `None` when nothing
/// was deposited (e.g. no bonded controller, or `deposit_into_existing` failed).
fn make_payout(stash: &T::AccountId, amount: BalanceOf<T>) -> Option<PositiveImbalanceOf<T>> {
    // The stash's chosen reward destination decides which account is credited.
    let dest = Self::payee(stash);
    match dest {
        // Pay the controller account, creating it if needed.
        // (`map` replaces the former `and_then(|c| Some(..))` — clippy `bind_instead_of_map`.)
        RewardDestination::Controller => Self::bonded(stash)
            .map(|controller| T::Currency::deposit_creating(&controller, amount)),
        // Pay the stash account; it must already exist.
        RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(),
        // Pay the stash and immediately bond the reward on top of the active stake.
        RewardDestination::Staked => Self::bonded(stash)
            .and_then(|c| Self::ledger(&c).map(|l| (c, l)))
            .and_then(|(controller, mut l)| {
                l.active += amount;
                l.total += amount;
                let r = T::Currency::deposit_into_existing(stash, amount).ok();
                Self::update_ledger(&controller, &l);
                r
            }),
        // Pay an arbitrary account, creating it if needed.
        RewardDestination::Account(dest_account) =>
            Some(T::Currency::deposit_creating(&dest_account, amount)),
    }
}
/// Plan a new session potentially trigger a new era.
///
/// Returns the new validator set when a new era is planned, `None` otherwise.
fn new_session(session_index: SessionIndex) -> Option<Vec<T::AccountId>> {
if let Some(current_era) = Self::current_era() {
// Initial era has been set.
let current_era_start_session_index = Self::eras_start_session_index(current_era)
.unwrap_or_else(|| {
frame_support::print("Error: start_session_index must be set for current_era");
0
});
let era_length =
session_index.checked_sub(current_era_start_session_index).unwrap_or(0); // Must never happen.
match ForceEra::get() {
// A single forced era was requested: plan it and reset the flag.
Forcing::ForceNew => ForceEra::kill(),
// Every session starts a new era while force-always is on.
Forcing::ForceAlways => (),
// The era has run its full number of sessions; fall through to plan a new era.
Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (),
_ => {
// Either `ForceNone`, or `NotForcing && era_length < T::SessionsPerEra::get()`.
if era_length + 1 == T::SessionsPerEra::get() {
IsCurrentSessionFinal::put(true);
} else if era_length >= T::SessionsPerEra::get() {
// Should only happen when we are ready to trigger an era but we have
// ForceNone, otherwise previous arm would short circuit.
Self::close_election_window();
}
return None
},
}
// new era.
Self::new_era(session_index)
} else {
// Set initial era
Self::new_era(session_index)
}
}
/// Basic and cheap checks that we perform in validate unsigned, and in the execution.
///
/// State reads: ElectionState, CurrentEra, QueuedScore.
///
/// This function does weight refund in case of errors, which is based upon the fact that it is
/// called at the very beginning of the call site's function.
pub fn pre_dispatch_checks(score: ElectionScore, era: EraIndex) -> DispatchResultWithPostInfo {
// discard solutions that are not in-time
// check window open
ensure!(
Self::era_election_status().is_open(),
Error::<T>::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(1)),
);
// check current era: submissions for any era other than the current one are rejected.
if let Some(current_era) = Self::current_era() {
ensure!(
current_era == era,
Error::<T>::OffchainElectionEarlySubmission
.with_weight(T::DbWeight::get().reads(2)),
)
}
// assume the given score is valid. Is it better than what we have on-chain, if we have any?
if let Some(queued_score) = Self::queued_score() {
ensure!(
is_score_better(score, queued_score, T::MinSolutionScoreBump::get()),
Error::<T>::OffchainElectionWeakSubmission.with_weight(T::DbWeight::get().reads(3)),
)
}
Ok(None.into())
}
/// Checks a given solution and if correct and improved, writes it on chain as the queued result
/// of the next round. This may be called by both a signed and an unsigned transaction.
///
/// Validation runs cheapest-first: window/era/score pre-checks, size sanity against the
/// snapshots, decoding the compact assignments, per-voter vote validity, and finally
/// re-computing the score from the submitted edges.
pub fn check_and_replace_solution(
winners: Vec<ValidatorIndex>,
compact_assignments: CompactAssignments,
compute: ElectionCompute,
claimed_score: ElectionScore,
era: EraIndex,
election_size: ElectionSize,
) -> DispatchResultWithPostInfo {
// Do the basic checks. era, claimed score and window open.
let _ = Self::pre_dispatch_checks(claimed_score, era)?;
// before we read any further state, we check that the unique targets in compact is same as
// compact. It is an all in-memory check and easy to do. Moreover, it ensures that the
// solution is not full of bogus edges that can cause lots of reads to SlashingSpans. Thus,
// we can assume that the storage access of this function is always O(|winners|), not
// O(|compact.edge_count()|).
ensure!(
compact_assignments.unique_targets().len() == winners.len(),
Error::<T>::OffchainElectionBogusWinnerCount,
);
// Check that the number of presented winners is sane. Most often we have more candidates
// than we need. Then it should be `Self::validator_count()`. Else it should be all the
// candidates.
let snapshot_validators_length = <SnapshotValidators<T>>::decode_len()
.map(|l| l as u32)
.ok_or_else(|| Error::<T>::SnapshotUnavailable)?;
// size of the solution must be correct.
ensure!(
snapshot_validators_length == u32::from(election_size.validators),
Error::<T>::OffchainElectionBogusElectionSize,
);
// check the winner length only here and when we know the length of the snapshot validators
// length.
let desired_winners = Self::validator_count().min(snapshot_validators_length);
ensure!(
winners.len() as u32 == desired_winners,
Error::<T>::OffchainElectionBogusWinnerCount
);
let snapshot_nominators_len = <SnapshotNominators<T>>::decode_len()
.map(|l| l as u32)
.ok_or_else(|| Error::<T>::SnapshotUnavailable)?;
// rest of the size of the solution must be correct.
ensure!(
snapshot_nominators_len == election_size.nominators,
Error::<T>::OffchainElectionBogusElectionSize,
);
// decode snapshot validators.
let snapshot_validators =
Self::snapshot_validators().ok_or(Error::<T>::SnapshotUnavailable)?;
// check if all winners were legit; this is rather cheap. Replace with accountId.
let winners = winners
.into_iter()
.map(|widx| {
// NOTE: at the moment, since staking is explicitly blocking any offence until
// election is closed, we don't check here if the account id at
// `snapshot_validators[widx]` is actually a validator. If this ever changes, this
// loop needs to also check this.
snapshot_validators
.get(widx as usize)
.cloned()
.ok_or(Error::<T>::OffchainElectionBogusWinner)
})
.collect::<Result<Vec<T::AccountId>, Error<T>>>()?;
// decode the rest of the snapshot.
let snapshot_nominators =
Self::snapshot_nominators().ok_or(Error::<T>::SnapshotUnavailable)?;
// helpers: translate snapshot indices back into account ids.
let nominator_at = |i: NominatorIndex| -> Option<T::AccountId> {
snapshot_nominators.get(i as usize).cloned()
};
let validator_at = |i: ValidatorIndex| -> Option<T::AccountId> {
snapshot_validators.get(i as usize).cloned()
};
// un-compact.
let assignments =
compact_assignments.into_assignment(nominator_at, validator_at).map_err(|e| {
// log the error since it is not propagated into the runtime error.
log!(warn, "💸 un-compacting solution failed due to {:?}", e);
Error::<T>::OffchainElectionBogusCompact
})?;
// check all nominators actually including the claimed vote. Also check correct self votes.
// Note that we assume all validators and nominators in `assignments` are properly bonded,
// because they are coming from the snapshot via a given index.
for Assignment { who, distribution } in assignments.iter() {
let is_validator = <Validators<T>>::contains_key(&who);
let maybe_nomination = Self::nominators(&who);
if !(maybe_nomination.is_some() ^ is_validator) {
// all of the indices must map to either a validator or a nominator. If this is ever
// not the case, then the locking system of staking is most likely faulty, or we
// have bigger problems.
log!(error, "💸 detected an error in the staking locking and snapshot.");
// abort.
return Err(Error::<T>::OffchainElectionBogusNominator.into())
}
if !is_validator {
// a normal vote
let nomination = maybe_nomination.expect(
"exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \
is_validator is false; maybe_nomination is some; qed",
);
// NOTE: we don't really have to check here if the sum of all edges are the
// nominator correct. Un-compacting assures this by definition.
for (t, _) in distribution {
// each target in the provided distribution must be actually nominated by the
// nominator after the last non-zero slash.
if nomination.targets.iter().find(|&tt| tt == t).is_none() {
return Err(Error::<T>::OffchainElectionBogusNomination.into())
}
// a vote submitted before the target's last non-zero slash is stale and
// must be rejected.
if <Self as Store>::SlashingSpans::get(&t)
.map_or(false, |spans| nomination.submitted_in < spans.last_nonzero_slash())
{
return Err(Error::<T>::OffchainElectionSlashedNomination.into())
}
}
} else {
// a self vote
ensure!(distribution.len() == 1, Error::<T>::OffchainElectionBogusSelfVote);
ensure!(distribution[0].0 == *who, Error::<T>::OffchainElectionBogusSelfVote);
// defensive only. A compact assignment of length one does NOT encode the weight and
// it is always created to be 100%.
ensure!(
distribution[0].1 == OffchainAccuracy::one(),
Error::<T>::OffchainElectionBogusSelfVote,
);
}
}
// convert into staked assignments.
let staked_assignments = sp_npos_elections::assignment_ratio_to_staked(
assignments,
Self::slashable_balance_of_vote_weight,
);
// build the support map thereof in order to evaluate.
let supports = build_support_map::<T::AccountId>(&winners, &staked_assignments)
.map_err(|_| Error::<T>::OffchainElectionBogusEdge)?;
// Check if the score is the same as the claimed one.
let submitted_score = evaluate_support(&supports);
ensure!(submitted_score == claimed_score, Error::<T>::OffchainElectionBogusScore);
// At last, alles Ok. Exposures and store the result.
let exposures = Self::collect_exposure(supports);
log!(
info,
"💸 A better solution (with compute {:?} and score {:?}) has been validated and stored on chain.",
compute,
submitted_score,
);
// write new results.
<QueuedElected<T>>::put(ElectionResult { elected_stashes: winners, compute, exposures });
QueuedScore::put(submitted_score);
// emit event.
Self::deposit_event(RawEvent::SolutionStored(compute));
Ok(None.into())
}
/// Start a session potentially starting an era.
///
/// If the next active era's planned start session equals `start_session`, the era is
/// started. A skipped session (planned index already passed) also starts the era,
/// with a warning, rather than stalling the pallet.
fn start_session(start_session: SessionIndex) {
let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0);
if let Some(next_active_era_start_session_index) =
Self::eras_start_session_index(next_active_era)
{
if next_active_era_start_session_index == start_session {
Self::start_era(start_session);
} else if next_active_era_start_session_index < start_session {
// This arm should never happen, but better handle it than to stall the
// staking pallet.
frame_support::print("Warning: A session appears to have been skipped.");
Self::start_era(start_session);
}
}
}
/// End a session potentially ending an era.
///
/// The active era ends on the session immediately preceding the next era's planned
/// start session.
fn end_session(session_index: SessionIndex) {
if let Some(active_era) = Self::active_era() {
if let Some(next_active_era_start_session_index) =
Self::eras_start_session_index(active_era.index + 1)
{
if next_active_era_start_session_index == session_index + 1 {
Self::end_era(active_era, session_index);
}
}
}
}
/// Start a new active era:
///
/// * Increment `active_era.index`,
/// * reset `active_era.start`,
/// * update `BondedEras` and apply slashes.
fn start_era(start_session: SessionIndex) {
let active_era = ActiveEra::mutate(|active_era| {
let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0);
*active_era = Some(ActiveEraInfo {
index: new_index,
// Set new active era start in next `on_finalize`. To guarantee usage of `Time`
start: None,
});
new_index
});
let bonding_duration = T::BondingDuration::get();
BondedEras::mutate(|bonded| {
bonded.push((active_era, start_session));
if active_era > bonding_duration {
let first_kept = active_era - bonding_duration;
// prune out everything that's from before the first-kept index.
let n_to_prune =
bonded.iter().take_while(|&&(era_idx, _)| era_idx < first_kept).count();
// kill slashing metadata.
for (pruned_era, _) in bonded.drain(..n_to_prune) {
slashing::clear_era_metadata::<T>(pruned_era);
}
if let Some(&(_, first_session)) = bonded.first() {
T::SessionInterface::prune_historical_up_to(first_session);
}
}
});
// Apply any slashes that were deferred until this era.
Self::apply_unapplied_slashes(active_era);
}
/// Compute payout for era.
///
/// Computes the era's total validator payout from the reward curve and records it in
/// `ErasValidatorReward`; the remainder of the maximum inflation is handed to
/// `T::RewardRemainder`.
fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) {
// Note: active_era_start can be None if end era is called during genesis config.
let now = <system::Module<T>>::block_number();
<EraStartBlockNumber<T>>::put(now);
if let Some(active_era_start) = active_era.start {
let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::<u64>();
let era_duration = now_as_millis_u64 - active_era_start;
// Split total inflation between validators and the rest, based on era stake,
// total issuance and the era's actual wall-clock duration.
let (validator_payout, max_payout) = inflation::compute_total_payout(
&T::RewardCurve::get(),
Self::eras_total_stake(&active_era.index),
T::Currency::total_issuance(),
// Duration of era; more than u64::MAX is rewarded as u64::MAX.
era_duration.saturated_into::<u64>(),
);
let rest = max_payout.saturating_sub(validator_payout);
Self::deposit_event(RawEvent::EraPayout(active_era.index, validator_payout, rest));
// Set ending era reward.
<ErasValidatorReward<T>>::insert(&active_era.index, validator_payout);
// Whatever is not paid out to validators goes to the remainder handler.
T::RewardRemainder::on_unbalanced(T::Currency::issue(rest));
}
}
/// Plan a new era. Return the potential new staking set.
///
/// Increments (or initializes) `CurrentEra`, records its start session, prunes era
/// information older than `history_depth`, and runs the validator selection.
fn new_era(start_session_index: SessionIndex) -> Option<Vec<T::AccountId>> {
    // Increment or set current era.
    let current_era = CurrentEra::mutate(|s| {
        *s = Some(s.map(|s| s + 1).unwrap_or(0));
        s.unwrap()
    });
    // NOTE: fixed encoding corruption here — `&current_era` had been mangled into
    // the `¤` mojibake (HTML entity `&curren;`), which does not compile.
    ErasStartSessionIndex::insert(&current_era, &start_session_index);
    // Clean old era information.
    if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) {
        Self::clear_era_information(old_era);
    }
    // Set staking information for new era.
    Self::select_and_update_validators(current_era)
}
/// Remove all the storage items associated with the election.
///
/// Marks the window closed, wipes the staker snapshots, and clears the
/// final-session flag.
fn close_election_window() {
// Close window.
<EraElectionStatus<T>>::put(ElectionStatus::Closed);
// Kill snapshots.
Self::kill_stakers_snapshot();
// Don't track final session.
IsCurrentSessionFinal::put(false);
}
/// Select the new validator set at the end of the era.
///
/// Runs [`try_do_phragmen`] and updates the following storage items:
/// - [`EraElectionStatus`]: with `None`.
/// - [`ErasStakers`]: with the new staker set.
/// - [`ErasStakersClipped`].
/// - [`ErasValidatorPrefs`].
/// - [`ErasTotalStake`]: with the new total stake.
/// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed.
///
/// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed.
///
/// If the election has been successful, It passes the new set upwards.
///
/// This should only be called at the end of an era.
fn select_and_update_validators(current_era: EraIndex) -> Option<Vec<T::AccountId>> {
if let Some(ElectionResult::<T::AccountId, BalanceOf<T>> {
elected_stashes,
exposures,
compute,
}) = Self::try_do_election()
{
// Totally close the election round and data.
Self::close_election_window();
// Populate Stakers and write slot stake.
let mut total_stake: BalanceOf<T> = Zero::zero();
exposures.into_iter().for_each(|(stash, exposure)| {
total_stake = total_stake.saturating_add(exposure.total);
<ErasStakers<T>>::insert(current_era, &stash, &exposure);
let mut exposure_clipped = exposure;
let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize;
if exposure_clipped.others.len() > clipped_max_len {
exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse());
exposure_clipped.others.truncate(clipped_max_len);
}
<ErasStakersClipped<T>>::insert(¤t_era, &stash, exposure_clipped);
});
// Insert current era staking information
<ErasTotalStake<T>>::insert(¤t_era, total_stake);
// collect the pref of all winners
for stash in &elected_stashes {
let pref = Self::validators(stash);
<ErasValidatorPrefs<T>>::insert(¤t_era, stash, pref);
}
// emit event
Self::deposit_event(RawEvent::StakingElection(compute));
log!(
info,
"💸 new validator set of size {:?} has been elected via {:?} for era {:?}",
elected_stashes.len(),
compute,
current_era,
);
Some(elected_stashes)
} else {
None
}
}
/// Select a new validator set from the assembled stakers and their role preferences. It tries
/// first to peek into [`QueuedElected`]. Otherwise, it runs a new on-chain phragmen election.
///
/// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage
/// is updated.
fn try_do_election() -> Option<ElectionResult<T::AccountId, BalanceOf<T>>> {
// an election result from either a stored submission or locally executed one.
let next_result = <QueuedElected<T>>::take().or_else(|| Self::do_on_chain_phragmen());
// either way, kill this. We remove it here to make sure it always has the exact same
// lifetime as `QueuedElected`.
QueuedScore::kill();
next_result
}
/// Execute election and return the new results. The edge weights are processed into support
/// values.
///
/// This is basically a wrapper around [`do_phragmen`] which translates
/// `PrimitiveElectionResult` into `ElectionResult`.
///
/// No storage item is updated.
fn do_on_chain_phragmen() -> Option<ElectionResult<T::AccountId, BalanceOf<T>>> {
// Run phragmen with zero balancing iterations (plain seq-phragmen).
if let Some(phragmen_result) = Self::do_phragmen::<ChainAccuracy>(0) {
let elected_stashes = phragmen_result
.winners
.iter()
.map(|(s, _)| s.clone())
.collect::<Vec<T::AccountId>>();
let assignments = phragmen_result.assignments;
// Convert per-voter ratios into absolute staked weights.
let staked_assignments = sp_npos_elections::assignment_ratio_to_staked(
assignments,
Self::slashable_balance_of_vote_weight,
);
let supports = build_support_map::<T::AccountId>(&elected_stashes, &staked_assignments)
.map_err(|_| {
log!(
error,
"💸 on-chain phragmen is failing due to a problem in the result. This must be a bug."
)
})
.ok()?;
// collect exposures
let exposures = Self::collect_exposure(supports);
// In order to keep the property required by `on_session_ending` that we must return the
// new validator set even if it's the same as the old, as long as any underlying
// economic conditions have changed, we don't attempt to do any optimization where we
// compare against the prior set.
Some(ElectionResult::<T::AccountId, BalanceOf<T>> {
elected_stashes,
exposures,
compute: ElectionCompute::OnChain,
})
} else {
// There were not enough candidates for even our minimal level of functionality. This is
// bad. We should probably disable all functionality except for block production and let
// the chain keep producing blocks until we can decide on a sufficiently substantial
// set. TODO: #2494
None
}
}
/// Execute phragmen election and return the new results. No post-processing is applied and the
/// raw edge weights are returned.
///
/// Self votes are added and nominations before the most recent slashing span are ignored.
///
/// No storage item is updated.
pub fn do_phragmen<Accuracy: PerThing>(
iterations: usize,
) -> Option<PrimitiveElectionResult<T::AccountId, Accuracy>>
where
ExtendedBalance: From<InnerOf<Accuracy>>,
{
let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec<T::AccountId>)> = Vec::new();
let mut all_validators = Vec::new();
for (validator, _) in <Validators<T>>::iter() {
// append self vote
let self_vote = (
validator.clone(),
Self::slashable_balance_of_vote_weight(&validator),
vec![validator.clone()],
);
all_nominators.push(self_vote);
all_validators.push(validator);
}
let nominator_votes = <Nominators<T>>::iter().map(|(nominator, nominations)| {
let Nominations { submitted_in, mut targets, suppressed: _ } = nominations;
// Filter out nomination targets which were nominated before the most recent
// slashing span.
targets.retain(|stash| {
<Self as Store>::SlashingSpans::get(&stash)
.map_or(true, |spans| submitted_in >= spans.last_nonzero_slash())
});
(nominator, targets)
});
// Weigh each nominator's remaining votes by its slashable balance.
all_nominators.extend(nominator_votes.map(|(n, ns)| {
let s = Self::slashable_balance_of_vote_weight(&n);
(n, s, ns)
}));
if all_validators.len() < Self::minimum_validator_count().max(1) as usize {
// If we don't have enough candidates, nothing to do.
log!(
error,
"💸 Chain does not have enough staking candidates to operate. Era {:?}.",
Self::current_era()
);
None
} else {
seq_phragmen::<_, Accuracy>(
Self::validator_count() as usize,
all_validators,
all_nominators,
Some((iterations, 0)), // exactly run `iterations` rounds.
)
.map_err(|err| log!(error, "Call to seq-phragmen failed due to {}", err))
.ok()
}
}
/// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a
/// [`Exposure`]
fn collect_exposure(
    supports: SupportMap<T::AccountId>,
) -> Vec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>)> {
    // Translate an extended balance from the election arithmetic back into the
    // chain's balance type.
    let to_balance = |e: ExtendedBalance| {
        <T::CurrencyToVote as Convert<ExtendedBalance, BalanceOf<T>>>::convert(e)
    };
    let mut exposures = Vec::with_capacity(supports.len());
    for (validator, support) in supports.into_iter() {
        // Build an `Exposure` from this validator's `support`: split the backing
        // stake into the validator's own portion and each nominator's portion.
        let mut others = Vec::with_capacity(support.voters.len());
        let mut own: BalanceOf<T> = Zero::zero();
        let mut total: BalanceOf<T> = Zero::zero();
        for (nominator, weight) in support.voters.into_iter() {
            let stake = to_balance(weight);
            if nominator == validator {
                own = own.saturating_add(stake);
            } else {
                others.push(IndividualExposure { who: nominator, value: stake });
            }
            total = total.saturating_add(stake);
        }
        exposures.push((validator, Exposure { own, others, total }));
    }
    exposures
}
/// Remove all associated data of a stash account from the staking system.
///
/// Assumes storage is upgraded before calling.
///
/// This is called:
/// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance.
/// - through `reap_stash()` if the balance has fallen to zero (through slashing).
fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult {
let controller = <Bonded<T>>::get(stash).ok_or(Error::<T>::NotStash)?;
// Remove slashing metadata first; this can fail, in which case nothing below runs.
slashing::clear_stash_metadata::<T>(stash, num_slashing_spans)?;
<Bonded<T>>::remove(stash);
<Ledger<T>>::remove(&controller);
<Payee<T>>::remove(stash);
<Validators<T>>::remove(stash);
<Nominators<T>>::remove(stash);
// Drop the system reference taken on bonding so the account can be reaped.
system::Module::<T>::dec_ref(stash);
Ok(())
}
/// Clear all era information for given era.
///
/// Removes every per-era storage item (stakers, prefs, rewards, points, stake and
/// start session) for `era_index`. Used when an era falls out of `history_depth`.
fn clear_era_information(era_index: EraIndex) {
<ErasStakers<T>>::remove_prefix(era_index);
<ErasStakersClipped<T>>::remove_prefix(era_index);
<ErasValidatorPrefs<T>>::remove_prefix(era_index);
<ErasValidatorReward<T>>::remove(era_index);
<ErasRewardPoints<T>>::remove(era_index);
<ErasTotalStake<T>>::remove(era_index);
ErasStartSessionIndex::remove(era_index);
}
/// Apply previously-unapplied slashes on the beginning of a new era, after a delay.
fn apply_unapplied_slashes(active_era: EraIndex) {
let slash_defer_duration = T::SlashDeferDuration::get();
<Self as Store>::EarliestUnappliedSlash::mutate(|earliest| {
if let Some(ref mut earliest) = earliest {
let keep_from = active_era.saturating_sub(slash_defer_duration);
// Apply (and consume) all deferred slashes recorded for eras that have now
// passed their deferral window.
for era in (*earliest)..keep_from {
let era_slashes = <Self as Store>::UnappliedSlashes::take(&era);
for slash in era_slashes {
slashing::apply_slash::<T>(slash);
}
}
// Advance the watermark so these eras are never revisited.
*earliest = (*earliest).max(keep_from)
}
})
}
/// Add reward points to validators using their stash account ID.
///
/// Validators are keyed by stash account ID and must be in the current elected set.
///
/// For each element in the iterator the given number of points in u32 is added to the
/// validator, thus duplicates are handled.
///
/// At the end of the era each the total payout will be distributed among validator
/// relatively to their points.
///
/// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`.
/// If you need to reward lots of validator consider using `reward_by_indices`.
pub fn reward_by_ids(validators_points: impl IntoIterator<Item = (T::AccountId, u32)>) {
// Points only accumulate while there is an active era; otherwise this is a no-op.
if let Some(active_era) = Self::active_era() {
<ErasRewardPoints<T>>::mutate(active_era.index, |era_rewards| {
for (validator, points) in validators_points.into_iter() {
*era_rewards.individual.entry(validator).or_default() += points;
era_rewards.total += points;
}
});
}
}
/// Ensures that at the end of the current session there will be a new era.
fn ensure_new_era() {
    // Determine whether an era is already being forced before touching storage.
    let already_forcing = match ForceEra::get() {
        Forcing::ForceAlways | Forcing::ForceNew => true,
        _ => false,
    };
    if !already_forcing {
        ForceEra::put(Forcing::ForceNew);
    }
}
/// Whether the forcing mode will cause a new era at the next opportunity.
fn will_era_be_forced() -> bool {
    // Exhaustive per-variant mapping; adding a `Forcing` variant is a compile error here.
    match ForceEra::get() {
        Forcing::ForceAlways => true,
        Forcing::ForceNew => true,
        Forcing::ForceNone => false,
        Forcing::NotForcing => false,
    }
}
#[cfg(feature = "runtime-benchmarks")]
/// Benchmarking helper: directly insert an exposure for `controller` in `current_era`.
///
/// NOTE: fixed encoding corruption here — `&current_era` had been mangled into the
/// `¤` mojibake (HTML entity `&curren;`), which does not compile.
pub fn add_era_stakers(
    current_era: EraIndex,
    controller: T::AccountId,
    exposure: Exposure<T::AccountId, BalanceOf<T>>,
) {
    <ErasStakers<T>>::insert(&current_era, &controller, &exposure);
}
#[cfg(feature = "runtime-benchmarks")]
/// Benchmarking helper: force the election window status.
pub fn put_election_status(status: ElectionStatus<T::BlockNumber>) {
<EraElectionStatus<T>>::put(status);
}
#[cfg(feature = "runtime-benchmarks")]
/// Benchmarking helper: set the fraction of a slash paid out to reporters.
pub fn set_slash_reward_fraction(fraction: Perbill) {
SlashRewardFraction::put(fraction);
}
}
/// In this implementation `new_session(session)` must be called before `end_session(session-1)`
/// i.e. the new session must be planned before the ending of the previous session.
///
/// Once the first new_session is planned, all session must start and then end in order, though
/// some session can lag in between the newest session planned and the latest session started.
impl<T: Trait> pallet_session::SessionManager<T::AccountId> for Module<T> {
// All three hooks delegate to the inherent methods of the same name on `Module<T>`.
fn new_session(new_index: SessionIndex) -> Option<Vec<T::AccountId>> {
Self::new_session(new_index)
}
fn start_session(start_index: SessionIndex) {
Self::start_session(start_index)
}
fn end_session(end_index: SessionIndex) {
Self::end_session(end_index)
}
}
/// Session manager variant for `pallet_session::historical`, pairing each new
/// validator with its exposure so historical proofs can identify it.
impl<T: Trait> historical::SessionManager<T::AccountId, Exposure<T::AccountId, BalanceOf<T>>>
for Module<T>
{
fn new_session(
new_index: SessionIndex,
) -> Option<Vec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>)>> {
<Self as pallet_session::SessionManager<_>>::new_session(new_index).map(|validators| {
let current_era = Self::current_era()
// Must be some as a new era has been created.
.unwrap_or(0);
// Attach each validator's exposure for the freshly planned era.
validators
.into_iter()
.map(|v| {
let exposure = Self::eras_stakers(current_era, &v);
(v, exposure)
})
.collect()
})
}
fn start_session(start_index: SessionIndex) {
<Self as pallet_session::SessionManager<_>>::start_session(start_index)
}
fn end_session(end_index: SessionIndex) {
<Self as pallet_session::SessionManager<_>>::end_session(end_index)
}
}
/// Add reward points to block authors:
/// * 20 points to the block producer for producing a (non-uncle) block in the relay chain,
/// * 2 points to the block producer for each reference to a previously unreferenced uncle, and
/// * 1 point to the producer of each referenced uncle block.
impl<T> pallet_authorship::EventHandler<T::AccountId, T::BlockNumber> for Module<T>
where
T: Trait + pallet_authorship::Trait + pallet_session::Trait,
{
fn note_author(author: T::AccountId) {
Self::reward_by_ids(vec![(author, 20)])
}
fn note_uncle(author: T::AccountId, _age: T::BlockNumber) {
// The current block's author gets 2 points for including the uncle;
// the uncle's own author gets 1.
Self::reward_by_ids(vec![(<pallet_authorship::Module<T>>::author(), 2), (author, 1)])
}
}
/// A `Convert` implementation that finds the stash of the given controller account,
/// if any.
pub struct StashOf<T>(sp_std::marker::PhantomData<T>);
impl<T: Trait> Convert<T::AccountId, Option<T::AccountId>> for StashOf<T> {
// Look the controller up in the staking ledger; `None` when not a controller.
fn convert(controller: T::AccountId) -> Option<T::AccountId> {
<Module<T>>::ledger(&controller).map(|l| l.stash)
}
}
/// A typed conversion from stash account ID to the active exposure of nominators
/// on that account.
///
/// Active exposure is the exposure of the validator set currently validating, i.e. in
/// `active_era`. It can differ from the latest planned exposure in `current_era`.
pub struct ExposureOf<T>(sp_std::marker::PhantomData<T>);
impl<T: Trait> Convert<T::AccountId, Option<Exposure<T::AccountId, BalanceOf<T>>>>
for ExposureOf<T>
{
    /// Returns the active-era exposure for `validator`, or `None` when there is
    /// no active era.
    fn convert(validator: T::AccountId) -> Option<Exposure<T::AccountId, BalanceOf<T>>> {
        // `map` replaces the manual `if let Some(..) { Some(..) } else { None }`
        // (clippy `manual_map`); behavior is unchanged.
        <Module<T>>::active_era()
            .map(|active_era| <Module<T>>::eras_stakers(active_era.index, &validator))
    }
}
/// This is intended to be used with `FilterHistoricalOffences`.
impl<T: Trait>
OnOffenceHandler<T::AccountId, pallet_session::historical::IdentificationTuple<T>, Weight>
for Module<T>
where
T: pallet_session::Trait<ValidatorId = <T as frame_system::Trait>::AccountId>,
T: pallet_session::historical::Trait<
FullIdentification = Exposure<<T as frame_system::Trait>::AccountId, BalanceOf<T>>,
FullIdentificationOf = ExposureOf<T>,
>,
T::SessionHandler: pallet_session::SessionHandler<<T as frame_system::Trait>::AccountId>,
T::SessionManager: pallet_session::SessionManager<<T as frame_system::Trait>::AccountId>,
T::ValidatorIdOf: Convert<
<T as frame_system::Trait>::AccountId,
Option<<T as frame_system::Trait>::AccountId>,
>,
{
/// Process offence reports against validators for the active (or a historical,
/// still-bonded) era.
///
/// For every reported offender a slash is computed and either applied immediately
/// (`SlashDeferDuration == 0`) or queued in `UnappliedSlashes`. Returns the total
/// DB weight consumed, or `Err(())` when reporting is currently disabled (the era
/// election window is open) so the offence must be re-submitted later.
fn on_offence(
    offenders: &[OffenceDetails<
        T::AccountId,
        pallet_session::historical::IdentificationTuple<T>,
    >],
    slash_fraction: &[Perbill],
    slash_session: SessionIndex,
) -> Result<Weight, ()> {
    if !Self::can_report() {
        return Err(())
    }
    let reward_proportion = SlashRewardFraction::get();
    // weight bookkeeping: every storage access below is tallied through this closure
    let mut consumed_weight: Weight = 0;
    let mut add_db_reads_writes = |reads, writes| {
        consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
    };
    let active_era = {
        let active_era = Self::active_era();
        add_db_reads_writes(1, 0);
        if active_era.is_none() {
            // this offence need not be re-submitted.
            return Ok(consumed_weight)
        }
        active_era.expect("value checked not to be `None`; qed").index
    };
    let active_era_start_session_index = Self::eras_start_session_index(active_era)
        .unwrap_or_else(|| {
            frame_support::print("Error: start_session_index must be set for current_era");
            0
        });
    add_db_reads_writes(1, 0);
    // eras older than `window_start` are outside the bonding period and unslashable
    let window_start = active_era.saturating_sub(T::BondingDuration::get());
    // fast path for active-era report - most likely.
    // `slash_session` cannot be in a future active era. It must be in `active_era` or before.
    let slash_era = if slash_session >= active_era_start_session_index {
        active_era
    } else {
        let eras = BondedEras::get();
        add_db_reads_writes(1, 0);
        // reverse because it's more likely to find reports from recent eras.
        match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() {
            Some(&(ref slash_era, _)) => *slash_era,
            // before bonding period. defensive - should be filtered out.
            None => return Ok(consumed_weight),
        }
    };
    // remember the first era with an unapplied slash so garbage collection knows
    // where to start
    <Self as Store>::EarliestUnappliedSlash::mutate(|earliest| {
        if earliest.is_none() {
            *earliest = Some(active_era)
        }
    });
    add_db_reads_writes(1, 1);
    let slash_defer_duration = T::SlashDeferDuration::get();
    let invulnerables = Self::invulnerables();
    add_db_reads_writes(1, 0);
    // each offender is paired positionally with its slash fraction
    for (details, slash_fraction) in offenders.iter().zip(slash_fraction) {
        let (stash, exposure) = &details.offender;
        // Skip if the validator is invulnerable.
        if invulnerables.contains(stash) {
            continue
        }
        let unapplied = slashing::compute_slash::<T>(slashing::SlashParams {
            stash,
            slash: *slash_fraction,
            exposure,
            slash_era,
            window_start,
            now: active_era,
            reward_proportion,
        });
        if let Some(mut unapplied) = unapplied {
            let nominators_len = unapplied.others.len() as u64;
            let reporters_len = details.reporters.len() as u64;
            {
                let upper_bound = 1 /* Validator/NominatorSlashInEra */ + 2 /* fetch_spans */;
                let rw = upper_bound + nominators_len * upper_bound;
                add_db_reads_writes(rw, rw);
            }
            unapplied.reporters = details.reporters.clone();
            if slash_defer_duration == 0 {
                // apply right away.
                slashing::apply_slash::<T>(unapplied);
                {
                    // per-account read/write cost estimates for slashing and rewarding
                    let slash_cost = (6, 5);
                    let reward_cost = (2, 2);
                    add_db_reads_writes(
                        (1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len,
                        (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len,
                    );
                }
            } else {
                // defer to end of some `slash_defer_duration` from now.
                <Self as Store>::UnappliedSlashes::mutate(active_era, move |for_later| {
                    for_later.push(unapplied)
                });
                add_db_reads_writes(1, 1);
            }
        } else {
            add_db_reads_writes(4 /* fetch_spans */, 5 /* kick_out_if_recent */)
        }
    }
    Ok(consumed_weight)
}
/// Offence reports are only accepted while the era election window is closed.
fn can_report() -> bool {
    Self::era_election_status().is_closed()
}
}
/// Filter historical offences out and only allow those from the bonding period.
pub struct FilterHistoricalOffences<T, R> {
    // zero-sized marker tying the filter to the module `T` and the inner reporter `R`
    _inner: sp_std::marker::PhantomData<(T, R)>,
}
impl<T, Reporter, Offender, R, O> ReportOffence<Reporter, Offender, O>
    for FilterHistoricalOffences<Module<T>, R>
where
    T: Trait,
    R: ReportOffence<Reporter, Offender, O>,
    O: Offence<Offender>,
{
    /// Forward the report to `R` unless it predates the current bonding period.
    fn report_offence(reporters: Vec<Reporter>, offence: O) -> Result<(), OffenceError> {
        // disallow any slashing from before the current bonding period.
        let offence_session = offence.session_index();
        // the first entry of `BondedEras` is the oldest era still in the bonding period
        match BondedEras::get().first() {
            Some(&(_, start)) if offence_session >= start => {
                R::report_offence(reporters, offence)
            }
            _ => {
                // too old (or no bonded eras at all): drop the report and emit an event
                <Module<T>>::deposit_event(RawEvent::OldSlashingReportDiscarded(offence_session));
                Ok(())
            }
        }
    }
    /// Pass-through: known-offence checks are delegated to the inner reporter.
    fn is_known_offence(offenders: &[Offender], time_slot: &O::TimeSlot) -> bool {
        R::is_known_offence(offenders, time_slot)
    }
}
#[allow(deprecated)]
impl<T: Trait> frame_support::unsigned::ValidateUnsigned for Module<T> {
    type Call = Call<T>;
    /// Validate an unsigned `submit_election_solution_unsigned` transaction.
    ///
    /// Only solutions produced by the local offchain worker (local/in-block source)
    /// are accepted; any other unsigned call is rejected outright.
    fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity {
        if let Call::submit_election_solution_unsigned(_, _, score, era, _) = call {
            use offchain_election::DEFAULT_LONGEVITY;
            // discard solution not coming from the local OCW.
            match source {
                TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ },
                _ => {
                    log!(debug, "rejecting unsigned transaction because it is not local/in-block.");
                    return InvalidTransaction::Call.into()
                },
            }
            // run the same feasibility checks as dispatch so unfeasible solutions
            // never enter the pool in the first place
            if let Err(error_with_post_info) = Self::pre_dispatch_checks(*score, *era) {
                let invalid = to_invalid(error_with_post_info);
                log!(
                    debug,
                    "💸 validate unsigned pre dispatch checks failed due to error #{:?}.",
                    invalid,
                );
                return invalid.into()
            }
            log!(debug, "💸 validateUnsigned succeeded for a solution at era {}.", era);
            ValidTransaction::with_tag_prefix("StakingOffchain")
                // The higher the score[0], the better a solution is.
                .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into()))
                // Defensive only. A single solution can exist in the pool per era. Each validator
                // will run OCW at most once per era, hence there should never exist more than one
                // transaction anyhow.
                .and_provides(era)
                // Note: this can be more accurate in the future. We do something like
                // `era_end_block - current_block` but that is not needed now as we eagerly run
                // offchain workers now and the above should be same as `T::ElectionLookahead`
                // without the need to query more storage in the validation phase. If we randomize
                // offchain worker, then we might re-consider this.
                .longevity(
                    TryInto::<u64>::try_into(T::ElectionLookahead::get())
                        .unwrap_or(DEFAULT_LONGEVITY),
                )
                // We don't propagate this. This can never the validated at a remote node.
                .propagate(false)
                .build()
        } else {
            InvalidTransaction::Call.into()
        }
    }
    /// Re-run the feasibility checks at block-inclusion time.
    fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
        if let Call::submit_election_solution_unsigned(_, _, score, era, _) = call {
            // IMPORTANT NOTE: These checks are performed in the dispatch call itself, yet we need
            // to duplicate them here to prevent a block producer from putting a previously
            // validated, yet no longer valid solution on chain.
            // OPTIMISATION NOTE: we could skip this in the `submit_election_solution_unsigned`
            // since we already do it here. The signed version needs it though. Yer for now we keep
            // this duplicate check here so both signed and unsigned can use a singular
            // `check_and_replace_solution`.
            Self::pre_dispatch_checks(*score, *era)
                .map(|_| ())
                .map_err(to_invalid)
                .map_err(Into::into)
        } else {
            Err(InvalidTransaction::Call.into())
        }
    }
}
/// Check that the list is strictly increasing, i.e. sorted and free of duplicates.
fn is_sorted_and_unique(list: &[u32]) -> bool {
    // compare every element against its successor
    list.iter().zip(list.iter().skip(1)).all(|(prev, next)| prev < next)
}
/// convert a DispatchErrorWithPostInfo to a custom InvalidTransaction with the inner code being the
/// error number.
fn to_invalid(error_with_post_info: DispatchErrorWithPostInfo) -> InvalidTransaction {
let error = error_with_post_info.error;
let error_number = match error {
DispatchError::Module { error, .. } => error,
_ => 0,
};
InvalidTransaction::Custom(error_number)
}
|
#[cfg(any(target_os = "linux", target_os = "android"))]
#[path = "epoll.rs"]
mod select;
#[cfg(any(
target_os = "bitrig",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd"
))]
#[path = "kqueue.rs"]
mod select;
#[cfg(feature = "io_cancel")]
pub mod cancel;
pub mod co_io;
pub mod net;
pub mod wait_io;
#[cfg(feature = "io_timeout")]
use std::cell::RefCell;
use std::ops::Deref;
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::{fmt, io};
use crate::coroutine_impl::{run_coroutine, CoroutineImpl};
use crate::io::thread::ASSOCIATED_IO_RET;
use crate::likely::likely;
use crate::scheduler::get_scheduler;
use crate::sync::AtomicOption;
#[cfg(feature = "io_timeout")]
use crate::timeout_list::{TimeOutList, TimeoutHandle};
use crate::yield_now::get_co_para;
#[cfg(feature = "io_timeout")]
use crate::yield_now::set_co_para;
pub use self::select::{Selector, SysEvent};
#[inline]
/// Register a file descriptor with the scheduler's selector, returning its io handle.
pub fn add_socket<T: AsRawFd + ?Sized>(t: &T) -> io::Result<IoData> {
    let io_data = IoData::new(t);
    get_scheduler().get_selector().add_fd(io_data)
}
#[inline]
/// Re-arm the fd in the selector for read (`is_read == true`) or write interest.
pub fn mod_socket(io: &IoData, is_read: bool) -> io::Result<()> {
    get_scheduler().get_selector().mod_fd(io, is_read)
}
#[inline]
/// Deregister the fd from the selector; invoked from `IoData::drop`.
fn del_socket(io: &IoData) {
    // transfer the io to the selector
    get_scheduler().get_selector().del_fd(io);
}
// deal with the io result
#[inline]
/// Fetch the outcome of the last io operation for either execution context.
fn co_io_result(is_coroutine: bool) -> io::Result<()> {
    if likely(is_coroutine) {
        // coroutine context: any pending error travels in the coroutine parameter
        match get_co_para() {
            Some(err) => Err(err),
            None => Ok(()),
        }
    } else {
        // thread context: the io thread leaves the result in a thread-local slot
        ASSOCIATED_IO_RET
            .with(|io_ret| io_ret.take())
            .map_or(Ok(()), |err| Err(*err))
    }
}
#[inline]
fn from_nix_error(err: nix::Error) -> ::std::io::Error {
std::io::Error::from_raw_os_error(err as i32)
}
#[cfg(feature = "io_timeout")]
/// Timer-list callback fired when a blocking io operation times out.
fn timeout_handler(data: TimerData) {
    // a null event_data means the io side already completed and detached this timer
    if data.event_data.is_null() {
        return;
    }
    // SAFETY(review): a non-null `event_data` is assumed to point at a live
    // `EventData` for the duration of this callback (see `schedule`, which nulls
    // the pointer before removing the timer) — TODO confirm
    let event_data = unsafe { &mut *data.event_data };
    // remove the event timer
    event_data.timer.borrow_mut().take();
    // get and check the coroutine
    let mut co = match event_data.co.take() {
        Some(co) => co,
        // the selector already claimed the coroutine; nothing left to do
        None => return,
    };
    set_co_para(&mut co, io::Error::new(io::ErrorKind::TimedOut, "timeout"));
    // resume the coroutine with timeout error
    run_coroutine(co);
}
// the timeout data
#[cfg(feature = "io_timeout")]
pub struct TimerData {
    // raw pointer back to the io event this timer guards; set to null when the io
    // completes first so `timeout_handler` becomes a no-op
    event_data: *mut EventData,
}
// timer wheel holding pending `TimerData` entries
#[cfg(feature = "io_timeout")]
pub type TimerList = TimeOutList<TimerData>;
// handle used to cancel an in-flight timeout
#[cfg(feature = "io_timeout")]
pub type TimerHandle = TimeoutHandle<TimerData>;
// event associated io data, must be construct in
// each file handle, the epoll event.data would point to it
pub struct EventData {
    // the raw fd this event belongs to
    pub fd: RawFd,
    // set when the selector reports readiness; cleared via `IoData::reset`
    pub io_flag: AtomicBool,
    // pending timeout for the current blocking io operation, if any
    #[cfg(feature = "io_timeout")]
    pub timer: RefCell<Option<TimerHandle>>,
    // the coroutine parked on this fd, waiting to be rescheduled
    pub co: AtomicOption<CoroutineImpl>,
}
// SAFETY(review): EventData is shared between worker threads and the selector;
// the raw pointer / RefCell members appear to be accessed only from the owning
// timer/selector thread — TODO confirm this invariant actually holds
unsafe impl Send for EventData {}
unsafe impl Sync for EventData {}
impl EventData {
    /// Create event data for `fd` with no timer set and no parked coroutine.
    pub fn new(fd: RawFd) -> EventData {
        EventData {
            fd,
            io_flag: AtomicBool::new(false),
            #[cfg(feature = "io_timeout")]
            timer: RefCell::new(None),
            co: AtomicOption::none(),
        }
    }
    /// Build the timer payload pointing back at this event.
    #[cfg(feature = "io_timeout")]
    pub fn timer_data(&self) -> TimerData {
        TimerData {
            // cast away constness: the timeout handler needs a mutable view
            event_data: self as *const _ as *mut _,
        }
    }
    /// Hand the parked coroutine to the scheduler queue.
    #[inline]
    pub fn schedule(&self) {
        info!("event schedule");
        let co = match self.co.take() {
            None => return, // it's already take by selector
            Some(co) => co,
        };
        // it's safe to remove the timer since we are running the timer_list in the same thread
        #[cfg(feature = "io_timeout")]
        self.timer.borrow_mut().take().map(|h| {
            unsafe {
                // tell the timer function not to cancel the io
                // it's not always true that you can really remove the timer entry
                h.with_mut_data(|value| value.data.event_data = std::ptr::null_mut());
            }
            h.remove()
        });
        // schedule the coroutine
        get_scheduler().schedule(co);
    }
    /// used by local re-schedule that in `subscribe`
    /// (resumes the coroutine inline instead of going through the scheduler queue)
    #[inline]
    pub fn fast_schedule(&self) {
        info!("event fast_schedule");
        let co = match self.co.take() {
            None => return, // it's already take by selector
            Some(co) => co,
        };
        // it's safe to remove the timer since we are running the timer_list in the same thread
        #[cfg(feature = "io_timeout")]
        self.timer.borrow_mut().take().map(|h| {
            unsafe {
                // tell the timer function not to cancel the io
                // it's not always true that you can really remove the timer entry
                h.with_mut_data(|value| value.data.event_data = std::ptr::null_mut());
            }
            h.remove()
        });
        // run the coroutine
        run_coroutine(co);
    }
}
// each file associated data
// (newtype over `Arc<EventData>`; `Deref` exposes the shared event data directly)
pub struct IoData(Arc<EventData>);
impl IoData {
    /// Wrap the raw fd of `t` in a freshly allocated, shared `EventData`.
    pub fn new<T: AsRawFd + ?Sized>(t: &T) -> Self {
        IoData(Arc::new(EventData::new(t.as_raw_fd())))
    }
    // clear the io-ready flag
    #[inline]
    pub fn reset(&self) {
        self.io_flag.store(false, Ordering::Relaxed);
    }
}
impl Deref for IoData {
    type Target = Arc<EventData>;
    // expose the inner shared EventData (`io.fd`, `io.io_flag`, ...)
    fn deref(&self) -> &Arc<EventData> {
        &self.0
    }
}
impl fmt::Debug for IoData {
    /// Opaque debug representation; the inner event data holds atomics and raw
    /// pointers that are not useful to print.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("IoData = { ... }")
    }
}
impl Drop for IoData {
    // deregister the fd from the selector when this handle is dropped
    fn drop(&mut self) {
        del_socket(self);
    }
}
// SAFETY(review): IoData only wraps an `Arc<EventData>`, and `EventData` is
// declared Send/Sync above — TODO confirm those declarations are themselves sound
unsafe impl Send for IoData {}
|
// Module-nesting demo: a child module may call a private item of its parent.
mod ring1 {
    pub mod ring2 {
        pub fn test() {
            println!("{:?}", "test");
            super::ask(); // can access the parent's method even though it is private
        }
    }
    // private, yet reachable from the child module through `super::`
    fn ask() {
        println!("{:?}", "ask");
    }
}
fn main() {
    // call into the nested module via an absolute (crate-rooted) path
    crate::ring1::ring2::test();
}
|
use std::ffi::OsStr;
use anyhow::Result;
mod block_device;
mod ext4fs;
mod fh;
mod mappe;
/// Open the ext4 image at `what` and mount it via FUSE at `whence`, blocking
/// until the filesystem is unmounted.
///
/// The mount runs with a single dispatcher thread and passes `-o auto_unmount`
/// so the mountpoint is cleaned up when the process exits.
pub fn mount_and_run(what: &OsStr, whence: &OsStr) -> Result<()> {
    let filesystem = ext4fs::Ext4FS::new(what.to_os_string())?;
    // `OsStr::new` already yields `&OsStr`; the previous extra `&` built
    // `&&OsStr` values that only type-checked through deref coercion.
    let fuse_args: Vec<&OsStr> = vec![OsStr::new("-o"), OsStr::new("auto_unmount")];
    fuse_mt::mount(fuse_mt::FuseMT::new(filesystem, 1), &whence, &fuse_args)?;
    Ok(())
}
|
//! 4 Dimensional Vector
use super::common::{Vec4, Mat4, Quat, random_f32, hypot, EPSILON};
/// Creates a new, empty vec4 (all components zero).
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn create() -> Vec4 {
    let out: Vec4 = [0_f32; 4];
    out
}
/// Creates a new vec4 initialized with values from an existing vector.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn clone(a: &Vec4) -> Vec4 {
    // Vec4 is a plain `[f32; 4]`, so dereferencing copies all four components
    *a
}
/// Creates a new vec4 initialized with the given values.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn from_values(x: f32, y: f32, z: f32, w: f32) -> Vec4{
let mut out: Vec4 = [0_f32; 4];
out[0] = x;
out[1] = y;
out[2] = z;
out[3] = w;
out
}
/// Copy the values from one vec4 to another, returning the copy.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn copy(out: &mut Vec4, a: &Vec4) -> Vec4 {
    *out = *a;
    *out
}
/// Set the components of a vec4 to the given values, returning the result.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn set(out: &mut Vec4, x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    *out = [x, y, z, w];
    *out
}
/// Adds two vec4's component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn add(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] + b[i];
    }
    *out
}
/// Subtracts vector b from vector a component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn subtract(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] - b[i];
    }
    *out
}
/// Multiplies two vec4's component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn multiply(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] * b[i];
    }
    *out
}
/// Divides two vec4's component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn divide(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] / b[i];
    }
    *out
}
/// f32::ceil the components of a vec4 into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn ceil(out: &mut Vec4, a: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i].ceil();
    }
    *out
}
/// f32::floor the components of a vec4 into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn floor(out: &mut Vec4, a: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i].floor();
    }
    *out
}
/// Returns the component-wise minimum of two vec4's in `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn min(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i].min(b[i]);
    }
    *out
}
/// Returns the component-wise maximum of two vec4's in `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn max(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i].max(b[i]);
    }
    *out
}
/// f32::round the components of a vec4 into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn round(out: &mut Vec4, a: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i].round();
    }
    *out
}
/// Scales a vec4 by a scalar number into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn scale(out: &mut Vec4, a: &Vec4, b: f32) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] * b;
    }
    *out
}
/// Adds two vec4's after scaling the second operand by a scalar value.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn scale_and_add(out: &mut Vec4, a: &Vec4, b: &Vec4, scale: f32) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] + (b[i] * scale);
    }
    *out
}
/// Calculates the euclidian distance between two vec4's.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn distance(a: &Vec4, b: &Vec4) -> f32 {
    // component-wise difference, then the shared hypot helper
    let diff: Vec4 = [b[0] - a[0], b[1] - a[1], b[2] - a[2], b[3] - a[3]];
    hypot(&diff.to_vec())
}
/// Calculates the squared euclidian distance between two vec4's.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn squared_distance(a: &Vec4, b: &Vec4) -> f32 {
    let dx = b[0] - a[0];
    let dy = b[1] - a[1];
    let dz = b[2] - a[2];
    let dw = b[3] - a[3];
    dx * dx + dy * dy + dz * dz + dw * dw
}
/// Calculates the length of a vec4.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn length(a: &Vec4) -> f32 {
    // no need to copy into a scratch array first; hypot reads the slice directly
    hypot(&a.to_vec())
}
/// Calculates the squared length of a vec4 (given as a slice of at least 4 floats).
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn squared_length(a: &[f32]) -> f32 {
    // direct indexing preserves the original panic on slices shorter than 4
    a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]
}
/// Negates the components of a vec4 into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn negate(out: &mut Vec4, a: &Vec4) -> Vec4 {
    for i in 0..4 {
        out[i] = -a[i];
    }
    *out
}
/// Returns the component-wise reciprocal of a vec4 in `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn inverse(out: &mut Vec4, a: &Vec4) -> Vec4 {
    for i in 0..4 {
        // f32::recip is defined as 1.0 / self
        out[i] = a[i].recip();
    }
    *out
}
/// Normalize a vec4 into `out`; a zero vector is left as zero.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn normalize(out: &mut Vec4, a: &Vec4) -> Vec4 {
    let (x, y, z, w) = (a[0], a[1], a[2], a[3]);
    let len_sq = x * x + y * y + z * z + w * w;
    // scale by the reciprocal square root; otherwise multiply by len_sq itself
    // (0 for a zero vector, NaN propagated unchanged), matching glMatrix
    let factor = if len_sq > 0_f32 { 1_f32 / f32::sqrt(len_sq) } else { len_sq };
    out[0] = x * factor;
    out[1] = y * factor;
    out[2] = z * factor;
    out[3] = w * factor;
    *out
}
/// Calculates the dot product of two vec4's.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn dot(a: &Vec4, b: &Vec4) -> f32 {
    // accumulate left-to-right, matching the original expression's association
    let mut acc = a[0] * b[0];
    acc += a[1] * b[1];
    acc += a[2] * b[2];
    acc += a[3] * b[3];
    acc
}
/// Returns the cross-product of three vectors in a 4-dimensional space.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn cross (out: &mut Vec4, u: &Vec4, v: &Vec4, w: &Vec4) -> Vec4 {
    // a..f are the six 2x2 sub-determinants of the (v, w) pair
    let a = (v[0] * w[1]) - (v[1] * w[0]);
    let b = (v[0] * w[2]) - (v[2] * w[0]);
    let c = (v[0] * w[3]) - (v[3] * w[0]);
    let d = (v[1] * w[2]) - (v[2] * w[1]);
    let e = (v[1] * w[3]) - (v[3] * w[1]);
    let f = (v[2] * w[3]) - (v[3] * w[2]);
    let g = u[0];
    let h = u[1];
    let i = u[2];
    let j = u[3];
    // cofactor expansion of the 4x4 determinant along `u`
    out[0] = (h * f) - (i * e) + (j * d);
    out[1] = -(g * f) + (i * c) - (j * b);
    out[2] = (g * e) - (h * c) + (j * a);
    out[3] = -(g * d) + (h * b) - (i * a);
    *out
}
/// Performs a linear interpolation between two vec4's with parameter `t`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn lerp(out: &mut Vec4, a: &Vec4, b: &Vec4, t: f32) -> Vec4 {
    for i in 0..4 {
        out[i] = a[i] + t * (b[i] - a[i]);
    }
    *out
}
/// Generates a random unit vector scaled by `scale` (default 1.0).
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn random(out: &mut Vec4, scale: Option<f32>) -> Vec4 {
    let scale = match scale {
        Some(scale) => scale,
        None => 1_f32,
    };
    // Marsaglia, George. Choosing a Point from the Surface of a
    // Sphere. Ann. f32:: Statist. 43 (1972), no. 2, 645--646.
    // http://projecteuclid.org/euclid.aoms/1177692644;
    let mut v1 = 0_f32;
    let mut v2 = 0_f32;
    let mut v3 = 0_f32;
    let mut v4 = 0_f32;
    // s1/s2 start > 1 so each rejection loop runs at least once
    let mut s1 = 2_f32;
    let mut s2 = 2_f32;
    // rejection-sample (v1, v2) from the unit disc
    while s1 > 1_f32 {
        v1 = random_f32() * 2. - 1.;
        v2 = random_f32() * 2. - 1.;
        s1 = v1 * v1 + v2 * v2;
    }
    // rejection-sample (v3, v4) from the unit disc
    while s2 > 1_f32 {
        v3 = random_f32() * 2. - 1.;
        v4 = random_f32() * 2. - 1.;
        s2 = v3 * v3 + v4 * v4;
    }
    // combine the two disc samples into a point on the 3-sphere
    let d = f32::sqrt((1_f32 - s1) / s2);
    out[0] = scale * v1;
    out[1] = scale * v2;
    out[2] = scale * v3 * d;
    out[3] = scale * v4 * d;
    *out
}
/// Transforms the vec4 with a mat4 (column-major) into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn transform_mat4(out: &mut Vec4, a: &Vec4, m: &Mat4) -> Vec4 {
    let (x, y, z, w) = (a[0], a[1], a[2], a[3]);
    for i in 0..4 {
        // row i of the product: columns start at offsets 0, 4, 8, 12
        out[i] = m[i] * x + m[i + 4] * y + m[i + 8] * z + m[i + 12] * w;
    }
    *out
}
/// Transforms the xyz part of the vec4 with a quat, leaving `w` untouched.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn transform_quat(out: &mut Vec4, a: &Vec4 , q: &Quat) -> Vec4 {
    let x = a[0];
    let y = a[1];
    let z = a[2];
    let qx = q[0];
    let qy = q[1];
    let qz = q[2];
    let qw = q[3];
    // calculate quat * vec
    let ix = qw * x + qy * z - qz * y;
    let iy = qw * y + qz * x - qx * z;
    let iz = qw * z + qx * y - qy * x;
    let iw = -qx * x - qy * y - qz * z;
    // calculate result * inverse quat
    out[0] = ix * qw + iw * -qx + iy * -qz - iz * -qy;
    out[1] = iy * qw + iw * -qy + iz * -qx - ix * -qz;
    out[2] = iz * qw + iw * -qz + ix * -qy - iy * -qx;
    // the w component is not rotated
    out[3] = a[3];
    *out
}
/// Set the components of a vec4 to zero.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn zero(out: &mut Vec4) -> Vec4 {
    *out = [0.0; 4];
    *out
}
/// Returns a string representation of a vector, e.g. `vec4(1, 2, 3, 4)`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn string(a: &Vec4) -> String {
    format!("vec4({}, {}, {}, {})", a[0], a[1], a[2], a[3])
}
/// Returns whether or not the vectors have exactly the same elements in the same position (when compared with ==).
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn exact_equals(a: &Vec4, b: &Vec4) -> bool {
    a.iter().zip(b.iter()).all(|(x, y)| x == y)
}
/// Returns whether or not the vectors have approximately the same elements in
/// the same position, using a combined absolute/relative EPSILON tolerance.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn equals(a: &Vec4, b: &Vec4) -> bool {
    (0..4).all(|i| {
        let (x, y) = (a[i], b[i]);
        f32::abs(x - y) <= EPSILON * f32::max(1.0, f32::max(f32::abs(x), f32::abs(y)))
    })
}
/// Alias for vec4::subtract: computes `a - b` component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn sub(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    subtract(out, a, b)
}
/// Alias for vec4::multiply: computes `a * b` component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn mul(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    multiply(out, a, b)
}
/// Alias for vec4::divide: computes `a / b` component-wise into `out`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn div(out: &mut Vec4, a: &Vec4, b: &Vec4) -> Vec4 {
    divide(out, a, b)
}
/// Alias for vec4::distance: the euclidian distance between `a` and `b`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn dist(a: &Vec4, b: &Vec4) -> f32 {
    distance(a, b)
}
/// Alias for vec4::squared_distance: the squared distance between `a` and `b`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn sqr_dist(a: &Vec4, b: &Vec4) -> f32 {
    squared_distance(a, b)
}
/// Alias for vec4::length: the euclidian length of `a`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn len(a: &Vec4) -> f32 {
    length(a)
}
/// Alias for vec4::squared_length: the squared length of `a`.
///
/// [glMatrix Documentation](http://glmatrix.net/docs/vec4.js.html)
pub fn sqr_len(a: &Vec4) -> f32 {
    squared_length(a)
}
// /**
// * Perform some operation over an array of vec4s.
// *
// * @param {Array} a the array of vectors to iterate over
// * @param {Number} stride Number of elements between the start of each vec4. If 0 assumes tightly packed
// * @param {Number} offset Number of elements to skip at the beginning of the array
// * @param {Number} count Number of vec4s to iterate over. If 0 iterates over entire array
// * @param {fn} fn fn to call for each vector in the array
// * @param {Object} [arg] additional argument to pass to fn
// * @returns {Array} a
// * @fn
// */
// pub fn for_each(a: &[f32], stride: f32, offset: f32, count: f32, f: fn(), arg){
// }
// pub const forEach = (fn() {
// let vec = create();
// return fn(a, stride, offset, count, fn, arg) {
// let i, l;
// if(!stride) {
// stride = 4;
// }
// if(!offset) {
// offset = 0;
// }
// if(count) {
// l = f32::min((count * stride) + offset, a.length);
// } else {
// l = a.length;
// }
// for(i = offset; i < l; i += stride) {
// vec[0] = a[i]; vec[1] = a[i+1]; vec[2] = a[i+2]; vec[3] = a[i+3];
// fn(vec, vec, arg);
// a[i] = vec[0]; a[i+1] = vec[1]; a[i+2] = vec[2]; a[i+3] = vec[3];
// }
// return a;
// };
// })();
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_a_vec4() {
let out = create();
assert_eq!([0., 0., 0., 0.], out);
}
#[test]
fn clone_a_vec4() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out = clone(&vec4_a);
assert_eq!([1., 2., 3., 4.], out);
}
#[test]
fn create_vec4_from_values() {
let out = from_values(1., 2., 3., 4.);
assert_eq!([1., 2., 3., 4.], out);
}
#[test]
fn copy_values_from_a_vec4_it_another() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec4_a: Vec4 = [1., 2., 3., 4.];
let result = copy(&mut out, &vec4_a);
assert_eq!([1., 2., 3., 4.], out);
assert_eq!(result, out);
}
#[test]
fn set_vec4_with_values() {
let mut out: Vec4 = [0., 0., 0., 0.];
let result = set(&mut out, 1., 2., 3., 4.);
assert_eq!([1., 2., 3., 4.], out);
assert_eq!(result, out);
}
#[test]
fn add_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec4_a: Vec4 = [1., 2., 3., 4.];
let vec4_b: Vec4 = [5., 6., 7., 8.];
let result = add(&mut out, &vec4_a, &vec4_b);
assert_eq!([6., 8., 10., 12.], out);
assert_eq!(result, out);
}
#[test]
fn subtract_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec4_a: Vec4 = [1., 2., 3., 4.];
let vec4_b: Vec4 = [5., 6., 7., 8.];
let result = subtract(&mut out, &vec4_a, &vec4_b);
assert_eq!([-4., -4., -4., -4.], out);
assert_eq!(result, out);
}
#[test]
fn sub_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec4_a: Vec4 = [1., 2., 3., 4.];
let vec4_b: Vec4 = [5., 6., 7., 8.];
let result = sub(&mut out, &vec4_a, &vec4_b);
assert_eq!([-4., -4., -4., -4.], out);
assert_eq!(result, out);
}
#[test]
fn sub_is_equal_to_subtract() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result_a = subtract(&mut out, &vec_a, &vec_b);
let result_b = sub(&mut out, &vec_a, &vec_b);
assert_eq!(result_a, result_b);
}
#[test]
fn multiply_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = multiply(&mut out, &vec_a, &vec_b);
assert_eq!([5., 12., 21., 32.], out);
assert_eq!(result, out);
}
#[test]
fn mul_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = mul(&mut out, &vec_a, &vec_b);
assert_eq!([5., 12., 21., 32.], out);
assert_eq!(result, out);
}
#[test]
fn mul_is_equal_to_multiply() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result_a = multiply(&mut out, &vec_a, &vec_b);
let result_b = mul(&mut out, &vec_a, &vec_b);
assert_eq!(result_a, result_b);
}
#[test]
fn divide_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = divide(&mut out, &vec_a, &vec_b);
assert_eq!([0.2, 0.33333334, 0.42857143, 0.5], out);
assert_eq!(result, out);
}
#[test]
fn div_two_vec4s() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = div(&mut out, &vec_a, &vec_b);
assert_eq!([0.2, 0.33333334, 0.42857143, 0.5], out);
assert_eq!(result, out);
}
#[test]
fn div_is_equal_to_divide() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result_a = divide(&mut out, &vec_a, &vec_b);
let result_b = div(&mut out, &vec_a, &vec_b);
assert_eq!(result_a, result_b);
}
#[test]
fn ceil_of_vec4() {
use super::super::common::PI;
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [core::f32::consts::E, PI,
core::f32::consts::SQRT_2,
core::f32::consts::FRAC_1_SQRT_2];
let result = ceil(&mut out, &vec_a);
assert_eq!([3., 4., 2., 1.], out);
assert_eq!(result, out);
}
#[test]
fn floor_of_vec4() {
use super::super::common::PI;
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [core::f32::consts::E, PI,
core::f32::consts::SQRT_2,
core::f32::consts::FRAC_1_SQRT_2];
let result = floor(&mut out, &vec_a);
assert_eq!([2., 3., 1., 0.], out);
assert_eq!(result, out);
}
#[test]
fn min_of_two_vec4() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 3., 1., 3.];
let vec_b: Vec4 = [3., 1., 3., 1.];
let result = min(&mut out, &vec_a, &vec_b);
assert_eq!([1., 1., 1., 1.], out);
assert_eq!(result, out);
}
#[test]
fn max_of_two_vec4() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 3., 1., 3.];
let vec_b: Vec4 = [3., 1., 3., 1.];
let result = max(&mut out, &vec_a, &vec_b);
assert_eq!([3., 3., 3., 3.], out);
assert_eq!(result, out);
}
#[test]
fn round_vec4() {
use super::super::common::PI;
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [core::f32::consts::E, PI,
core::f32::consts::SQRT_2,
core::f32::consts::FRAC_1_SQRT_2];
let result = round(&mut out, &vec_a);
assert_eq!([3., 3., 1., 1.], out);
assert_eq!(result, out);
}
#[test]
fn scale_vec4() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let result = scale(&mut out, &vec_a, 2.);
assert_eq!([2., 4., 6., 8.], out);
assert_eq!(result, out);
}
#[test]
fn scale_and_add_vec4() {
let mut out: Vec4 = [0., 0., 0., 0.];
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = scale_and_add(&mut out, &vec_a, &vec_b, 0.5);
assert_eq!([3.5, 5., 6.5, 8.], out);
assert_eq!(result, out);
}
#[test]
fn distance_between_vec4s() {
use super::super::common;
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = distance(&vec_a, &vec_b);
assert!(common::equals(result, 8.));
}
#[test]
fn dist_between_vec4s() {
use super::super::common;
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result = dist(&vec_a, &vec_b);
assert!(common::equals(result, 8.));
}
#[test]
fn dist_is_equal_to_distance() {
let vec_a: Vec4 = [1., 2., 3., 4.];
let vec_b: Vec4 = [5., 6., 7., 8.];
let result_a = distance(&vec_a, &vec_b);
let result_b = dist(&vec_a, &vec_b);
assert_eq!(result_a, result_b);
}
#[test]
fn squared_length_vec4() {
let vec_a: Vec4 = [1., 2., 3., 4.];
let result = squared_length(&vec_a);
assert_eq!(result, 30.);
}
#[test]
fn sqr_len_vec4() {
let vec_a: Vec4 = [1., 2., 3., 4.];
let result = sqr_len(&vec_a);
assert_eq!(result, 30.);
}
#[test]
fn sqr_len_is_equal_to_sqr_dist() {
let vec_a: Vec4 = [1., 2., 3., 4.];
let result_a = squared_length(&vec_a);
let result_b = sqr_len(&vec_a);
assert_eq!(result_a, result_b);
}
#[test]
fn length_of_vec4() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out = length(&vec4_a);
// they get 5.477225
assert_eq!(5.477226, out);
}
#[test]
fn len_of_vec4() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out = len(&vec4_a);
// they get 5.477225
assert_eq!(5.477226, out);
}
#[test]
fn length_is_equal_to_len() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out_a = length(&vec4_a);
let out_b = len(&vec4_a);
assert_eq!(out_a, out_b);
}
#[test]
fn squared_length_of_vec4() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out = squared_length(&vec4_a);
// they get 5.477225
assert_eq!(30_f32, out);
}
#[test]
fn sqr_len_of_vec4() {
let vec4_a: Vec4 = [1., 2., 3., 4.];
let out = sqr_len(&vec4_a);
// they get 5.477225
assert_eq!(30_f32, out);
}
#[test]
fn squared_length_is_equal_to_sqr_len() {
    // `sqr_len` must behave as a pure alias for `squared_length`.
    let v: Vec4 = [1., 2., 3., 4.];
    assert_eq!(squared_length(&v), sqr_len(&v));
}
#[test]
fn negate_vec4() {
    let mut got: Vec4 = [0., 0., 0., 0.];
    // Negation flips the sign of every component; the return value is the
    // same data as the out parameter.
    let returned = negate(&mut got, &[1., 2., 3., 4.]);
    assert_eq!(got, [-1., -2., -3., -4.]);
    assert_eq!(returned, got);
}
#[test]
fn invert_vec4() {
    let mut got: Vec4 = [0., 0., 0., 0.];
    // Component-wise reciprocal: 1/1, 1/2, 1/3, 1/4.
    let returned = inverse(&mut got, &[1., 2., 3., 4.]);
    assert_eq!(got, [1., 0.5, 0.33333333333333, 0.25]);
    assert_eq!(returned, got);
}
#[test]
fn normalize_vec4() {
    let mut got: Vec4 = [0., 0., 0., 0.];
    // [5, 0, 0, 0] has length 5 and normalizes to the x unit vector.
    let returned = normalize(&mut got, &[5., 0., 0., 0.]);
    assert_eq!([1., 0., 0., 0.], got);
    assert_eq!(returned, got);
}
#[test]
fn scale_vec4_by_scalar() {
    // Renamed from `dot_vec4`: this test exercises `scale`, not `dot`.
    let mut out: Vec4 = [0., 0., 0., 0.];
    let vec4_a: Vec4 = [1., 2., 3., 4.];
    let result = scale(&mut out, &vec4_a, 2_f32);
    assert_eq!([2., 4., 6., 8.], out);
    assert_eq!(result, out);
}
#[test]
fn cross_product_of_three_vec4s() {
    // Renamed from `cross_product_of_two_vec4`: the 4-D cross product takes
    // THREE input vectors. For the x, y and z unit vectors the result is
    // the negative w unit vector.
    let mut out: Vec4 = [0., 0., 0., 0.];
    let vec_a: Vec4 = [1., 0., 0., 0.];
    let vec_b: Vec4 = [0., 1., 0., 0.];
    let vec_c: Vec4 = [0., 0., 1., 0.];
    let result = cross(&mut out, &vec_a, &vec_b, &vec_c);
    assert_eq!(out, [0., 0., 0., -1.]);
    assert_eq!(result, out);
}
#[test]
fn add_vec4_componentwise() {
    // Renamed from `lerp_vec4`: this test exercises `add`, not `lerp`.
    let mut out: Vec4 = [0., 0., 0., 0.];
    let vec4_a: Vec4 = [1., 2., 3., 4.];
    let vec4_b: Vec4 = [5., 6., 7., 8.];
    let result = add(&mut out, &vec4_a, &vec4_b);
    assert_eq!([6., 8., 10., 12.], out);
    assert_eq!(result, out);
}
#[test]
fn random_vec4_no_scale() {
    // Renamed from `random_vec3_no_scale`; this is the Vec4 variant.
    let mut out: Vec4 = [0., 0., 0., 0.];
    let result = random(&mut out, None);
    // NOTE(review): only the first two components are range-checked, as in
    // the original; presumably all four lie in [-1, 1] if `random` yields a
    // unit vector — confirm before tightening.
    assert!(out[0] >= -1_f32 && out[0] <= 1_f32);
    assert!(out[1] >= -1_f32 && out[1] <= 1_f32);
    assert_eq!(result, out);
}
#[test]
fn random_vec4_scaled() {
    // Renamed from `random_vec3_scaled`; this is the Vec4 variant.
    let scale = 2_f32;
    let mut out: Vec4 = [0., 0., 0., 0.];
    let result = random(&mut out, Some(scale));
    // NOTE(review): only the first two components are range-checked, as in
    // the original; presumably all four lie in [-scale, scale] — confirm
    // before tightening.
    assert!(out[0] >= -1_f32 * scale && out[0] <= 1_f32 * scale);
    assert!(out[1] >= -1_f32 * scale && out[1] <= 1_f32 * scale);
    assert_eq!(result, out);
}
#[test]
fn transform_mat4_to_vec4() {
    // The identity matrix must map any vector onto itself.
    let mut got: Vec4 = [0., 0., 0., 0.];
    let identity: Mat4 = [
        1., 0., 0., 0.,
        0., 1., 0., 0.,
        0., 0., 1., 0.,
        0., 0., 0., 1.,
    ];
    let v: Vec4 = [1., 2., 3., 4.];
    let returned = transform_mat4(&mut got, &v, &identity);
    assert_eq!([1., 2., 3., 4.], got);
    assert_eq!(returned, got);
}
#[test]
fn transform_quat_to_vec4() {
    // Renamed from `transform_quat_to_vec3`; this is the Vec4 variant.
    let mut out: Vec4 = [0., 0., 0., 0.];
    // Unit quaternion proportional to (1, 2, 3, 4): its rotation axis is
    // parallel to the vector's xyz part, so the rotation leaves the vector
    // unchanged.
    let quat_r: Quat = [0.18257418567011074, 0.3651483713402215, 0.5477225570103322, 0.730296742680443];
    let vec_a: Vec4 = [1., 2., 3., 4.];
    let result = transform_quat(&mut out, &vec_a, &quat_r);
    assert_eq!([1., 2., 3., 4.], out);
    assert_eq!(result, out);
}
#[test]
fn zero_out_vec4() {
    // Renamed from `zero_out_vec3`: this zeroes a Vec4 in place.
    let mut vec_a: Vec4 = [1., 2., 3., 4.];
    let result = zero(&mut vec_a);
    assert_eq!(vec_a, [0., 0., 0., 0.]);
    assert_eq!(result, vec_a);
}
#[test]
fn get_vec4_string() {
    // Integral components are rendered without a decimal point.
    let rendered = string(&[1., 2., 3., 4.]);
    assert_eq!("vec4(1, 2, 3, 4)".to_string(), rendered);
}
#[test]
fn vec4_are_exact_equal() {
    // Identical component values must compare equal bit-for-bit.
    let v: Vec4 = [0., 1., 2., 3.];
    assert!(exact_equals(&v, &[0., 1., 2., 3.]));
}
#[test]
fn vec4_are_not_exact_equal() {
    // Every component differs by 1, so exact comparison must fail.
    let v: Vec4 = [0., 1., 2., 3.];
    assert!(!exact_equals(&v, &[1., 2., 3., 4.]));
}
#[test]
fn vec4_are_equal() {
    // Identical vectors are equal under the tolerant comparison too.
    let v: Vec4 = [0., 1., 2., 3.];
    assert!(equals(&v, &[0., 1., 2., 3.]));
}
#[test]
fn vec4_are_equal_enough() {
    // A 1e-16 difference in the first component is far below the tolerance
    // used by `equals`, so the vectors still compare equal.
    let v: Vec4 = [0., 1., 2., 3.];
    let w: Vec4 = [1_f32*10_f32.powi(-16), 1., 2., 3.];
    assert!(equals(&v, &w));
}
#[test]
fn vec4_are_not_equal() {
    // Components differ by a full unit, well outside the tolerance.
    let v: Vec4 = [0., 1., 2., 3.];
    assert!(!equals(&v, &[1., 2., 3., 4.]));
}
} |
use crate::{Alphabet, LineModification, TextModifier};
use std::fmt::Debug;
/// Accessors shared by all `LineModification` variants.
/// (Removed the commented-out copies of the variant signatures.)
impl LineModification {
    /// Returns the line number the modification applies to, regardless of
    /// the variant. Line numbers are zero-based and refer to the original
    /// document (see `TextModifier`).
    pub fn get_line_number(&self) -> usize {
        match self {
            Self::Insert(line_number, _) => *line_number,
            Self::Changed(line_number, _, _) => *line_number,
            Self::Remove(line_number) => *line_number,
        }
    }
}
/// Human-readable description of a modification. Stored line numbers are
/// zero-based; they are printed one-based for display.
impl Debug for LineModification {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Insert(line, inserted) => write!(
                f,
                "Inserted a line at {}, the line was {}",
                line + 1,
                inserted
            ),
            Self::Changed(line, before, after) => write!(
                f,
                "Changed the line at {}, the original line was '{}', the new line is '{}'",
                line + 1,
                before,
                after
            ),
            Self::Remove(line) => write!(f, "Line {} was removed", line + 1),
        }
    }
}
impl TextModifier {
    /// Creates a modifier whose `modified` buffer starts as a copy of the
    /// original lines; every edit is recorded in `changes`.
    pub fn new(original_lines: Vec<String>) -> TextModifier {
        TextModifier {
            original: original_lines.clone(),
            modified: original_lines,
            changes: Vec::new(),
            lines_modified: Vec::new(),
        }
    }
    /// Inserts `text` at `line_number` and records the insertion.
    pub fn insert_line(&mut self, line_number: usize, text: String) {
        self.modified.insert(line_number, text.clone());
        self.changes
            .push(LineModification::Insert(line_number, text));
    }
    /// Overwrites the line at `line_number` with `text`, recording both the
    /// original and the new text.
    pub fn change_line(&mut self, line_number: usize, text: String) {
        let mut line_text = text;
        std::mem::swap(&mut self.modified[line_number], &mut line_text);
        // After the swap `line_text` holds the ORIGINAL line and
        // `self.modified[line_number]` the new one. The previous code pushed
        // them in the opposite order, recording the new text as the
        // "original" and vice versa (compare `change_random_line`).
        self.changes.push(LineModification::Changed(
            line_number,
            line_text,
            self.modified[line_number].clone(),
        ));
    }
    /// Deletes the line at `line_number` and records the removal.
    pub fn remove_line(&mut self, line_number: usize) {
        self.modified.remove(line_number);
        self.changes.push(LineModification::Remove(line_number));
    }
    /// Inserts a random Latin line at a random, not-yet-touched position.
    /// The recorded line number refers to the ORIGINAL document.
    pub fn insert_random_line(&mut self, rng: &mut impl rand::Rng) {
        let line_number = self.get_unique_random_line(rng);
        let actual_line_number = self.get_actual_line_number(line_number);
        let latin = Alphabet::Latin;
        let line_to_insert = latin.get_random_line(rng);
        println!("Adjusted line number is {}", actual_line_number);
        self.modified
            .insert(actual_line_number, line_to_insert.clone());
        self.changes
            .push(LineModification::Insert(line_number, line_to_insert));
    }
    /// Removes a random, not-yet-touched line.
    pub fn remove_random_line(&mut self, rng: &mut impl rand::Rng) {
        let line_number = self.get_unique_random_line(rng);
        let actual_line_number = self.get_actual_line_number(line_number);
        println!("Adjusted line number is {}", actual_line_number);
        self.modified.remove(actual_line_number);
        // We store the line number as it would apply to the original document
        self.changes.push(LineModification::Remove(line_number));
    }
    /// Picks a random line number no recorded change refers to yet.
    /// Debug builds assert after 10^6 failed attempts.
    fn get_unique_random_line(&self, rng: &mut impl rand::Rng) -> usize {
        let mut random_number = rng.gen_range(0, self.modified.len());
        let mut attempt: usize = 0;
        while self
            .changes
            .iter()
            .any(|change| change.get_line_number() == random_number)
        {
            random_number = rng.gen_range(0, self.modified.len());
            attempt += 1;
            debug_assert!(attempt < 1000000);
        }
        random_number
    }
    /// Maps an original-document line number onto the current `modified`
    /// buffer: -1 for each earlier removal, +1 for each earlier insertion.
    fn get_actual_line_number(&self, line_number: usize) -> usize {
        self.changes
            .iter()
            .filter(|&change| match change {
                // In-place changes never shift line numbers.
                LineModification::Changed(_, _, _) => false,
                LineModification::Remove(line_removed) => line_number >= *line_removed,
                LineModification::Insert(line_inserted, _) => line_number >= *line_inserted,
            })
            .fold(line_number, |total_offset, change| match change {
                // Unreachable: `Changed` is filtered out above. (The old arm
                // returned 0 here, which would have zeroed the whole fold.)
                LineModification::Changed(_, _, _) => total_offset,
                LineModification::Remove(_) => total_offset - 1,
                LineModification::Insert(_, _) => total_offset + 1,
            })
    }
    /// Replaces a random, not-yet-touched line with random Latin text.
    pub fn change_random_line(&mut self, rng: &mut impl rand::Rng) {
        // We always use the line number based on the original file not the one being changed
        // This means we need to adjust the line number by the operations that have already occurred
        // For each removal that occured before this line number we subtract one, for each insertion that occurred before this line number we add one
        let line_number = self.get_unique_random_line(rng);
        let actual_line_number = self.get_actual_line_number(line_number);
        let alpha = Alphabet::Latin;
        let mut new_text = alpha.get_random_line(rng);
        println!("Adjusted line number is {}", actual_line_number);
        std::mem::swap(&mut self.modified[actual_line_number], &mut new_text);
        // so new_text is now the original line
        self.changes.push(LineModification::Changed(
            line_number,
            new_text,
            self.modified[actual_line_number].clone(),
        ));
    }
    /// Consumes the modifier, returning the modified lines and the history.
    pub fn get_modified_lines(self) -> (Vec<String>, Vec<LineModification>) {
        // TODO: The history needs to be adjusted here since a remove at line 6 followed by an insert an line 6 looks like a line 6 change to any follow up algorithm
        (self.modified, self.changes)
    }
}
#[cfg(test)]
mod tests {
    use crate::{LineModification, TestSpace, TestSpaceFile, TextModifier};
    use std::collections::HashSet;
    /// `get_unique_random_line` must never return a line number already
    /// present in the change history.
    #[test]
    fn test_random_number_uniqueness() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut tracker = HashSet::new();
        let mut modifier = TextModifier::new(original_lines);
        // `_`: the loop counter was previously bound to an unused `line`.
        for _ in 0..10 {
            let random = modifier.get_unique_random_line(&mut rng);
            println!("Random number is {}", random);
            // Add that random number to the numbers that can't be reproduced
            let fake_entry = LineModification::Remove(random);
            modifier.changes.push(fake_entry);
            if tracker.contains(&random) {
                panic!("Non unique random number generated")
            }
            tracker.insert(random);
        }
    }
    /// Changing one random line must leave every other line untouched.
    #[test]
    fn test_change_random_line() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut modifier = TextModifier::new(original_lines.clone());
        modifier.change_random_line(&mut rng);
        let (modified_lines, history) = modifier.get_modified_lines();
        let line_modified = history
            .iter()
            .map(|line| line.get_line_number())
            .next()
            .unwrap();
        // Plain `.len()` instead of the needless `.iter().len()`.
        for line in 0..original_lines.len() {
            if line == line_modified {
                assert_ne!(original_lines[line], modified_lines[line])
            } else {
                assert_eq!(original_lines[line], modified_lines[line]);
            }
        }
    }
    /// Removing a random line must shrink the document by exactly one line.
    #[test]
    fn test_remove_random_line() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut modifier = TextModifier::new(original_lines.clone());
        modifier.remove_random_line(&mut rng);
        let (modified_lines, history) = modifier.get_modified_lines();
        let line_removed = history
            .iter()
            .map(|line| line.get_line_number())
            .next()
            .unwrap();
        println!("Removed line {}", line_removed);
        assert!(modified_lines.len() < original_lines.len());
        if line_removed < modified_lines.len() {
            assert_ne!(modified_lines[line_removed], original_lines[line_removed]);
        }
    }
    /// Inserting a random line must grow the document.
    #[test]
    fn test_insert_random_line() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut modifier = TextModifier::new(original_lines.clone());
        modifier.insert_random_line(&mut rng);
        let (modified_lines, _) = modifier.get_modified_lines();
        assert!(modified_lines.len() > original_lines.len());
    }
    /// A line AFTER a removal maps one position earlier in the modified text.
    #[test]
    fn test_adjusted_line_after_remove() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut modifier = TextModifier::new(original_lines.clone());
        modifier.remove_random_line(&mut rng);
        let line_removed = modifier
            .changes
            .iter()
            .map(|line| line.get_line_number())
            .next()
            .unwrap();
        println!("Removed line {}", line_removed);
        // If the last line wasn't randomly removed we can run the test
        if line_removed != original_lines.len() - 1 {
            let new_line_number = modifier.get_actual_line_number(line_removed + 1);
            assert_eq!(new_line_number, line_removed);
        } else {
            // the last line was randomly removed so we cant test the actual line of a line after that one
        }
    }
    /// A line AT an insertion point maps one position later in the modified text.
    #[test]
    fn test_adjusted_line_after_insert() {
        let mut rng = rand::thread_rng();
        let mut ts = TestSpace::new();
        let text_file = ts.create_random_text_file(10);
        let mut tsf = TestSpaceFile::from(text_file);
        let original_lines = tsf.read_lines();
        let mut modifier = TextModifier::new(original_lines.clone());
        modifier.insert_random_line(&mut rng);
        let line_inserted = modifier
            .changes
            .iter()
            .map(|line| line.get_line_number())
            .next()
            .unwrap();
        println!("Inserted line {}", line_inserted);
        let new_line_number = modifier.get_actual_line_number(line_inserted);
        for line in 0..original_lines.len() {
            println!("{} - {}", line, original_lines[line]);
        }
        let (new_lines, _) = modifier.get_modified_lines();
        for line in 0..new_lines.len() {
            println!("{} - {}", line, new_lines[line]);
        }
        assert_eq!(new_line_number, line_inserted + 1);
    }
}
|
use std::vec::Vec;
/// Returns true if `n` is prime, by trial division up to floor(sqrt(n)).
pub fn is_prime(n: i32) -> bool {
    // 0, 1 and all negatives are not prime. The original accepted them:
    // for n <= 0 the computed limit was <= 1, the trial range was empty,
    // and the function fell through to `true`.
    if n < 2 {
        return false;
    }
    let lim = (n as f64).sqrt().floor() as i32 + 1;
    for i in 2..lim {
        if n % i == 0 {
            return false;
        }
    }
    true
}
/// Sieve of Eratosthenes over `0..n`. `flags[i] == true` means `i` is prime.
pub fn sieve(n: usize) -> Vec<bool> {
    let mut flags = vec![true; n];
    // Guarded writes: the original indexed flags[0] / flags[1]
    // unconditionally and panicked for n < 2.
    for i in 0..n.min(2) {
        flags[i] = false;
    }
    // Knock out the even numbers above 2.
    for i in (4..n).step_by(2) {
        flags[i] = false;
    }
    // For each odd prime i, mark its multiples starting at i*i.
    for i in (3..n).step_by(2) {
        if flags[i] {
            for j in (i * i..n).step_by(i) {
                flags[j] = false;
            }
        }
    }
    flags
}
/// Returns all positive divisors of `n` (unsorted, in small/large pairs).
pub fn make_divisors(n: i64) -> Vec<i64> {
    let mut div: Vec<i64> = Vec::new();
    let root = (n as f64).sqrt().floor() as i64;
    // Inclusive `1..=root`: the original looped over `1..root`, which
    // skipped the root itself — e.g. make_divisors(16) was missing 4,
    // and make_divisors(12) was missing 3 and 4.
    for i in 1..=root {
        if n % i == 0 {
            div.push(i);
            // Don't push the square root twice for perfect squares.
            if i != n / i {
                div.push(n / i);
            }
        }
    }
    div
}
// from https://qiita.com/osanshouo/items/869bf08e979831ebb662#knuth-%E3%81%AE%E6%96%B9%E6%B3%95
/// Binomial coefficient C(n, k) via Knuth's ascending recurrence:
/// C(n, k) = C(n-1, k-1) * n / k, where every intermediate division is
/// exact, keeping partial products as small as possible (overflow for
/// large n/k is still possible in i64).
pub fn binom_asc(n: i64, k: i64) -> i64 {
    if k != 0 && k != n {
        binom_asc(n - 1, k - 1) * n / k
    } else {
        1
    }
}
|
use num_integer::Integer;
#[allow(dead_code)]
// Reads one line from stdin, stripping the trailing newline.
fn read_line() -> String {
    let mut buf = String::new();
    std::io::stdin()
        .read_line(&mut buf)
        .unwrap();
    buf.trim_end().to_owned()
}
const N_I: i64 = 10_000;
const N_F: f64 = 10_000.0;
fn main() {
    // Input: one line "x y r" (decimals with up to four fractional digits).
    let input = read_line();
    let mut tokens = input.split_whitespace();
    let mut next_value = || tokens.next().unwrap().parse().unwrap();
    let x = next_value();
    let y = next_value();
    let r = next_value();
    for line in Solver::new(x, y, r).solve() {
        println!("{}", line);
    }
}
// Counts integer lattice points on or inside a circle. All values are
// stored as fixed-point integers scaled by 10^4 (`N_I`/`N_F`), so decimal
// inputs with up to four fractional digits are represented exactly.
struct Solver {
    // Circle centre, scaled by 10^4.
    x: i64,
    y: i64,
    // Circle radius, scaled by 10^4.
    r: i64,
}
impl Solver {
    /// Converts decimal inputs (up to four fractional digits) into exact
    /// fixed-point integers scaled by 10^4.
    fn new(x: f64, y: f64, r: f64) -> Solver {
        let x = (x * N_F).round() as i64;
        let y = (y * N_F).round() as i64;
        let r = (r * N_F).round() as i64;
        Solver { x: x, y: y, r: r }
    }
    /// Exact integer square root of `d` (>= 0). A plain `f64::sqrt` can be
    /// off by one ULP once d approaches 2^53 (here r^2 can reach ~4e18),
    /// which would miss or over-count boundary lattice points; the two
    /// correction loops make the result exactly floor(sqrt(d)).
    fn isqrt(d: i64) -> i64 {
        let mut s = (d as f64).sqrt() as i64;
        while s > 0 && s * s > d {
            s -= 1;
        }
        while (s + 1) * (s + 1) <= d {
            s += 1;
        }
        s
    }
    /// Counts original-grid points (multiples of 10^4 in scaled space)
    /// lying on or inside the circle, scanning column by column.
    fn solve(&self) -> Vec<String> {
        let xi_min = self.x - self.r;
        let xi_max = self.x + self.r;
        // First grid column at or right of the circle's left edge.
        let mut xi = xi_min.div_ceil(&N_I) * N_I;
        let mut ans: i64 = 0;
        while xi_min <= xi && xi <= xi_max {
            // Exact half-chord height at this column.
            let sqrt = Self::isqrt(self.r.pow(2) - (xi - self.x).pow(2));
            let top = (self.y + sqrt).div_floor(&N_I);
            let bottom = (self.y - sqrt).div_ceil(&N_I);
            if top >= bottom {
                ans += top - bottom + 1;
            }
            xi += N_I;
        }
        let mut buf = Vec::new();
        buf.push(format!("{}", ans));
        buf
    }
}
#[test]
fn test_1() {
    // Zero-radius circle centred on a grid point: exactly that point.
    assert_eq!(Solver::new(0.0, 0.0, 0.0).solve(), ["1"]);
}
#[test]
fn test_2() {
    // Zero-radius circle centred off-grid: contains no grid point.
    assert_eq!(Solver::new(0.1, 0.1, 0.0).solve(), ["0"]);
}
#[test]
fn test_3() {
    // Radius 0.2 around (0.1, 0.1) reaches only the origin.
    assert_eq!(Solver::new(0.1, 0.1, 0.2).solve(), ["1"]);
}
#[test]
fn test_4() {
    // Unit circle on a grid point: centre plus its four axis neighbours.
    assert_eq!(Solver::new(100.0, 100.0, 1.0).solve(), ["5"]);
}
#[test]
fn test_5() {
    // A tiny off-grid circle that misses every grid point.
    assert_eq!(Solver::new(0.0001, 0.0001, 0.0001).solve(), ["0"]);
}
#[test]
fn test_6() {
    // The circle boundary exactly touches the grid point (0, 1).
    assert_eq!(Solver::new(0.0, 0.9999, 0.0001).solve(), ["1"]);
}
|
use no_panic::no_panic;
// Regression check: #[no_panic] must expand correctly on an `async fn`,
// whose body is lowered to a state machine before the macro's
// panic-detection shim applies.
#[no_panic]
async fn f() {
    g().await;
}
// Trivial await target used by the #[no_panic] async expansion test.
async fn g() {}
// Compile-only test: nothing executes; building successfully is the assertion.
fn main() {}
|
use std::io::{Read, BufReader, Write};
use std::net::{self, TcpStream};
use std::str::from_utf8;
use std::cell::RefCell;
use std::collections::HashSet;
use url;
use types::{RedisResult, Value, ToRedisArgs, FromRedisValue, from_redis_value, ErrorKind};
use parser::Parser;
use cmd::{cmd, pipe, Pipeline};
#[cfg(unix)]
use std::os::unix::prelude::*;
#[cfg(windows)]
use std::os::windows::prelude::*;
static DEFAULT_PORT: u16 = 6379;
/// Holds the connection information that redis should use for connecting.
#[derive(Clone, Debug)]
pub struct ConnectionInfo {
    /// The host name or address to connect to. (The old "boxed connection
    /// address" wording predated this being a plain `String`.)
    pub host: String,
    /// The TCP port; `DEFAULT_PORT` (6379) when the URL omits one.
    pub port: u16,
    /// The database number to use. This is usually `0`.
    pub db: i64,
    /// Optionally a password that should be used for connection.
    pub passwd: Option<String>,
}
/// Represents a stateful redis TCP connection.
#[derive(Debug)]
pub struct Connection {
    // Buffered reader over the socket; RefCell lets reads and writes happen
    // through a shared `&Connection`.
    con: RefCell<BufReader<TcpStream>>,
    // Database number selected when the connection was established.
    db: i64,
    // Set to false once a protocol error forces the socket shut.
    work: RefCell<bool>,
}
/// Represents a pubsub connection.
#[derive(Debug)]
pub struct PubSub {
    con: Connection,
    // Channels subscribed via SUBSCRIBE.
    channels: HashSet<String>,
    // Pattern subscriptions made via PSUBSCRIBE.
    pchannels: HashSet<String>,
}
/// Represents a pubsub message.
pub struct Msg {
    // Raw message body as received from the server.
    payload: Value,
    // Channel the message arrived on.
    channel: Value,
    // Pattern that matched, for messages from PSUBSCRIBE subscriptions.
    pattern: Option<Value>,
}
// Tells rust-url to treat every scheme passed to our parser as a relative
// scheme (host/port/path) with 6379 as the default port.
fn redis_scheme_type_mapper(_scheme: &str) -> url::SchemeType {
    url::SchemeType::Relative(DEFAULT_PORT)
}
/// This function takes a redis URL string and parses it into a URL
/// as used by rust-url. This is necessary as the default parser does
/// not understand how redis URLs function.
fn parse_redis_url(input: &str) -> url::ParseResult<url::Url> {
    let mut parser = url::UrlParser::new();
    parser.scheme_type_mapper(redis_scheme_type_mapper);
    // Any scheme other than "redis" is rejected outright.
    parser.parse(input).and_then(|parsed| {
        if parsed.scheme == "redis" {
            Ok(parsed)
        } else {
            Err(url::ParseError::InvalidScheme)
        }
    })
}
/// Extracts a `ConnectionInfo` from a parsed redis URL: host/port from the
/// authority, database number from the path ("/<db>", empty means 0) and
/// password from the userinfo section.
fn url_to_connection_info(url: url::Url) -> RedisResult<ConnectionInfo> {
    Ok(ConnectionInfo {
        host: unwrap_or!(url.serialize_host(),
                         fail!((ErrorKind::InvalidClientConfig, "Missing hostname"))),
        port: url.port().unwrap_or(DEFAULT_PORT),
        db: match url.serialize_path().unwrap_or("".to_string()).trim_matches('/') {
            "" => 0,
            path => {
                unwrap_or!(path.parse::<i64>().ok(),
                           fail!((ErrorKind::InvalidClientConfig, "Invalid database number")))
            }
        },
        // `map` instead of the original `and_then(|pw| Some(...))`.
        passwd: url.password().map(|pw| pw.to_string()),
    })
}
/// Parses a redis URL string (e.g. "redis://host:port/db") into a
/// `ConnectionInfo`, failing with `InvalidClientConfig` on bad input.
pub fn into_connection_info(info: &str) -> RedisResult<ConnectionInfo> {
    match parse_redis_url(info) {
        Ok(u) => url_to_connection_info(u),
        Err(_) => fail!((ErrorKind::InvalidClientConfig, "Redis URL did not parse")),
    }
}
/// Opens a TCP connection to the server described by `connection_info`,
/// authenticating (AUTH) and selecting the database (SELECT) as needed.
pub fn connect(connection_info: &ConnectionInfo) -> RedisResult<Connection> {
    let con = try!(Connection::new(&connection_info));
    let mut rv = Connection {
        con: RefCell::new(con),
        db: connection_info.db,
        work: RefCell::new(true),
    };
    match connection_info.passwd {
        Some(ref passwd) => {
            match cmd("AUTH").arg(&**passwd).query::<Value>(&mut rv) {
                Ok(Value::Okay) => {}
                Err(err) => {
                    // A response error containing "but no password is set"
                    // is tolerated: the caller supplied a password the
                    // server does not require. Anything else is fatal.
                    if err.kind() != ErrorKind::ResponseError ||
                       err.extension_error_detail()
                        .unwrap_or("")
                        .find("but no password is set")
                        .is_none() {
                        fail!((ErrorKind::AuthenticationFailed, "Password authentication failed"));
                    }
                }
                // Any successful reply other than +OK also counts as failure.
                _ => {
                    fail!((ErrorKind::AuthenticationFailed, "Password authentication failed"));
                }
            }
        }
        None => {}
    }
    // Database 0 is the server default, so SELECT is only sent otherwise.
    if connection_info.db != 0 {
        match cmd("SELECT").arg(connection_info.db).query::<Value>(&mut rv) {
            Ok(Value::Okay) => {}
            _ => fail!((ErrorKind::ResponseError, "Redis server refused to switch database")),
        }
    }
    Ok(rv)
}
/// Opens a fresh connection and wraps it in a `PubSub` with empty
/// channel and pattern subscription sets.
pub fn connect_pubsub(connection_info: &ConnectionInfo) -> RedisResult<PubSub> {
    Ok(PubSub {
        con: try!(connect(connection_info)),
        channels: HashSet::new(),
        pchannels: HashSet::new(),
    })
}
impl Connection {
    /// Raw OS handle of the underlying TCP stream (Windows socket).
    #[cfg(windows)]
    fn get_fd(tcp: &TcpStream) -> i32 {
        tcp.as_raw_socket() as i32
    }
    /// Raw OS handle of the underlying TCP stream (Unix fd).
    #[cfg(unix)]
    fn get_fd(tcp: &TcpStream) -> i32 {
        tcp.as_raw_fd() as i32
    }
    /// Establishes the TCP connection for `addr`.
    pub fn new(addr: &ConnectionInfo) -> RedisResult<BufReader<TcpStream>> {
        let tcp = try!(TcpStream::connect((&*addr.host, addr.port)));
        Ok(BufReader::new(tcp))
    }
    /// Writes an already-encoded command to the socket.
    pub fn send_bytes(&self, bytes: &[u8]) -> RedisResult<Value> {
        let mut buffer = self.con.borrow_mut();
        let w = buffer.get_mut() as &mut Write;
        // write_all: the previous bare `write` could perform a short write
        // and silently drop the tail of the command, corrupting the
        // protocol stream.
        try!(w.write_all(bytes));
        Ok(Value::Okay)
    }
    /// Reads and parses a single reply from the socket.
    pub fn read_response(&self) -> RedisResult<Value> {
        let mut buffer = self.con.borrow_mut();
        let result = Parser::new(buffer.get_mut() as &mut Read).parse_value();
        // shutdown connection on protocol error
        match result {
            Err(ref e) if e.kind() == ErrorKind::PatternError => {
                try!(buffer.get_mut().shutdown(net::Shutdown::Both));
                *self.work.borrow_mut() = false;
            }
            _ => (),
        }
        result
    }
    /// Exposes the socket handle, e.g. for poll/select loops.
    pub fn get_connection_fd(&self) -> i32 {
        let buffer = self.con.borrow();
        let tcp = buffer.get_ref();
        Connection::get_fd(tcp)
    }
    /// True while the connection has not been shut down by a protocol error.
    pub fn is_work(&self) -> bool {
        *self.work.borrow()
    }
    /// Duplicates the connection.
    ///
    /// Uses `TcpStream::try_clone` (which dup's the handle) instead of the
    /// previous `from_raw_fd(get_fd(..))` round-trip: constructing a second
    /// OWNING `TcpStream` from the same raw handle made both clones close
    /// the same descriptor on drop (double close / possible reuse of a
    /// recycled fd). The now-unused `from_fd` helpers were removed.
    pub fn try_clone(&self) -> RedisResult<Connection> {
        let tcp = try!(self.con.borrow().get_ref().try_clone());
        Ok(Connection {
            con: RefCell::new(BufReader::new(tcp)),
            db: self.db,
            work: RefCell::new(self.is_work()),
        })
    }
}
/// Implements the "stateless" part of the connection interface that is used by the
/// different objects in redis-rs. Primarily it obviously applies to `Connection`
/// object but also some other objects implement the interface (for instance
/// whole clients or certain redis results).
///
/// Generally clients and connections (as well as redis results of those) implement
/// this trait. Actual connections provide more functionality which can be used
/// to implement things like `PubSub` but they also can modify the intrinsic
/// state of the TCP connection. This is not possible with `ConnectionLike`
/// implementors because that functionality is not exposed.
pub trait ConnectionLike {
    /// Sends an already encoded (packed) command into the TCP socket and
    /// reads the single response from it.
    fn req_packed_command(&self, cmd: &[u8]) -> RedisResult<Value>;
    /// Sends multiple already encoded (packed) command into the TCP socket
    /// and reads `count` responses from it. This is used to implement
    /// pipelining. The first `offset` replies are read but discarded
    /// (e.g. the per-command queue acknowledgements of a MULTI block).
    fn req_packed_commands(&self,
                           cmd: &[u8],
                           offset: usize,
                           count: usize)
                           -> RedisResult<Vec<Value>>;
    /// Returns the database this connection is bound to. Note that this
    /// information might be unreliable because it's initially cached and
    /// also might be incorrect if the connection like object is not
    /// actually connected.
    fn get_db(&self) -> i64;
}
impl ConnectionLike for Connection {
    // One command in, one reply out.
    fn req_packed_command(&self, cmd: &[u8]) -> RedisResult<Value> {
        try!(self.send_bytes(cmd));
        self.read_response()
    }
    // Pipelined send: discard the first `offset` replies, keep the
    // following `count` of them.
    fn req_packed_commands(&self,
                           cmd: &[u8],
                           offset: usize,
                           count: usize)
                           -> RedisResult<Vec<Value>> {
        try!(self.send_bytes(cmd));
        for _ in 0..offset {
            try!(self.read_response());
        }
        let mut rv = Vec::with_capacity(count);
        for _ in 0..count {
            rv.push(try!(self.read_response()));
        }
        Ok(rv)
    }
    fn get_db(&self) -> i64 {
        self.db
    }
}
/// The pubsub object provides convenient access to the redis pubsub
/// system. Once created you can subscribe and unsubscribe from channels
/// and listen in on messages.
///
/// Example:
///
/// ```rust,no_run
/// # fn do_something() -> td_rredis::RedisResult<()> {
/// let client = try!(td_rredis::Client::open("redis://127.0.0.1/"));
/// let mut pubsub = try!(client.get_pubsub());
/// try!(pubsub.subscribe("channel_1"));
/// try!(pubsub.subscribe("channel_2"));
///
/// loop {
/// let msg = try!(pubsub.get_message());
/// let payload : String = try!(msg.get_payload());
/// println!("channel '{}': {}", msg.get_channel_name(), payload);
/// }
/// # }
/// ```
impl PubSub {
    /// Subscribes to a new channel.
    pub fn subscribe(&mut self, channel: String) -> RedisResult<()> {
        self.subscribes(vec![channel])
    }
    /// Subscribes to several channels at once, recording them so that
    /// `try_clone` carries the same subscription set.
    pub fn subscribes(&mut self, channels: Vec<String>) -> RedisResult<()> {
        let mut cmd = cmd("SUBSCRIBE");
        for channel in &channels {
            cmd.arg(&**channel);
        }
        let _: () = try!(cmd.query(&self.con));
        self.channels.extend(channels);
        Ok(())
    }
    /// Subscribes to a new channel with a pattern.
    pub fn psubscribe(&mut self, channel: String) -> RedisResult<()> {
        self.psubscribes(vec![channel])
    }
    /// Subscribes to several channel patterns at once.
    pub fn psubscribes(&mut self, channels: Vec<String>) -> RedisResult<()> {
        let mut cmd = cmd("PSUBSCRIBE");
        for channel in &channels {
            cmd.arg(&**channel);
        }
        let _: () = try!(cmd.query(&self.con));
        self.pchannels.extend(channels);
        Ok(())
    }
    /// Unsubscribes from a channel.
    pub fn unsubscribe(&mut self, channel: String) -> RedisResult<()> {
        self.unsubscribes(vec![channel])
    }
    /// Unsubscribes from several channels at once.
    pub fn unsubscribes(&mut self, channels: Vec<String>) -> RedisResult<()> {
        let mut cmd = cmd("UNSUBSCRIBE");
        for channel in &channels {
            cmd.arg(&**channel);
        }
        let _: () = try!(cmd.query(&self.con));
        for channel in channels {
            self.channels.remove(&channel);
        }
        Ok(())
    }
    /// Unsubscribes from a channel pattern.
    pub fn punsubscribe(&mut self, channel: String) -> RedisResult<()> {
        self.punsubscribes(vec![channel])
    }
    /// Unsubscribes from several channel patterns at once.
    pub fn punsubscribes(&mut self, channels: Vec<String>) -> RedisResult<()> {
        let mut cmd = cmd("PUNSUBSCRIBE");
        for channel in &channels {
            cmd.arg(&**channel);
        }
        let _: () = try!(cmd.query(&self.con));
        for channel in channels {
            self.pchannels.remove(&channel);
        }
        Ok(())
    }
    /// Clones the underlying connection together with the current
    /// channel and pattern bookkeeping.
    pub fn try_clone(&self) -> RedisResult<PubSub> {
        Ok(PubSub {
            con: try!(self.con.try_clone()),
            channels: self.channels.clone(),
            pchannels: self.pchannels.clone(),
        })
    }
    /// True while the underlying connection is usable.
    pub fn is_work(&self) -> bool {
        self.con.is_work()
    }
    /// Raw socket handle of the underlying connection.
    pub fn get_connection_fd(&self) -> i32 {
        self.con.get_connection_fd()
    }
    /// Fetches the next message from the pubsub connection. Blocks until
    /// a message becomes available. This currently does not provide a
    /// wait not to block :(
    ///
    /// The message itself is still generic and can be converted into an
    /// appropriate type through the helper methods on it.
    pub fn get_message(&self) -> RedisResult<Msg> {
        loop {
            let raw_msg: Vec<Value> = try!(from_redis_value(&try!(self.con.read_response())));
            let mut iter = raw_msg.into_iter();
            // First element of a pubsub frame is its kind: "message"
            // (channel, payload) or "pmessage" (pattern, channel, payload).
            let msg_type: String = try!(from_redis_value(&unwrap_or!(iter.next(), continue)));
            let mut pattern = None;
            let payload;
            let channel;
            if msg_type == "message" {
                channel = unwrap_or!(iter.next(), continue);
                payload = unwrap_or!(iter.next(), continue);
            } else if msg_type == "pmessage" {
                pattern = Some(unwrap_or!(iter.next(), continue));
                channel = unwrap_or!(iter.next(), continue);
                payload = unwrap_or!(iter.next(), continue);
            } else {
                // Other frame kinds (subscribe confirmations etc.) are skipped.
                continue;
            }
            return Ok(Msg {
                payload: payload,
                channel: channel,
                pattern: pattern,
            });
        }
    }
}
/// This holds the data that comes from listening to a pubsub
/// connection. It only contains actual message data.
impl Msg {
    /// Returns the channel this message came on.
    pub fn get_channel<T: FromRedisValue>(&self) -> RedisResult<T> {
        from_redis_value(&self.channel)
    }
    /// Convenience method to get a string version of the channel. Unless
    /// your channel contains non utf-8 bytes you can always use this
    /// method. If the channel is not a valid string (which really should
    /// not happen) then the return value is `"?"`.
    pub fn get_channel_name(&self) -> &str {
        match self.channel {
            Value::Data(ref bytes) => from_utf8(bytes).unwrap_or("?"),
            _ => "?",
        }
    }
    /// Returns the message's payload in a specific format.
    pub fn get_payload<T: FromRedisValue>(&self) -> RedisResult<T> {
        from_redis_value(&self.payload)
    }
    /// Returns the bytes that are the message's payload. This can be used
    /// as an alternative to the `get_payload` function if you are interested
    /// in the raw bytes in it.
    pub fn get_payload_bytes(&self) -> &[u8] {
        // Fixed: this previously matched on `self.channel`, returning the
        // channel name bytes instead of the payload.
        match self.payload {
            Value::Data(ref bytes) => bytes,
            _ => b"",
        }
    }
    /// Returns true if the message was constructed from a pattern
    /// subscription.
    pub fn from_pattern(&self) -> bool {
        self.pattern.is_some()
    }
    /// If the message was constructed from a message pattern this can be
    /// used to find out which one. It's recommended to match against
    /// an `Option<String>` so that you do not need to use `from_pattern`
    /// to figure out if a pattern was set.
    pub fn get_pattern<T: FromRedisValue>(&self) -> RedisResult<T> {
        match self.pattern {
            None => from_redis_value(&Value::Nil),
            Some(ref x) => from_redis_value(x),
        }
    }
}
/// This function simplifies transaction management slightly. What it
/// does is automatically watching keys and then going into a transaction
/// loop util it succeeds. Once it goes through the results are
/// returned.
///
/// To use the transaction two pieces of information are needed: a list
/// of all the keys that need to be watched for modifications and a
/// closure with the code that should be execute in the context of the
/// transaction. The closure is invoked with a fresh pipeline in atomic
/// mode. To use the transaction the function needs to return the result
/// from querying the pipeline with the connection.
///
/// The end result of the transaction is then available as the return
/// value from the function call.
///
/// Example:
///
/// ```rust,no_run
/// use td_rredis::{Commands, PipelineCommands};
/// # fn do_something() -> td_rredis::RedisResult<()> {
/// # let client = td_rredis::Client::open("redis://127.0.0.1/").unwrap();
/// # let con = client.get_connection().unwrap();
/// let key = "the_key";
/// let (new_val,) : (isize,) = try!(td_rredis::transaction(&con, &[key], |pipe| {
/// let old_val : isize = try!(con.get(key));
/// pipe
/// .set(key, old_val + 1).ignore()
/// .get(key).query(&con)
/// }));
/// println!("The incremented number is: {}", new_val);
/// # Ok(()) }
/// ```
pub fn transaction<K: ToRedisArgs,
                   T: FromRedisValue,
                   F: FnMut(&mut Pipeline) -> RedisResult<Option<T>>>
    (con: &Connection,
     keys: &[K],
     // `mut` on the parameter itself replaces the old `let mut func = func;`.
     mut func: F)
     -> RedisResult<T> {
    loop {
        // WATCH the keys so the EXEC inside the atomic pipeline aborts if
        // any of them change before it runs.
        let _: () = try!(cmd("WATCH").arg(keys).query(con));
        let mut p = pipe();
        let response: Option<T> = try!(func(p.atomic()));
        match response {
            // `None` signals the transaction was aborted by a concurrent
            // modification: re-watch and retry.
            None => {
                continue;
            }
            Some(response) => {
                // make sure no watch is left in the connection, even if
                // someone forgot to use the pipeline.
                let _: () = try!(cmd("UNWATCH").query(con));
                return Ok(response);
            }
        }
    }
}
|
pub mod camera;
pub mod game;
|
use crate::common::run_command;
use crate::{Browser, BrowserOptions, Error, ErrorKind, Result, TargetType};
use log::trace;
use std::io::{BufRead, BufReader};
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf, MAIN_SEPARATOR};
use std::process::{Command, Stdio};
// Expands to: resolve $name on the PATH (via `for_matching_path`) and run
// it with the given arguments. The command is detached from the terminal
// unless the resolved binary is a known text-mode browser.
macro_rules! try_browser {
    ( $options: expr, $name:expr, $( $arg:expr ),+ ) => {
        for_matching_path($name, |pb| {
            let mut cmd = Command::new(pb);
            $(
                cmd.arg($arg);
            )+
            run_command(&mut cmd, !is_text_browser(&pb), $options)
        })
    }
}
/// Deal with opening of browsers on Linux and *BSD - currently supports only the default browser
///
/// The mechanism of opening the default browser is as follows:
/// 1. Attempt to use $BROWSER env var if available
/// 2. Attempt to use xdg-open
/// 3. Attempt to use window manager specific commands, like gnome-open, kde-open etc. incl. WSL
/// 4. Fallback to x-www-browser
pub(super) fn open_browser_internal(
browser: Browser,
target: &TargetType,
options: &BrowserOptions,
) -> Result<()> {
match browser {
Browser::Default => open_browser_default(target, options),
_ => Err(Error::new(
ErrorKind::NotFound,
"only default browser supported",
)),
}
}
/// Open the default browser.
///
/// [BrowserOptions::dry_run] is handled inside [run_command], as all execution paths eventually
/// rely on it to execute.
fn open_browser_default(target: &TargetType, options: &BrowserOptions) -> Result<()> {
    let url: &str = target;
    // we first try with the $BROWSER env
    try_with_browser_env(url, options)
        // allow for haiku's open specifically
        .or_else(|_| try_haiku(options, url))
        // then we try with xdg configuration
        .or_else(|_| try_xdg(options, url))
        // else do desktop specific stuff
        .or_else(|r| match guess_desktop_env() {
            "kde" => try_browser!(options, "kde-open", url)
                .or_else(|_| try_browser!(options, "kde-open5", url))
                .or_else(|_| try_browser!(options, "kfmclient", "newTab", url)),
            "gnome" => try_browser!(options, "gio", "open", url)
                .or_else(|_| try_browser!(options, "gvfs-open", url))
                .or_else(|_| try_browser!(options, "gnome-open", url)),
            "mate" => try_browser!(options, "gio", "open", url)
                .or_else(|_| try_browser!(options, "gvfs-open", url))
                .or_else(|_| try_browser!(options, "mate-open", url)),
            "xfce" => try_browser!(options, "exo-open", url)
                .or_else(|_| try_browser!(options, "gio", "open", url))
                .or_else(|_| try_browser!(options, "gvfs-open", url)),
            "wsl" => try_wsl(options, target),
            "flatpak" => try_flatpak(options, target),
            _ => Err(r),
        })
        // at the end, we'll try x-www-browser and return the result as is
        .or_else(|_| try_browser!(options, "x-www-browser", url))
        // if all above failed, map error to not found
        .map_err(|_| {
            Error::new(
                ErrorKind::NotFound,
                // Fixed: the env var consulted above is BROWSER (singular);
                // the message previously said "BROWSERS".
                "No valid browsers detected. You can specify one in BROWSER environment variable",
            )
        })
        // and convert a successful result into a ()
        .map(|_| ())
}
/// Try the browser(s) configured in the `$BROWSER` environment variable.
///
/// `$BROWSER` can contain ':' delimited entries, each a potential browser
/// command line. Within an entry, `%s` is replaced with the URL, `%c` with ':'
/// and `%%` with '%'. When no `%s` was present, the URL is appended as the
/// final argument. Returns `Ok(())` on the first entry that launches
/// successfully, else a NotFound error.
fn try_with_browser_env(url: &str, options: &BrowserOptions) -> Result<()> {
    // $BROWSER can contain ':' delimited options, each representing a potential browser command line
    for browser in std::env::var("BROWSER")
        .unwrap_or_else(|_| String::from(""))
        .split(':')
    {
        if !browser.is_empty() {
            // each browser command can have %s to represent URL, while %c needs to be replaced
            // with ':' and %% with '%'
            let cmdline = browser
                .replace("%s", url)
                .replace("%c", ":")
                .replace("%%", "%");
            let cmdarr: Vec<&str> = cmdline.split_ascii_whitespace().collect();
            // BUGFIX: a whitespace-only entry (e.g. `BROWSER=" :firefox"`)
            // produces no tokens at all; indexing `cmdarr[0]` would panic.
            // Skip such entries instead.
            let browser_cmd = match cmdarr.first() {
                Some(c) => *c,
                None => continue,
            };
            let env_exit = for_matching_path(browser_cmd, |pb| {
                let mut cmd = Command::new(pb);
                for arg in cmdarr.iter().skip(1) {
                    cmd.arg(arg);
                }
                if !browser.contains("%s") {
                    // append the url as an argument only if it was not already set via %s
                    cmd.arg(url);
                }
                run_command(&mut cmd, !is_text_browser(pb), options)
            });
            if env_exit.is_ok() {
                return Ok(());
            }
        }
    }
    Err(Error::new(
        ErrorKind::NotFound,
        "No valid browser configured in BROWSER environment variable",
    ))
}
/// Check if we are inside WSL on Windows, and interoperability with Windows tools is
/// enabled.
fn is_wsl() -> bool {
    // Only Linux can possibly be WSL; checking procfs on other targets would
    // just be a pointless disk hit, so short-circuit to false there.
    if !cfg!(target_os = "linux") {
        return false;
    }
    // Interop with windows tools may have been disabled by the administrator,
    // in which case invoking windows commands would fail anyway.
    // See: https://learn.microsoft.com/en-us/windows/wsl/filesystems#disable-interoperability
    std::fs::read_to_string("/proc/sys/fs/binfmt_misc/WSLInterop")
        .map(|contents| contents.contains("enabled"))
        .unwrap_or(false)
}
/// Check if we're running inside Flatpak
#[inline]
fn is_flatpak() -> bool {
    // Flatpak sets the `container` env var to "flatpak" inside the sandbox.
    match std::env::var("container") {
        Ok(value) => value.eq_ignore_ascii_case("flatpak"),
        Err(_) => false,
    }
}
/// Detect the desktop environment
fn guess_desktop_env() -> &'static str {
    const UNKNOWN: &str = "unknown";
    // Fetch an env var lowercased, defaulting to "unknown" when unset.
    let env_lower = |name: &str| {
        std::env::var(name)
            .unwrap_or_else(|_| UNKNOWN.into())
            .to_ascii_lowercase()
    };
    let xcd = env_lower("XDG_CURRENT_DESKTOP");
    let dsession = env_lower("DESKTOP_SESSION");
    if is_flatpak() {
        return "flatpak";
    }
    // GNOME and its derivatives
    if xcd.contains("gnome") || xcd.contains("cinnamon") || dsession.contains("gnome") {
        return "gnome";
    }
    // KDE: https://userbase.kde.org/KDE_System_Administration/Environment_Variables#Automatically_Set_Variables
    if xcd.contains("kde")
        || std::env::var("KDE_FULL_SESSION").is_ok()
        || std::env::var("KDE_SESSION_VERSION").is_ok()
    {
        return "kde";
    }
    // We'll treat MATE as distinct from GNOME due to mate-open
    if xcd.contains("mate") || dsession.contains("mate") {
        return "mate";
    }
    // XFCE
    if xcd.contains("xfce") || dsession.contains("xfce") {
        return "xfce";
    }
    // WSL
    if is_wsl() {
        return "wsl";
    }
    // All others
    UNKNOWN
}
/// Open browser in WSL environments
///
/// http/https URLs are handed to the Windows side via cmd.exe, powershell.exe
/// or wsl-open, in that order. `file` URLs (only when WSL interop is compiled
/// in) require resolving the Windows default browser and translating the path
/// via the `wsl` submodule.
fn try_wsl(options: &BrowserOptions, target: &TargetType) -> Result<()> {
    match target.0.scheme() {
        "http" | "https" => {
            let url: &str = target;
            // try the windows-side launchers in decreasing order of likelihood
            try_browser!(options, "cmd.exe", "/c", "start", url)
                .or_else(|_| try_browser!(options, "powershell.exe", "Start", url))
                .or_else(|_| try_browser!(options, "wsl-open", url))
        }
        #[cfg(all(
            target_os = "linux",
            not(feature = "hardened"),
            not(feature = "disable-wsl")
        ))]
        "file" => {
            // we'll need to detect the default browser and then invoke it
            // with wsl translated path
            let wc = wsl::get_wsl_win_config()?;
            // prefer powershell when available, else fall back to cmd.exe
            let mut cmd = if wc.powershell_path.is_some() {
                wsl::get_wsl_windows_browser_ps(&wc, target)
            } else {
                wsl::get_wsl_windows_browser_cmd(&wc, target)
            }?;
            run_command(&mut cmd, true, options)
        }
        _ => Err(Error::new(ErrorKind::NotFound, "invalid browser")),
    }
}
/// Open browser in Flatpak environments
fn try_flatpak(options: &BrowserOptions, target: &TargetType) -> Result<()> {
match target.0.scheme() {
"http" | "https" => {
let url: &str = target;
// we assume xdg-open to be present, given that it's a part of standard
// runtime & SDK of flatpak
try_browser!(options, "xdg-open", url)
}
// we support only http urls under Flatpak to adhere to the defined
// Consistent Behaviour, as effectively DBUS is used interally, and
// there doesn't seem to be a way for us to determine actual browser
_ => Err(Error::new(ErrorKind::NotFound, "only http urls supported")),
}
}
/// Handle Haiku explicitly, as it uses an "open" command, similar to macos
/// but on other Unixes, open ends up translating to shell open fd
fn try_haiku(options: &BrowserOptions, url: &str) -> Result<()> {
if cfg!(target_os = "haiku") {
try_browser!(options, "open", url).map(|_| ())
} else {
Err(Error::new(ErrorKind::NotFound, "Not on haiku"))
}
}
/// Dig into XDG settings (if xdg is available) to force it to open the browser, instead of
/// the default application
///
/// Resolves the default browser's desktop-entry name via `xdg-settings`, then
/// searches every XDG data directory for the matching `.desktop` file and
/// launches through the first config that works. Errors other than NotFound
/// from a config short-circuit the search.
fn try_xdg(options: &BrowserOptions, url: &str) -> Result<()> {
    // run: xdg-settings get default-web-browser
    let browser_name_os = for_matching_path("xdg-settings", |pb| {
        Command::new(pb)
            .args(["get", "default-web-browser"])
            .stdin(Stdio::null())
            .stderr(Stdio::null())
            .output()
    })
    .map_err(|_| Error::new(ErrorKind::NotFound, "unable to determine xdg browser"))?
    .stdout;
    // convert browser name to a utf-8 string and trim off the trailing newline
    let browser_name = String::from_utf8(browser_name_os)
        .map_err(|_| Error::new(ErrorKind::NotFound, "invalid default browser name"))?
        .trim()
        .to_owned();
    if browser_name.is_empty() {
        return Err(Error::new(ErrorKind::NotFound, "no default xdg browser"));
    }
    trace!("found xdg browser: {:?}", &browser_name);
    // search for the config file corresponding to this browser name
    let mut config_found = false;
    let app_suffix = "applications";
    // IDIOM FIX: nothing here mutates the directories, so consume the Vec by
    // value instead of the previous `iter_mut()` over a temporary.
    for xdg_dir in get_xdg_dirs() {
        let mut config_path = xdg_dir.join(app_suffix).join(&browser_name);
        trace!("checking for xdg config at {:?}", config_path);
        let mut metadata = config_path.metadata();
        if metadata.is_err() && browser_name.contains('-') {
            // as per the spec, we need to replace '-' with /
            let child_path = browser_name.replace('-', "/");
            config_path = xdg_dir.join(app_suffix).join(child_path);
            metadata = config_path.metadata();
        }
        if metadata.is_ok() {
            // we've found the config file, so we try running using that
            config_found = true;
            match open_using_xdg_config(&config_path, options, url) {
                Ok(x) => return Ok(x), // return if successful
                Err(err) => {
                    // if we got an error other than NotFound, then we short
                    // circuit, and do not try any more options, else we
                    // continue to try more
                    if err.kind() != ErrorKind::NotFound {
                        return Err(err);
                    }
                }
            }
        }
    }
    if config_found {
        Err(Error::new(ErrorKind::Other, "xdg-open failed"))
    } else {
        Err(Error::new(ErrorKind::NotFound, "no valid xdg config found"))
    }
}
/// Opens `url` using xdg configuration found in `config_path`
///
/// See https://specifications.freedesktop.org/desktop-entry-spec/latest for details
fn open_using_xdg_config(config_path: &PathBuf, options: &BrowserOptions, url: &str) -> Result<()> {
let file = std::fs::File::open(config_path)?;
let mut in_desktop_entry = false;
let mut hidden = false;
let mut cmdline: Option<String> = None;
let mut requires_terminal = false;
// we capture important keys under the [Desktop Entry] section, as defined under:
// https://specifications.freedesktop.org/desktop-entry-spec/latest/ar01s06.html
for line in BufReader::new(file).lines().flatten() {
if line == "[Desktop Entry]" {
in_desktop_entry = true;
} else if line.starts_with('[') {
in_desktop_entry = false;
} else if in_desktop_entry && !line.starts_with('#') {
if let Some(idx) = line.find('=') {
let key = &line[..idx];
let value = &line[idx + 1..];
match key {
"Exec" => cmdline = Some(value.to_owned()),
"Hidden" => hidden = value == "true",
"Terminal" => requires_terminal = value == "true",
_ => (), // ignore
}
}
}
}
if hidden {
// we ignore this config if it was marked hidden/deleted
return Err(Error::new(ErrorKind::NotFound, "xdg config is hidden"));
}
if let Some(cmdline) = cmdline {
// we have a valid configuration
let cmdarr: Vec<&str> = cmdline.split_ascii_whitespace().collect();
let browser_cmd = cmdarr[0];
for_matching_path(browser_cmd, |pb| {
let mut cmd = Command::new(pb);
let mut url_added = false;
for arg in cmdarr.iter().skip(1) {
match *arg {
"%u" | "%U" | "%f" | "%F" => {
url_added = true;
cmd.arg(url)
}
_ => cmd.arg(arg),
};
}
if !url_added {
// append the url as an argument only if it was not already set
cmd.arg(url);
}
run_command(&mut cmd, !requires_terminal, options)
})
} else {
// we don't have a valid config
Err(Error::new(ErrorKind::NotFound, "not a valid xdg config"))
}
}
/// Get the list of directories in which the desktop file needs to be searched
fn get_xdg_dirs() -> Vec<PathBuf> {
    let mut dirs: Vec<PathBuf> = Vec::new();
    // $XDG_DATA_HOME takes precedence when set to an absolute path, falling
    // back to ~/.local/share.
    let data_home = std::env::var("XDG_DATA_HOME")
        .ok()
        .map(PathBuf::from)
        .filter(|p| p.is_absolute())
        .or_else(|| home::home_dir().map(|h| h.join(".local/share")));
    dirs.extend(data_home);
    // $XDG_DATA_DIRS next, defaulting to the spec-mandated pair when unset.
    match std::env::var("XDG_DATA_DIRS") {
        Ok(data_dirs) => dirs.extend(data_dirs.split(':').map(PathBuf::from)),
        Err(_) => {
            dirs.push(PathBuf::from("/usr/local/share"));
            dirs.push(PathBuf::from("/usr/share"));
        }
    }
    dirs
}
/// Returns true if specified command refers to a known list of text browsers
///
/// Matches the command's trailing path component against [TEXT_BROWSERS].
fn is_text_browser(pb: &Path) -> bool {
    // idiomatic `any` instead of the manual loop with early return
    TEXT_BROWSERS.iter().any(|browser| pb.ends_with(browser))
}
fn for_matching_path<F, T>(name: &str, op: F) -> Result<T>
where
F: FnOnce(&PathBuf) -> Result<T>,
{
let err = Err(Error::new(ErrorKind::NotFound, "command not found"));
// if the name already includes path separator, we should not try to do a PATH search on it
// as it's likely an absolutely or relative name, so we treat it as such.
if name.contains(MAIN_SEPARATOR) {
let pb = std::path::PathBuf::from(name);
if let Ok(metadata) = pb.metadata() {
if metadata.is_file() && metadata.permissions().mode() & 0o111 != 0 {
return op(&pb);
}
} else {
return err;
}
} else {
// search for this name inside PATH
if let Ok(path) = std::env::var("PATH") {
for entry in path.split(':') {
let mut pb = std::path::PathBuf::from(entry);
pb.push(name);
if let Ok(metadata) = pb.metadata() {
if metadata.is_file() && metadata.permissions().mode() & 0o111 != 0 {
return op(&pb);
}
}
}
}
}
// return the not found err, if we didn't find anything above
err
}
// Known text-mode browser command names; is_text_browser matches command
// basenames against this list (used to decide how run_command is invoked).
static TEXT_BROWSERS: [&str; 9] = [
    "lynx", "links", "links2", "elinks", "w3m", "eww", "netrik", "retawq", "curl",
];
#[cfg(test)]
mod tests_xdg {
    use super::*;
    use std::fs::File;
    use std::io::Write;
    /// Builds a unique, per-process temp path `<tmpdir>/<name>.<pid>.<suffix>`.
    fn get_temp_path(name: &str, suffix: &str) -> String {
        let pid = std::process::id();
        std::env::temp_dir()
            .join(format!("{}.{}.{}", name, pid, suffix))
            .into_os_string()
            .into_string()
            .expect("failed to convert into string")
    }
    /// End-to-end test of [open_using_xdg_config]: a generated shell script
    /// acts as the "browser", validating its arguments and writing the url it
    /// received into a flag file which the test then polls for.
    #[test]
    fn test_xdg_open_local_file() {
        let _ = env_logger::try_init();
        // ensure flag file is not existing
        let flag_path = get_temp_path("test_xdg", "flag");
        let _ = std::fs::remove_file(&flag_path);
        // create browser script
        let txt_path = get_temp_path("test_xdf", "txt");
        let browser_path = get_temp_path("test_xdg", "browser");
        {
            let mut browser_file =
                File::create(&browser_path).expect("failed to create browser file");
            let _ = browser_file.write_fmt(format_args!(
                r#"#!/bin/bash
if [ "$1" != "p1" ]; then
echo "1st parameter should've been p1" >&2
exit 1
elif [ "$2" != "{}" ]; then
echo "2nd parameter should've been {}" >&2
exit 1
elif [ "$3" != "p3" ]; then
echo "3rd parameter should've been p3" >&2
exit 1
fi
echo "$2" > "{}"
"#,
                &txt_path, &txt_path, &flag_path
            ));
            let mut perms = browser_file
                .metadata()
                .expect("failed to get permissions")
                .permissions();
            perms.set_mode(0o755);
            let _ = browser_file.set_permissions(perms);
        }
        // create xdg desktop config; the Exec under [Another Entry] must be
        // ignored by the parser
        let config_path = get_temp_path("test_xdg", "desktop");
        {
            let mut xdg_file =
                std::fs::File::create(&config_path).expect("failed to create xdg desktop file");
            let _ = xdg_file.write_fmt(format_args!(
                r#"# this line should be ignored
[Desktop Entry]
Exec={} p1 %u p3
[Another Entry]
Exec=/bin/ls
# the above Exec line should be getting ignored
"#,
                &browser_path
            ));
        }
        // now try opening browser using above desktop config
        let result = open_using_xdg_config(
            &PathBuf::from(&config_path),
            &BrowserOptions::default(),
            &txt_path,
        );
        // we need to wait until the flag file shows up due to the async
        // nature of browser invocation
        for _ in 0..10 {
            if std::fs::read_to_string(&flag_path).is_ok() {
                break;
            }
            std::thread::sleep(std::time::Duration::from_millis(500));
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
        // validate that the flag file contains the url we passed
        assert_eq!(
            std::fs::read_to_string(&flag_path)
                .expect("flag file not found")
                .trim(),
            &txt_path,
        );
        // the invocation must have reported success (asserted once; the
        // previous duplicate trailing assert was removed)
        assert!(result.is_ok());
        // delete all temp files
        let _ = std::fs::remove_file(&txt_path);
        let _ = std::fs::remove_file(&flag_path);
        let _ = std::fs::remove_file(&browser_path);
        let _ = std::fs::remove_file(&config_path);
    }
}
/// WSL related browser functionality.
///
/// We treat it as a separate submod, to allow for easy logical grouping
/// and to enable/disable based on some feature easily in future.
#[cfg(all(
    target_os = "linux",
    not(feature = "hardened"),
    not(feature = "disable-wsl")
))]
mod wsl {
    use crate::common::for_each_token;
    use crate::{Result, TargetType};
    use std::io::{Error, ErrorKind};
    use std::path::{Path, PathBuf};
    use std::process::{Command, Stdio};
    /// Locations of the Windows-side tooling as seen from inside WSL.
    pub(super) struct WindowsConfig {
        // Linux mount point of the Windows system drive, derived from the
        // `.../windows/system32` PATH entry (two levels up, canonicalized).
        root: PathBuf,
        // Full path to cmd.exe.
        cmd_path: PathBuf,
        // Full path to powershell.exe, when one was found under `root`.
        pub(super) powershell_path: Option<PathBuf>,
    }
    /// Returns a [WindowsConfig] by iterating over PATH entries. This seems to be
    /// the fastest way to determine this.
    pub(super) fn get_wsl_win_config() -> Result<WindowsConfig> {
        let err_fn = || Error::new(ErrorKind::NotFound, "invalid windows config");
        if let Some(path_env) = std::env::var_os("PATH") {
            let mut root: Option<PathBuf> = None;
            let mut cmd_path: Option<PathBuf> = None;
            let mut powershell_path: Option<PathBuf> = None;
            // First pass: locate the `<win-root>/windows/system32` PATH entry,
            // which yields both the windows root and cmd.exe.
            for path in std::env::split_paths(&path_env) {
                let path_s = path.to_string_lossy().to_ascii_lowercase();
                let path_s = path_s.trim_end_matches('/');
                if path_s.ends_with("/windows/system32") {
                    root = Some(std::fs::canonicalize(path.join("../.."))?);
                    cmd_path = Some(path.join("cmd.exe"));
                    break;
                }
            }
            // Second pass: look for powershell.exe in any PATH entry under root.
            if let Some(ref root) = root {
                for path in std::env::split_paths(&path_env) {
                    if path.starts_with(root) {
                        let pb = path.join("powershell.exe");
                        if pb.is_file() {
                            powershell_path = Some(pb);
                        }
                    }
                }
            }
            if let Some(root) = root {
                // cmd_path is always set together with root above; the
                // fallback here is purely defensive.
                let cmd_path = cmd_path.unwrap_or_else(|| (root).join("windows/system32/cmd.exe"));
                Ok(WindowsConfig {
                    root,
                    cmd_path,
                    powershell_path,
                })
            } else {
                Err(err_fn())
            }
        } else {
            Err(err_fn())
        }
    }
    /// Try to get default browser command from powershell.exe
    pub(super) fn get_wsl_windows_browser_ps(
        wc: &WindowsConfig,
        url: &TargetType,
    ) -> Result<Command> {
        let err_fn = || Error::new(ErrorKind::NotFound, "powershell.exe error");
        let ps_exe = wc.powershell_path.as_ref().ok_or_else(err_fn)?;
        let mut cmd = Command::new(ps_exe);
        // `-Command -` makes powershell read the script from stdin.
        cmd.arg("-NoLogo")
            .arg("-NoProfile")
            .arg("-NonInteractive")
            .arg("-Command")
            .arg("-")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::null());
        log::debug!("running command: ${:?}", &cmd);
        let mut child = cmd.spawn()?;
        let mut stdin = child.stdin.take().ok_or_else(err_fn)?;
        // feed the AssocQueryString script and close the pipe so powershell runs it
        std::io::Write::write_all(&mut stdin, WSL_PS_SCRIPT.as_bytes())?;
        drop(stdin); // flush to stdin, and close
        let output_u8 = child.wait_with_output()?;
        let output = String::from_utf8_lossy(&output_u8.stdout);
        let output = output.trim();
        if output.is_empty() {
            Err(err_fn())
        } else {
            parse_wsl_cmdline(wc, output, url)
        }
    }
    /// Try to get default browser command from cmd.exe
    pub(super) fn get_wsl_windows_browser_cmd(
        wc: &WindowsConfig,
        url: &TargetType,
    ) -> Result<Command> {
        let err_fn = || Error::new(ErrorKind::NotFound, "cmd.exe error");
        let mut cmd = Command::new(&wc.cmd_path);
        // `ftype http` queries the command registered for the http file type.
        cmd.arg("/Q")
            .arg("/C")
            .arg("ftype http")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null());
        log::debug!("running command: ${:?}", &cmd);
        let output_u8 = cmd.output()?;
        let output = String::from_utf8_lossy(&output_u8.stdout);
        let output = output.trim();
        if output.is_empty() {
            Err(err_fn())
        } else {
            parse_wsl_cmdline(wc, output, url)
        }
    }
    /// Given the configured command line `cmdline` in registry, and the given `url`,
    /// return the appropriate `Command` to invoke
    fn parse_wsl_cmdline(wc: &WindowsConfig, cmdline: &str, url: &TargetType) -> Result<Command> {
        let mut tokens: Vec<String> = Vec::new();
        let filepath = wsl_get_filepath_from_url(wc, url)?;
        let fp = &filepath;
        // registry command lines use %0/%1 as the placeholder for the target
        for_each_token(cmdline, |token: &str| {
            if matches!(token, "%0" | "%1") {
                tokens.push(fp.to_owned());
            } else {
                tokens.push(token.to_string());
            }
        });
        if tokens.is_empty() {
            Err(Error::new(ErrorKind::NotFound, "invalid command"))
        } else {
            // the first token is the browser executable (a windows path);
            // translate it so we can exec it from the linux side
            let progpath = wsl_path_win2lin(wc, &tokens[0])?;
            let mut cmd = Command::new(progpath);
            if tokens.len() > 1 {
                cmd.args(&tokens[1..]);
            }
            Ok(cmd)
        }
    }
    /// Translates the target into a path/URL string usable by a Windows browser.
    fn wsl_get_filepath_from_url(wc: &WindowsConfig, target: &TargetType) -> Result<String> {
        let url = &target.0;
        if url.scheme() == "file" {
            if url.host().is_none() {
                // local file: translate the linux path to a windows-visible one
                let path = url
                    .to_file_path()
                    .map_err(|_| Error::new(ErrorKind::NotFound, "invalid path"))?;
                wsl_path_lin2win(wc, path)
            } else {
                // file url with a host component: treat it as a \\wsl$ share path
                Ok(format!("\\\\wsl${}", url.path().replace('/', "\\")))
            }
        } else {
            // non-file urls (http etc.) pass through unchanged
            Ok(url.as_str().to_string())
        }
    }
    /// Converts a windows path to linux `PathBuf`
    fn wsl_path_win2lin(wc: &WindowsConfig, path: &str) -> Result<PathBuf> {
        let err_fn = || Error::new(ErrorKind::NotFound, "invalid windows path");
        // only paths on the system drive (C:\) can be mapped onto wc.root
        if path.len() > 3 {
            let pfx = &path[..3];
            if matches!(pfx, "C:\\" | "c:\\") {
                let win_path = path[3..].replace('\\', "/");
                Ok(wc.root.join(win_path))
            } else {
                Err(err_fn())
            }
        } else {
            Err(err_fn())
        }
    }
    /// Converts a linux path to windows. We using `String` instead of `OsString` as
    /// return type because the `OsString` will be different b/w Windows & Linux.
    fn wsl_path_lin2win(wc: &WindowsConfig, path: impl AsRef<Path>) -> Result<String> {
        let path = path.as_ref();
        if let Ok(path) = path.strip_prefix(&wc.root) {
            // windows can access this path directly
            Ok(format!("C:\\{}", path.as_os_str().to_string_lossy()).replace('/', "\\"))
        } else {
            // windows needs to access it via network
            let wsl_hostname = get_wsl_distro_name(wc)?;
            Ok(format!(
                "\\\\wsl$\\{}{}",
                &wsl_hostname,
                path.as_os_str().to_string_lossy()
            )
            .replace('/', "\\"))
        }
    }
    /// Gets the WSL distro name
    fn get_wsl_distro_name(wc: &WindowsConfig) -> Result<String> {
        let err_fn = || Error::new(ErrorKind::Other, "unable to determine wsl distro name");
        // mostly we should be able to get it from the WSL_DISTRO_NAME env var
        if let Ok(wsl_hostname) = std::env::var("WSL_DISTRO_NAME") {
            Ok(wsl_hostname)
        } else {
            // but if not (e.g. if we were running as root), we can invoke
            // powershell.exe to determine pwd and from there infer the distro name
            let psexe = wc.powershell_path.as_ref().ok_or_else(err_fn)?;
            let mut cmd = Command::new(psexe);
            cmd.arg("-NoLogo")
                .arg("-NoProfile")
                .arg("-NonInteractive")
                .arg("-Command")
                .arg("$loc = Get-Location\nWrite-Output $loc.Path")
                .current_dir("/")
                .stdin(Stdio::null())
                .stderr(Stdio::null());
            log::debug!("running command: ${:?}", &cmd);
            let output_u8 = cmd.output()?.stdout;
            let output = String::from_utf8_lossy(&output_u8);
            let output = output.trim_end_matches('\\');
            // The output appears to be a PS provider-prefixed path such as
            // `...FileSystem::\\wsl$\<distro>`; `idx + 9` skips the 9 bytes of
            // `::\\wsl$\` leaving the distro name. NOTE(review): confirm this
            // holds for UNC-cwd output on all powershell versions.
            let idx = output.find("::\\\\").ok_or_else(err_fn)?;
            Ok((output[idx + 9..]).trim().to_string())
        }
    }
    /// Powershell script to get the default browser command.
    ///
    /// Adapted from https://stackoverflow.com/a/60972216
    const WSL_PS_SCRIPT: &str = r#"
$Signature = @"
using System;
using System.Runtime.InteropServices;
using System.Text;
public static class Win32Api
{
[DllImport("Shlwapi.dll", SetLastError = true, CharSet = CharSet.Auto)]
static extern uint AssocQueryString(AssocF flags, AssocStr str, string pszAssoc, string pszExtra,[Out] System.Text.StringBuilder pszOut, ref uint pcchOut);
public static string GetDefaultBrowser()
{
AssocF assocF = AssocF.IsProtocol;
AssocStr association = AssocStr.Command;
string assocString = "http";
uint length = 1024; // we assume 1k is sufficient memory to hold the command
var sb = new System.Text.StringBuilder((int) length);
uint ret = ret = AssocQueryString(assocF, association, assocString, null, sb, ref length);
return (ret != 0) ? null : sb.ToString();
}
[Flags]
internal enum AssocF : uint
{
IsProtocol = 0x1000,
}
internal enum AssocStr
{
Command = 1,
Executable,
}
}
"@
Add-Type -TypeDefinition $Signature
Write-Output $([Win32Api]::GetDefaultBrowser())
"#;
    /*#[cfg(test)]
    mod tests {
        use crate::open;
        #[test]
        fn test_url() {
            let _ = env_logger::try_init();
            assert!(open("https://github.com").is_ok());
        }
        #[test]
        fn test_linux_file() {
            let _ = env_logger::try_init();
            assert!(open("abc.html").is_ok());
        }
        #[test]
        fn test_windows_file() {
            let _ = env_logger::try_init();
            assert!(open("/mnt/c/T/abc.html").is_ok());
        }
    }*/
}
|
// svd2rust-generated accessors for the RCC_RNG2CKSELR register (RNG2 kernel
// clock selector). Only the 2-bit RNG2SRC field (bits 0:1) is modeled.
#[doc = "Register `RCC_RNG2CKSELR` reader"]
pub type R = crate::R<RCC_RNG2CKSELR_SPEC>;
#[doc = "Register `RCC_RNG2CKSELR` writer"]
pub type W = crate::W<RCC_RNG2CKSELR_SPEC>;
#[doc = "Field `RNG2SRC` reader - RNG2SRC"]
pub type RNG2SRC_R = crate::FieldReader;
#[doc = "Field `RNG2SRC` writer - RNG2SRC"]
pub type RNG2SRC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
impl R {
    #[doc = "Bits 0:1 - RNG2SRC"]
    #[inline(always)]
    pub fn rng2src(&self) -> RNG2SRC_R {
        // mask off the two low bits holding the field
        RNG2SRC_R::new((self.bits & 3) as u8)
    }
}
impl W {
    #[doc = "Bits 0:1 - RNG2SRC"]
    #[inline(always)]
    #[must_use]
    pub fn rng2src(&mut self) -> RNG2SRC_W<RCC_RNG2CKSELR_SPEC, 0> {
        RNG2SRC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // unsafe: the caller must ensure the raw value is valid for this register
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "This register is used to control the selection of the kernel clock for the RNG2.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rcc_rng2ckselr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`rcc_rng2ckselr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct RCC_RNG2CKSELR_SPEC;
impl crate::RegisterSpec for RCC_RNG2CKSELR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`rcc_rng2ckselr::R`](R) reader structure"]
impl crate::Readable for RCC_RNG2CKSELR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`rcc_rng2ckselr::W`](W) writer structure"]
impl crate::Writable for RCC_RNG2CKSELR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets RCC_RNG2CKSELR to value 0"]
impl crate::Resettable for RCC_RNG2CKSELR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Marker trait for the primitive sample formats accepted here.
///
/// `Copy + Default` lets buffers of samples be cheaply copied and
/// default-initialized. Implemented for `u8`, `i16` and `f32`.
pub trait SampleType: Copy + Default {}
impl SampleType for u8 {}
impl SampleType for i16 {}
impl SampleType for f32 {}
|
// Copyright (c) 2017 oic developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! [NOT IMPL]
//! All of these functions are used for getting and setting the various members of the dpiData
//! structure. The members of the structure can be manipulated directly but some languages
//! (such as Go) do not have the ability to manipulate structures containing unions or the ability
//! to process macros. For this reason, none of these functions perform any error checking. They are
//! assumed to be replacements for direct manipulation of the various members of the structure.
use chrono::{DateTime, Duration, TimeZone, UTC};
use odpi::structs::{ODPIData, ODPIDataValueUnion};
use util::ODPIStr;
/// This structure is used for passing data to and from the database for variables and for
/// manipulating object attributes and collection values.
pub struct Data {
    /// The ODPI-C data pointer.
    // NOTE(review): raw pointer with no Drop impl — `Data` neither owns nor
    // frees the pointee; lifetime management is the caller's concern.
    data: *mut ODPIData,
}
impl Data {
    /// Create a new `Data` struct.
    ///
    /// BUGFIX: the ODPI-C payload is now heap-allocated so the stored pointer
    /// stays valid after this constructor returns; the previous implementation
    /// took the address of a stack local (`&mut odpi_data as *mut ODPIData`),
    /// handing out a dangling pointer — UB on any later dereference.
    ///
    /// NOTE(review): the allocation is intentionally leaked here because
    /// `Data` has no `Drop` impl and never frees `data`; confirm the intended
    /// ownership model against the ODPI-C bindings.
    #[doc(hidden)]
    pub fn new(is_null: bool, val: ODPIDataValueUnion) -> Data {
        let boxed = Box::new(ODPIData {
            is_null: if is_null { 1 } else { 0 },
            value: val,
        });
        Data { data: Box::into_raw(boxed) }
    }
    /// Get the `data` value (raw pointer to the underlying `ODPIData`).
    #[doc(hidden)]
    pub fn data(&self) -> *mut ODPIData {
        self.data
    }
    /// Get the value as a boolean when the native type is DPI_NATIVE_TYPE_BOOLEAN.
    pub fn as_boolean(&self) -> bool {
        // ODPI-C encodes booleans as a C int; 1 means true.
        unsafe { (*self.data).value.as_boolean == 1 }
    }
    /// Get the value as an `i64` when the native type is DPI_NATIVE_TYPE_INT64.
    pub fn as_int64(&self) -> i64 {
        unsafe { (*self.data).value.as_int_64 }
    }
    /// Get the value as a `u64` when the native type is DPI_NATIVE_TYPE_UINT64.
    pub fn as_uint64(&self) -> u64 {
        unsafe { (*self.data).value.as_uint_64 }
    }
    /// Get the value as a `f32` when the native type is DPI_NATIVE_TYPE_FLOAT.
    pub fn as_float(&self) -> f32 {
        unsafe { (*self.data).value.as_float }
    }
    /// Get the value as a `f64` when the native type is DPI_NATIVE_TYPE_DOUBLE.
    pub fn as_double(&self) -> f64 {
        unsafe { (*self.data).value.as_double }
    }
    /// Get the value as a `String` when the native type is DPI_NATIVE_TYPE_BYTES.
    pub fn as_string(&self) -> String {
        unsafe {
            let odpi_bytes = (*self.data).value.as_bytes;
            let odpi_s = ODPIStr::new(odpi_bytes.ptr, odpi_bytes.length);
            odpi_s.into()
        }
    }
    /// Get the value as a `UTC` when the native type is DPI_NATIVE_TYPE_TIMESTAMP.
    pub fn as_utc(&self) -> DateTime<UTC> {
        let odpi_ts = unsafe { (*self.data).value.as_timestamp };
        let y = odpi_ts.year as i32;
        let m = odpi_ts.month as u32;
        let d = odpi_ts.day as u32;
        let h = odpi_ts.hour as u32;
        let mi = odpi_ts.minute as u32;
        let s = odpi_ts.second as u32;
        UTC.ymd(y, m, d).and_hms_nano(h, mi, s, odpi_ts.fsecond)
    }
    /// Get the value as a `Duration` when the native type is DPI_NATIVE_TYPE_INTERVAL_DS.
    pub fn as_duration(&self) -> Duration {
        let odpi_int_ds = unsafe { (*self.data).value.as_interval_ds };
        // accumulate each interval component into a single chrono Duration
        let mut dur = Duration::days(odpi_int_ds.days as i64);
        dur = dur + Duration::hours(odpi_int_ds.hours as i64);
        dur = dur + Duration::minutes(odpi_int_ds.minutes as i64);
        dur = dur + Duration::seconds(odpi_int_ds.seconds as i64);
        dur = dur + Duration::nanoseconds(odpi_int_ds.fseconds as i64);
        dur
    }
}
impl From<*mut ODPIData> for Data {
    /// Wraps an existing ODPI-C data pointer (no ownership is taken).
    fn from(data: *mut ODPIData) -> Data {
        // field-init shorthand instead of `data: data`
        Data { data }
    }
}
|
#![allow(clippy::too_many_arguments, dead_code)]
use ash::version::{DeviceV1_0, InstanceV1_0};
use ash::vk::{
Buffer, BufferCreateInfo, BufferUsageFlags, DescriptorSetLayout, DescriptorSetLayoutBinding,
DescriptorSetLayoutCreateInfo, DescriptorType, DeviceMemory, MemoryAllocateInfo,
MemoryMapFlags, MemoryPropertyFlags, PhysicalDevice, ShaderStageFlags, SharingMode,
};
use crossbeam::sync::ShardedLock;
use std::sync::Arc;
use vk_mem::{
Allocation, AllocationCreateFlags, AllocationCreateInfo, AllocationInfo, Allocator, MemoryUsage,
};
/// Bundle describing a Vulkan buffer produced by [create_buffer].
///
/// Depending on which creation path ran, either the vk-mem fields
/// (`allocation`/`allocation_info`) or the raw `device_memory` describe the
/// backing allocation.
pub(crate) struct VkBuffer {
    pub(crate) buffer: Buffer,
    // Backing device memory handle (populated on both paths).
    pub(crate) device_memory: DeviceMemory,
    // Host pointer through which the initial data was copied.
    pub(crate) mapped_memory: *mut std::ffi::c_void,
    // Set only when the buffer was created through the vk-mem allocator.
    pub(crate) allocation: Option<Allocation>,
    pub(crate) allocation_info: Option<AllocationInfo>,
}
/// Creates a Vulkan buffer of `buffer_size` bytes, copies `data` into it, and
/// returns the resulting [VkBuffer].
///
/// Two paths exist:
/// * `allocator` is `Some`: vk-mem performs allocation and (optionally)
///   persistent mapping;
/// * `allocator` is `None`: memory is allocated, bound and mapped manually
///   through the raw device.
///
/// NOTE(review): on the vk-mem path the copy goes through
/// `allocation_info.get_mapped_data()` even when `AllocationCreateFlags::NONE`
/// was selected (memory not HOST_VISIBLE|HOST_COHERENT); the mapped pointer
/// could then be null and the copy would be UB — confirm callers always pass
/// host-visible `memory_properties` when uploading.
/// NOTE(review): the manual path forwards only `memory_type_bits` to
/// [find_memory_type_index], which hard-codes HOST_VISIBLE|HOST_COHERENT and
/// ignores `memory_properties` — verify that matches the intent.
pub(crate) fn create_buffer<T>(
    device: &ash::Device,
    data: &[T],
    buffer_size: u64,
    allocator: Option<Arc<ShardedLock<Allocator>>>,
    instance: &ash::Instance,
    physical_device: PhysicalDevice,
    usage_flag: BufferUsageFlags,
    memory_properties: MemoryPropertyFlags,
) -> VkBuffer {
    let buffer_info = BufferCreateInfo::builder()
        .usage(usage_flag)
        .sharing_mode(SharingMode::EXCLUSIVE)
        .size(buffer_size);
    if let Some(allocator) = allocator {
        // vk-mem path: derive a MemoryUsage from the buffer usage flags.
        let allocation_info = AllocationCreateInfo {
            usage: match usage_flag {
                // pure staging buffer
                BufferUsageFlags::TRANSFER_SRC => MemoryUsage::CpuOnly,
                // vertex/index buffers that are not also staging sources
                x if ((x & BufferUsageFlags::VERTEX_BUFFER) != BufferUsageFlags::empty()
                    || (x & BufferUsageFlags::INDEX_BUFFER) != BufferUsageFlags::empty())
                    && (x & BufferUsageFlags::TRANSFER_SRC) == BufferUsageFlags::empty() =>
                {
                    MemoryUsage::GpuOnly
                }
                _ => MemoryUsage::CpuToGpu,
            },
            // request a persistent mapping only for host-visible+coherent memory
            flags: if (memory_properties & MemoryPropertyFlags::HOST_VISIBLE)
                == MemoryPropertyFlags::HOST_VISIBLE
                && (memory_properties & MemoryPropertyFlags::HOST_COHERENT)
                    == MemoryPropertyFlags::HOST_COHERENT
            {
                AllocationCreateFlags::MAPPED
            } else {
                AllocationCreateFlags::NONE
            },
            required_flags: memory_properties,
            preferred_flags: MemoryPropertyFlags::empty(),
            memory_type_bits: 0,
            pool: None,
            user_data: None,
        };
        let (buffer, allocation, allocation_info) = allocator
            .read()
            .expect("Failed to lock allocator.")
            .create_buffer(&buffer_info, &allocation_info)
            .expect("Failed to create staging buffer for Nuklear texture.");
        let device_memory = allocation_info.get_device_memory();
        let mapped = allocation_info.get_mapped_data();
        // see NOTE(review) above: `mapped` may be null when MAPPED wasn't requested
        unsafe {
            std::ptr::copy_nonoverlapping(
                data.as_ptr() as *const std::ffi::c_void,
                mapped as *mut std::ffi::c_void,
                buffer_size as usize,
            );
        }
        VkBuffer {
            buffer,
            device_memory,
            allocation: Some(allocation),
            allocation_info: Some(allocation_info),
            mapped_memory: mapped as *mut std::ffi::c_void,
        }
    } else {
        // manual path: create, allocate, bind, map and copy via the raw device
        unsafe {
            let buffer = device
                .create_buffer(&buffer_info, None)
                .expect("Failed to create staging buffer for Nuklear texture");
            let memory_requirements = device.get_buffer_memory_requirements(buffer);
            let allocation_info = MemoryAllocateInfo::builder()
                .allocation_size(memory_requirements.size)
                .memory_type_index(find_memory_type_index(
                    instance,
                    physical_device,
                    memory_requirements.memory_type_bits,
                ));
            let device_memory = device
                .allocate_memory(&allocation_info, None)
                .expect("Failed to allocate memory for staging buffer.");
            device
                .bind_buffer_memory(buffer, device_memory, 0)
                .expect("Failed to bind buffer memory.");
            // NOTE(review): mapping is unconditional here and will fail unless
            // the chosen memory type is HOST_VISIBLE — confirm callers only use
            // this path for host-visible buffers.
            let mapped = device
                .map_memory(device_memory, 0, buffer_size, MemoryMapFlags::empty())
                .expect("Failed to map memory for staging buffer.");
            std::ptr::copy_nonoverlapping(
                data.as_ptr() as *const std::ffi::c_void,
                mapped,
                buffer_size as usize,
            );
            VkBuffer {
                buffer,
                device_memory,
                allocation: None,
                allocation_info: None,
                mapped_memory: mapped,
            }
        }
    }
}
/// Builds a descriptor set layout consisting of a single binding with the
/// given binding index, descriptor type and shader-stage visibility.
pub(crate) fn create_descriptor_set_layout(
    device: &ash::Device,
    binding: u32,
    descriptor_type: DescriptorType,
    shader_stage: ShaderStageFlags,
) -> DescriptorSetLayout {
    // a fixed-size array is enough for the single binding
    let bindings = [DescriptorSetLayoutBinding::builder()
        .binding(binding)
        .descriptor_count(1)
        .descriptor_type(descriptor_type)
        .stage_flags(shader_stage)
        .build()];
    let create_info = DescriptorSetLayoutCreateInfo::builder().bindings(&bindings);
    unsafe {
        device
            .create_descriptor_set_layout(&create_info, None)
            .expect("Failed to create descriptor set layout.")
    }
}
/// Finds the index of a device memory type that is allowed by `memory_type`
/// (a bitmask of acceptable type indices) and is HOST_VISIBLE|HOST_COHERENT.
/// Falls back to index 0 when no type matches.
pub(crate) fn find_memory_type_index(
    instance: &ash::Instance,
    physical_device: PhysicalDevice,
    memory_type: u32,
) -> u32 {
    let required = MemoryPropertyFlags::HOST_VISIBLE | MemoryPropertyFlags::HOST_COHERENT;
    let properties = unsafe { instance.get_physical_device_memory_properties(physical_device) };
    (0..properties.memory_type_count)
        .find(|&i| {
            (memory_type & (1 << i)) != 0
                && (properties.memory_types[i as usize].property_flags & required) == required
        })
        .unwrap_or(0)
}
|
#[doc = "Register `AHBRSTR` reader"]
pub type R = crate::R<AHBRSTR_SPEC>;
#[doc = "Register `AHBRSTR` writer"]
pub type W = crate::W<AHBRSTR_SPEC>;
#[doc = "Field `IOPARST` reader - I/O port A reset"]
pub type IOPARST_R = crate::BitReader<IOPARST_A>;
#[doc = "I/O port A reset\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum IOPARST_A {
    #[doc = "1: Reset the selected module"]
    Reset = 1,
}
impl From<IOPARST_A> for bool {
    #[inline(always)]
    fn from(variant: IOPARST_A) -> Self {
        // Exhaustive match: the only variant carries the non-zero value 1.
        match variant {
            IOPARST_A::Reset => true,
        }
    }
}
impl IOPARST_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<IOPARST_A> {
        // The bit is either set (Reset) or has no enumerated meaning.
        if self.bits {
            Some(IOPARST_A::Reset)
        } else {
            None
        }
    }
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn is_reset(&self) -> bool {
        *self == IOPARST_A::Reset
    }
}
#[doc = "Field `IOPARST` writer - I/O port A reset"]
pub type IOPARST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, IOPARST_A>;
// Writer proxy: the only meaningful write for this field is asserting the
// reset bit, so a single convenience method is provided.
impl<'a, REG, const O: u8> IOPARST_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn reset(self) -> &'a mut crate::W<REG> {
        self.variant(IOPARST_A::Reset)
    }
}
#[doc = "Field `IOPBRST` reader - I/O port B reset"]
pub use IOPARST_R as IOPBRST_R;
#[doc = "Field `IOPCRST` reader - I/O port C reset"]
pub use IOPARST_R as IOPCRST_R;
#[doc = "Field `IOPDRST` reader - I/O port D reset"]
pub use IOPARST_R as IOPDRST_R;
#[doc = "Field `IOPFRST` reader - I/O port F reset"]
pub use IOPARST_R as IOPFRST_R;
#[doc = "Field `TSCRST` reader - Touch sensing controller reset"]
pub use IOPARST_R as TSCRST_R;
#[doc = "Field `ADC12RST` reader - ADC1 and ADC2 reset"]
pub use IOPARST_R as ADC12RST_R;
#[doc = "Field `IOPBRST` writer - I/O port B reset"]
pub use IOPARST_W as IOPBRST_W;
#[doc = "Field `IOPCRST` writer - I/O port C reset"]
pub use IOPARST_W as IOPCRST_W;
#[doc = "Field `IOPDRST` writer - I/O port D reset"]
pub use IOPARST_W as IOPDRST_W;
#[doc = "Field `IOPFRST` writer - I/O port F reset"]
pub use IOPARST_W as IOPFRST_W;
#[doc = "Field `TSCRST` writer - Touch sensing controller reset"]
pub use IOPARST_W as TSCRST_W;
#[doc = "Field `ADC12RST` writer - ADC1 and ADC2 reset"]
pub use IOPARST_W as ADC12RST_W;
// Field readers: each accessor tests its field's bit via a mask on the raw
// register value (equivalent to shifting the bit down and comparing).
impl R {
    #[doc = "Bit 17 - I/O port A reset"]
    #[inline(always)]
    pub fn ioparst(&self) -> IOPARST_R {
        IOPARST_R::new((self.bits & (1 << 17)) != 0)
    }
    #[doc = "Bit 18 - I/O port B reset"]
    #[inline(always)]
    pub fn iopbrst(&self) -> IOPBRST_R {
        IOPBRST_R::new((self.bits & (1 << 18)) != 0)
    }
    #[doc = "Bit 19 - I/O port C reset"]
    #[inline(always)]
    pub fn iopcrst(&self) -> IOPCRST_R {
        IOPCRST_R::new((self.bits & (1 << 19)) != 0)
    }
    #[doc = "Bit 20 - I/O port D reset"]
    #[inline(always)]
    pub fn iopdrst(&self) -> IOPDRST_R {
        IOPDRST_R::new((self.bits & (1 << 20)) != 0)
    }
    #[doc = "Bit 22 - I/O port F reset"]
    #[inline(always)]
    pub fn iopfrst(&self) -> IOPFRST_R {
        IOPFRST_R::new((self.bits & (1 << 22)) != 0)
    }
    #[doc = "Bit 24 - Touch sensing controller reset"]
    #[inline(always)]
    pub fn tscrst(&self) -> TSCRST_R {
        TSCRST_R::new((self.bits & (1 << 24)) != 0)
    }
    #[doc = "Bit 28 - ADC1 and ADC2 reset"]
    #[inline(always)]
    pub fn adc12rst(&self) -> ADC12RST_R {
        ADC12RST_R::new((self.bits & (1 << 28)) != 0)
    }
}
// Field writers: the const generic parameter selects each field's bit
// position within the 32-bit register.
impl W {
    #[doc = "Bit 17 - I/O port A reset"]
    #[inline(always)]
    #[must_use]
    pub fn ioparst(&mut self) -> IOPARST_W<AHBRSTR_SPEC, 17> {
        IOPARST_W::new(self)
    }
    #[doc = "Bit 18 - I/O port B reset"]
    #[inline(always)]
    #[must_use]
    pub fn iopbrst(&mut self) -> IOPBRST_W<AHBRSTR_SPEC, 18> {
        IOPBRST_W::new(self)
    }
    #[doc = "Bit 19 - I/O port C reset"]
    #[inline(always)]
    #[must_use]
    pub fn iopcrst(&mut self) -> IOPCRST_W<AHBRSTR_SPEC, 19> {
        IOPCRST_W::new(self)
    }
    #[doc = "Bit 20 - I/O port D reset"]
    #[inline(always)]
    #[must_use]
    pub fn iopdrst(&mut self) -> IOPDRST_W<AHBRSTR_SPEC, 20> {
        IOPDRST_W::new(self)
    }
    #[doc = "Bit 22 - I/O port F reset"]
    #[inline(always)]
    #[must_use]
    pub fn iopfrst(&mut self) -> IOPFRST_W<AHBRSTR_SPEC, 22> {
        IOPFRST_W::new(self)
    }
    #[doc = "Bit 24 - Touch sensing controller reset"]
    #[inline(always)]
    #[must_use]
    pub fn tscrst(&mut self) -> TSCRST_W<AHBRSTR_SPEC, 24> {
        TSCRST_W::new(self)
    }
    #[doc = "Bit 28 - ADC1 and ADC2 reset"]
    #[inline(always)]
    #[must_use]
    pub fn adc12rst(&mut self) -> ADC12RST_W<AHBRSTR_SPEC, 28> {
        ADC12RST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe because a raw write bypasses the typed field API; the caller
    // must supply a bit pattern that is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "AHB peripheral reset register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahbrstr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahbrstr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AHBRSTR_SPEC;
// The register is 32 bits wide.
impl crate::RegisterSpec for AHBRSTR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ahbrstr::R`](R) reader structure"]
impl crate::Readable for AHBRSTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ahbrstr::W`](W) writer structure"]
// Both modify-bitmaps are zero: every field is a plain read-write bit with
// no write-to-clear / write-to-set semantics.
impl crate::Writable for AHBRSTR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AHBRSTR to value 0"]
impl crate::Resettable for AHBRSTR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use {
crate::schema::{Human, NewHuman},
juniper::FieldResult,
std::{
collections::HashMap,
sync::{Arc, RwLock},
},
};
#[derive(Debug, Default)]
pub struct Database {
    // All known humans, keyed by their numeric id.
    humans: HashMap<u32, Human>,
    // Next id to hand out; incremented on every insert.
    counter: u32,
}
/// Arbitrary context data.
#[derive(Debug, Default)]
pub struct Context {
    pub database: Arc<RwLock<Database>>,
}
// Marker impl so this type can be used as a juniper execution context.
impl juniper::Context for Context {}
impl AsRef<Self> for Context {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}
impl Context {
    /// Looks up a single human by its string id.
    pub fn get_human(&self, id: &str) -> FieldResult<Human> {
        let key: u32 = id.parse()?;
        let db = self
            .database
            .read()
            .map_err(|_| "failed to acquire a lock")?;
        match db.humans.get(&key) {
            Some(human) => Ok(human.clone()),
            None => Err("no such human".into()),
        }
    }
    /// Returns every stored human (in arbitrary map order).
    pub fn all_humans(&self) -> FieldResult<Vec<Human>> {
        let db = self
            .database
            .read()
            .map_err(|_| "failed to acquire a lock")?;
        let humans: Vec<Human> = db.humans.values().cloned().collect();
        Ok(humans)
    }
    /// Inserts a new human under the next sequential id and returns it.
    pub fn add_human(&self, new_human: NewHuman) -> FieldResult<Human> {
        let mut db = self
            .database
            .write()
            .map_err(|_| "failed to acquire a lock")?;
        let id = db.counter;
        let human = Human {
            id: id.to_string(),
            name: new_human.name,
            appears_in: new_human.appears_in,
            home_planet: new_human.home_planet,
        };
        db.humans.insert(id, human.clone());
        db.counter += 1;
        Ok(human)
    }
}
|
use libc;
// Address-family constants re-exported from libc, narrowed to the u16 width
// used in netlink/sockaddr headers. Commented-out entries are aliases of
// other families (AF_LOCAL == AF_UNIX, AF_ROUTE == AF_NETLINK on Linux).
pub const AF_UNSPEC: u16 = libc::AF_UNSPEC as u16;
pub const AF_UNIX: u16 = libc::AF_UNIX as u16;
// pub const AF_LOCAL: u16 = libc::AF_LOCAL as u16;
pub const AF_INET: u16 = libc::AF_INET as u16;
pub const AF_AX25: u16 = libc::AF_AX25 as u16;
pub const AF_IPX: u16 = libc::AF_IPX as u16;
pub const AF_APPLETALK: u16 = libc::AF_APPLETALK as u16;
pub const AF_NETROM: u16 = libc::AF_NETROM as u16;
pub const AF_BRIDGE: u16 = libc::AF_BRIDGE as u16;
pub const AF_ATMPVC: u16 = libc::AF_ATMPVC as u16;
pub const AF_X25: u16 = libc::AF_X25 as u16;
pub const AF_INET6: u16 = libc::AF_INET6 as u16;
pub const AF_ROSE: u16 = libc::AF_ROSE as u16;
pub const AF_DECNET: u16 = libc::AF_DECnet as u16;
pub const AF_NETBEUI: u16 = libc::AF_NETBEUI as u16;
pub const AF_SECURITY: u16 = libc::AF_SECURITY as u16;
pub const AF_KEY: u16 = libc::AF_KEY as u16;
pub const AF_NETLINK: u16 = libc::AF_NETLINK as u16;
// pub const AF_ROUTE: u16 = libc::AF_ROUTE as u16;
pub const AF_PACKET: u16 = libc::AF_PACKET as u16;
pub const AF_ASH: u16 = libc::AF_ASH as u16;
pub const AF_ECONET: u16 = libc::AF_ECONET as u16;
pub const AF_ATMSVC: u16 = libc::AF_ATMSVC as u16;
pub const AF_RDS: u16 = libc::AF_RDS as u16;
pub const AF_SNA: u16 = libc::AF_SNA as u16;
pub const AF_IRDA: u16 = libc::AF_IRDA as u16;
pub const AF_PPPOX: u16 = libc::AF_PPPOX as u16;
pub const AF_WANPIPE: u16 = libc::AF_WANPIPE as u16;
pub const AF_LLC: u16 = libc::AF_LLC as u16;
pub const AF_CAN: u16 = libc::AF_CAN as u16;
pub const AF_TIPC: u16 = libc::AF_TIPC as u16;
pub const AF_BLUETOOTH: u16 = libc::AF_BLUETOOTH as u16;
pub const AF_IUCV: u16 = libc::AF_IUCV as u16;
pub const AF_RXRPC: u16 = libc::AF_RXRPC as u16;
pub const AF_ISDN: u16 = libc::AF_ISDN as u16;
pub const AF_PHONET: u16 = libc::AF_PHONET as u16;
pub const AF_IEEE802154: u16 = libc::AF_IEEE802154 as u16;
pub const AF_CAIF: u16 = libc::AF_CAIF as u16;
pub const AF_ALG: u16 = libc::AF_ALG as u16;
|
use rustimate_core::member::Member;
use std::collections::HashMap;
use uuid::Uuid;
/// Aggregated statistics for one round of estimation votes.
pub(crate) struct ResultSummary {
    // Members who cast a vote: (user id, raw vote text, numeric value if parseable).
    valid_votes: Vec<(Uuid, String, Option<f64>)>,
    // Members who did not vote at all: (user id, None).
    invalid_votes: Vec<(Uuid, Option<String>)>,
    mean: f64,
    median: f64,
    mode: f64
}
impl ResultSummary {
    /// Splits `votes` into voters/non-voters per member and computes
    /// mean/median/mode over the votes that parse as numbers.
    ///
    /// Note: "valid" means the member voted at all — a vote that does not
    /// parse as f64 still lands in `valid_votes` (with `None` as its number).
    pub(crate) fn new(members: &[&Member], votes: &[(Uuid, String)]) -> Self {
        let mut valid_votes = Vec::new();
        let mut invalid_votes = Vec::new();
        for m in members {
            // First vote recorded for this member, if any.
            let hit = votes
                .iter()
                .find_map(|v| if &v.0 == m.user_id() { Some(v.1.clone()) } else { None });
            match hit {
                Some(h) => {
                    let i = match h.parse::<f64>() {
                        Ok(n) => Some(n),
                        Err(_) => None
                    };
                    valid_votes.push((*m.user_id(), h, i))
                }
                None => invalid_votes.push((*m.user_id(), None))
            }
        }
        // Only the numerically-parseable votes take part in the statistics.
        let mut numbers: Vec<f64> = valid_votes.iter().flat_map(|x| x.2).collect();
        numbers.sort_by(|a, b| a.partial_cmp(b).expect("Uncomparable?"));
        let sum = numbers.iter().sum::<f64>();
        let mean = if numbers.is_empty() { 0.0 } else { sum / (numbers.len() as f64) };
        // NOTE(review): for an even count this is the upper-middle element,
        // not the average of the two middles — confirm that is intended.
        let median = if numbers.is_empty() { 0.0 } else { numbers[numbers.len() / 2] };
        let mode = if numbers.is_empty() {
            0.0
        } else {
            // Keyed by the string form so f64 (non-Hash) can be counted.
            let mut occurrences = HashMap::new();
            for value in numbers {
                *occurrences.entry(value.to_string()).or_insert(0) += 1;
            }
            // NOTE(review): ties are broken by HashMap iteration order, so the
            // mode is nondeterministic when several values share the max count.
            occurrences
                .into_iter()
                .max_by_key(|&(_, count)| count)
                .map(|(val, _)| val.parse::<f64>().expect("Did it change somehow?"))
                .expect("Mode attempted with zero numbers")
        };
        Self {
            valid_votes,
            invalid_votes,
            mean,
            median,
            mode
        }
    }
    // Members who voted, with their raw and parsed vote values.
    pub(crate) const fn valid_votes(&self) -> &Vec<(Uuid, String, Option<f64>)> {
        &self.valid_votes
    }
    // Members who did not vote.
    pub(crate) const fn invalid_votes(&self) -> &Vec<(Uuid, Option<String>)> {
        &self.invalid_votes
    }
    pub(crate) const fn mean(&self) -> f64 {
        self.mean
    }
    pub(crate) const fn median(&self) -> f64 {
        self.median
    }
    pub(crate) const fn mode(&self) -> f64 {
        self.mode
    }
}
|
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct Coord {
    pub x: i16,
    pub y: i16,
}
impl Coord {
    /// True when this point lies outside the inclusive rectangle spanned
    /// by `top_left` and `bottom_right`.
    pub fn is_outside(&self, top_left: Coord, bottom_right: Coord) -> bool {
        let inside_x = top_left.x <= self.x && self.x <= bottom_right.x;
        let inside_y = top_left.y <= self.y && self.y <= bottom_right.y;
        !(inside_x && inside_y)
    }
}
#[derive(Clone, Copy, Debug)]
pub enum Direction {
    Up,
    Down,
    Left,
    Right,
}
impl Direction {
    /// True when `d` points exactly opposite to `self`.
    pub fn is_opposing(&self, d: &Direction) -> bool {
        matches!(
            (self, d),
            (Direction::Up, Direction::Down)
                | (Direction::Down, Direction::Up)
                | (Direction::Left, Direction::Right)
                | (Direction::Right, Direction::Left)
        )
    }
}
/// One-step movement in a grid where `Up` decreases `y` (screen coordinates).
pub trait Move {
    fn r#move(&self, d: &Direction) -> Self;
    fn move_reverse(&self, d: &Direction) -> Self;
}
impl Move for Coord {
    /// Steps one cell in direction `d`.
    fn r#move(&self, d: &Direction) -> Coord {
        let (dx, dy): (i16, i16) = match d {
            Direction::Up => (0, -1),
            Direction::Down => (0, 1),
            Direction::Left => (-1, 0),
            Direction::Right => (1, 0),
        };
        Coord {
            x: self.x + dx,
            y: self.y + dy,
        }
    }
    /// Steps one cell against direction `d` — the exact inverse of `r#move`.
    fn move_reverse(&self, d: &Direction) -> Coord {
        // Same deltas as the forward move, subtracted instead of added.
        let (dx, dy): (i16, i16) = match d {
            Direction::Up => (0, -1),
            Direction::Down => (0, 1),
            Direction::Left => (-1, 0),
            Direction::Right => (1, 0),
        };
        Coord {
            x: self.x - dx,
            y: self.y - dy,
        }
    }
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use actix::prelude::*;
use anyhow::Result;
/// Control commands delivered to the node: start or stop the miner.
#[derive(Debug, Clone)]
pub enum NodeCommand {
    StartMiner(),
    StopMiner(),
}
// Actix message: handlers reply with an empty anyhow Result.
impl Message for NodeCommand {
    type Result = Result<()>;
}
|
extern crate nom;
use nom::{IResult, le_u64, le_u32, le_u16, le_u8, rest};
use std::cmp::Ordering;
//
// PFS file header
//
/// PFS file header: version plus the total payload size.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsHeader {
    pub header_version : u32,
    pub data_size : u32,
}
/// Parses the header: "PFS.HDR." magic followed by two LE u32 fields.
pub fn pfs_header(input : &[u8]) -> IResult<&[u8], PfsHeader> {
    do_parse!( input,
        tag!(b"PFS.HDR.") >>
        v: le_u32 >>
        s: le_u32 >>
        ( PfsHeader {
            header_version: v,
            data_size: s,
        }
        )
    )
}
//
// PFS file footer
//
/// PFS file footer: checksum and payload size, terminated by a magic tag.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsFooter {
    pub checksum : u32,
    pub data_size : u32,
}
/// Parses the footer: size, checksum, then the trailing "PFS.FTR." magic.
pub fn pfs_footer(input : &[u8]) -> IResult<&[u8], PfsFooter> {
    do_parse!(input,
        s : le_u32 >>
        c : le_u32 >>
        tag!(b"PFS.FTR.") >>
        ( PfsFooter {
            checksum: c,
            data_size: s,
        }
        )
    )
}
//
// GUID
//
/// Windows-style GUID layout (u32, u16, u16, 8 bytes), little-endian.
#[derive(Debug, PartialEq, Eq)]
pub struct Guid {
    pub data1 : u32,
    pub data2 : u16,
    pub data3 : u16,
    pub data4 : [u8; 8],
}
/// Parses a 16-byte GUID from the input.
pub fn guid (input : &[u8]) -> IResult<&[u8], Guid> {
    do_parse!(input,
        d1 : le_u32 >>
        d2 : le_u16 >>
        d3 : le_u16 >>
        d4 : count_fixed!(u8, le_u8, 8) >>
        ( Guid {
            data1 : d1,
            data2 : d2,
            data3 : d3,
            data4 : d4,
        }
        )
    )
}
//
// PFS section
//
/// One PFS section: fixed-size header describing four optional payloads
/// (data, data signature, metadata, metadata signature) that borrow from
/// the input buffer.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsSection<'a> {
    pub name : String,
    pub guid : Guid,
    pub header_version: u32,
    pub version_type : [u8; 4],
    pub version : [u16; 4],
    pub reserved : u64,
    pub data_size : u32,
    pub data_sig_size : u32,
    pub meta_size : u32,
    pub meta_sig_size : u32,
    pub unknown : [u8; 16],
    pub data : Option<&'a[u8]>,
    pub data_sig : Option<&'a[u8]>,
    pub meta : Option<&'a[u8]>,
    pub meta_sig : Option<&'a[u8]>,
}
/// Parses a section header, then slices out each payload whose declared
/// size is non-zero (zero-size payloads become `None`).
pub fn pfs_section (input : &[u8]) -> IResult<&[u8], PfsSection> {
    do_parse!(input,
        g : guid >>
        hv : le_u32 >>
        vt : count_fixed!(u8, le_u8, 4) >>
        v : count_fixed!(u16, le_u16, 4) >>
        r : le_u64 >>
        ds : le_u32 >>
        dss : le_u32 >>
        ms : le_u32 >>
        mss : le_u32 >>
        u : count_fixed!(u8, le_u8, 16) >>
        dp : cond_with_error!(ds > 0, take!(ds)) >>
        dsp : cond_with_error!(dss > 0, take!(dss)) >>
        mp : cond_with_error!(ms > 0, take!(ms)) >>
        msp : cond_with_error!(mss > 0, take!(mss)) >>
        ( PfsSection {
            name : String::new(), // Name will be populated later based on information section
            guid : g,
            header_version : hv,
            version_type : vt,
            version : v,
            reserved : r,
            data_size : ds,
            data_sig_size : dss,
            meta_size : ms,
            meta_sig_size : mss,
            unknown : u,
            data : dp,
            data_sig: dsp,
            meta : mp,
            meta_sig: msp,
        }
        )
    )
}
//
// Complete PFS file
//
/// A complete PFS file: header, all sections, and the footer.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsFile<'a> {
    pub header : PfsHeader,
    pub sections : Vec<PfsSection<'a> >,
    pub footer : PfsFooter,
}
/// Parses a whole file: sections are consumed until the footer matches.
pub fn pfs_file (input : &[u8]) -> IResult<&[u8], PfsFile> {
    do_parse!(input,
        h : pfs_header >>
        sf : many_till!(pfs_section, pfs_footer) >>
        ( PfsFile {
            header: h,
            sections: sf.0,
            footer: sf.1,
        }
        )
    )
}
//
// PFS zlib-compressed section
//
/// A zlib-compressed PFS section: declared size plus the raw payload bytes.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsCompressedSection<'a> {
    pub size : u32,
    pub data : &'a[u8],
}
/// Parses a compressed section: size, a fixed 11-byte magic, one skipped
/// byte, the payload, then a 16-byte footer that is discarded.
pub fn pfs_compressed_section (input : &[u8]) -> IResult<&[u8], PfsCompressedSection> {
    do_parse!(input,
        s : le_u32 >> // Obtain data size
        tag!(b"\xAA\xEE\xAA\x76\x1B\xEC\xBB\x20\xF1\xE6\x51") >> // Check for compressed section header
        take!(1) >> // Skip 1 byte
        d : take!(s) >> // Obtain payload
        take!(16) >> // Skip footer
        ( PfsCompressedSection {
            size: s,
            data: d,
        }
        )
    )
}
//
// PFS chunk
//
/// One chunk of a split PFS payload; chunks are reassembled in
/// `order_number` order (hence the `Ord` impl below).
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct PfsChunk<'a> {
    pub order_number : u16,
    pub data : &'a[u8],
}
// Chunks sort purely by their order number.
impl<'a> Ord for PfsChunk<'a> {
    fn cmp(&self, other: &PfsChunk<'a>) -> Ordering {
        self.order_number.cmp(&other.order_number)
    }
}
impl<'a> PartialOrd for PfsChunk<'a> {
    fn partial_cmp(&self, other: &PfsChunk<'a>) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// Parses a chunk: the order number sits at offset 0x3E inside a 0x248-byte
/// header; everything after the header is chunk data.
pub fn pfs_chunk (input : &[u8]) -> IResult<&[u8], PfsChunk> {
    do_parse!(input,
        take!(0x3E) >> // Skip first 0x3E bytes
        on : le_u16 >> // Get order number
        take!(0x248 - 0x40) >> // Skip the rest of chunk header
        d: rest >>
        ( PfsChunk {
            order_number: on,
            data : d,
        }
        )
    )
}
//
// PFS information section
//
/// Entry of the PFS information section, mapping a section GUID to its
/// human-readable name and version.
#[derive(Debug, PartialEq, Eq)]
pub struct PfsInfoSection {
    pub header_version : u32,
    pub guid : Guid,
    pub version : [u16; 4],
    pub version_type : [u8; 4],
    pub name : String,
}
/// Parses one info entry; the name is a length-prefixed UTF-16LE string
/// followed by a two-byte NUL terminator.
pub fn pfs_info_section (input : &[u8]) -> IResult<&[u8], PfsInfoSection> {
    do_parse!(input,
        hv : le_u32 >>
        g : guid >>
        v : count_fixed!(u16, le_u16, 4) >>
        vt : count_fixed!(u8, le_u8, 4) >>
        l : le_u16 >>
        n : count!(le_u16, l as usize) >>
        tag!("\x00\x00") >>
        ( PfsInfoSection {
            header_version: hv,
            guid : g,
            version : v,
            version_type : vt,
            name : String::from_utf16_lossy(&n),
        }
        )
    )
}
/// Parses the full information block: as many info entries as the input holds.
pub fn pfs_info (input : &[u8]) -> IResult<&[u8], Vec<PfsInfoSection>> {
    do_parse!(input,
        v : many0!(complete!(pfs_info_section)) >>
        ( v )
    )
}
//! Pagination support for Iron requests.
//!
//! Contains a trait, `Paginate`, that is implemented for `Iterator`, that can be used to subset an
//! iterator based on Iron request parameters.
use Result;
use iron::{Plugin, Request};
use params::{Params, Value};
use std::iter::{Skip, Take};
/// The default page, if one is not specified in the request.
///
/// We 1-index pages because Github does.
pub const DEFAULT_PAGE: usize = 1;
/// The default number of items per page.
pub const DEFAULT_PER_PAGE: usize = 30;
/// The maximum number of items that can be returned per page.
///
/// If more than this amount is requested, the number of items returned is clamped to 100.
pub const MAX_PER_PAGE: usize = 100;
/// Use an Iron request, specifically its parameters, to paginate over an iterator.
///
/// The parameters used:
///
/// - `per_page`: How many items to return per page. Defaults to `DEFAULT_PER_PAGE`.
/// - `page`: The (1-indexed) page to return. This is 1-indexed because Github's is, and I'm just
/// copying them.
///
/// An example paginated request might look like this:
///
/// ```bash
/// curl http://localhost:3000/cameras/ATLAS_CAM/images?page=2&per_page=10
/// ```
pub trait Paginate<I> {
    /// Creates a pagination iterator from a request.
    fn paginate(self, request: &mut Request) -> Result<Take<Skip<I>>>;
}
// Parsed `page`/`per_page` query parameters, already clamped to valid ranges.
struct Pagination {
    page: usize,
    per_page: usize,
}
impl<I: Iterator> Paginate<I> for I {
    // Subset any iterator by skipping the preceding pages and taking one page.
    fn paginate(self, request: &mut Request) -> Result<Take<Skip<I>>> {
        let pagination = Pagination::new(request)?;
        Ok(self.skip(pagination.skip()).take(pagination.take()))
    }
}
impl Pagination {
    /// Builds pagination settings from the request's query parameters.
    ///
    /// `page` is clamped to >= 1 and `per_page` to 1..=MAX_PER_PAGE
    /// (0 falls back to the default; parse errors propagate as `Err`).
    pub fn new(request: &mut Request) -> Result<Pagination> {
        // NOTE(review): failure to extract Params panics (unwrap) — confirm
        // that is acceptable for this middleware stack.
        let map = request.get::<Params>().unwrap();
        let mut page = match map.find(&["page"]) {
            Some(&Value::U64(page)) => page as usize,
            Some(&Value::String(ref page)) => page.parse::<usize>()?,
            _ => DEFAULT_PAGE,
        };
        if page == 0 {
            page = 1;
        }
        let mut per_page = match map.find(&["per_page"]) {
            Some(&Value::U64(per_page)) => per_page as usize,
            Some(&Value::String(ref per_page)) => per_page.parse::<usize>()?,
            _ => DEFAULT_PER_PAGE,
        };
        if per_page >= MAX_PER_PAGE {
            per_page = MAX_PER_PAGE;
        } else if per_page == 0 {
            per_page = DEFAULT_PER_PAGE;
        }
        Ok(Pagination {
            page: page,
            per_page: per_page,
        })
    }
    /// Number of items to skip: all full pages before the requested one.
    pub fn skip(&self) -> usize {
        self.per_page * (self.page - 1)
    }
    /// Number of items on the requested page.
    pub fn take(&self) -> usize {
        self.per_page
    }
}
|
use crate::data_buffer::DataBuffer;
use tui::buffer::Buffer;
use tui::layout::Rect;
use tui::style::Color;
use tui::widgets::Widget;
/// Terminal widget that renders an audio waveform from a borrowed buffer.
pub struct WaveWidget<'a> {
    waveform: &'a DataBuffer,
}
impl<'a> WaveWidget<'a> {
    /// Wraps a sample buffer for rendering; no data is copied.
    pub fn new(waveform: &'a DataBuffer) -> Self {
        Self { waveform }
    }
}
impl<'a> Widget for WaveWidget<'a> {
    /// Draws the WaveWidget's waveform onto the terminal buffer: a green `=`
    /// baseline across the vertical middle, and the most recent `width`
    /// samples plotted as cyan `#` characters.
    ///
    /// NOTE(review): cells are addressed from the buffer origin; this assumes
    /// the widget is rendered with `area` at (0, 0) — confirm if areas with
    /// non-zero x/y offsets are ever used.
    fn draw(&mut self, area: Rect, buf: &mut Buffer) {
        let Rect { width, height, .. } = area;
        let waveform_len = self.waveform.len();
        // `>=` (was `>`): exactly `width` samples already fills every column,
        // since the skip below only needs `waveform_len - width >= 0`.
        assert!(waveform_len >= width.into());
        // Baseline through the vertical centre.
        for col in 1..=width {
            buf.get_mut(col, height / 2)
                .set_char('=')
                .set_fg(Color::Green);
        }
        // Plot the last `width` samples, one per column.
        for (index, &sample) in self
            .waveform
            .iter()
            .skip(waveform_len - usize::from(width))
            .enumerate()
        {
            let col = index as u16 + 1;
            // Scale (might clip) sample to see more
            let norm_y = sample * 5.;
            let row = ((norm_y + 0.5) * f32::from(height)).floor() as u16;
            // If would clip, don't render anything
            if row > 0 && row < height {
                buf.get_mut(col, row).set_char('#').set_fg(Color::Cyan);
            }
        }
    }
}
|
use anyhow::Error;
use serde_derive::{Deserialize, Serialize};
// use wasm_bindgen::JsValue;
use yew::format::Json;
// use yew::services::fetch::{FetchService, FetchTask, Request, Response};
use yew::services::websocket::{WebSocketService, WebSocketStatus, WebSocketTask};
use yew::{html, Component, ComponentLink, Html, ShouldRender};
use web_sys::console;
// Whether to send a websocket frame as binary (true) or text (false).
type AsBinary = bool;
// pub enum Format {
//     Json,
// }
/// User/network-initiated websocket actions handled by the component.
pub enum WsAction {
    Connect,
    SendData(AsBinary),
    Disconnect,
    // Connection closed or errored; treated as a disconnect.
    Lost,
}
/// Top-level component messages: an action, or a (possibly failed) response.
pub enum Msg {
    WsAction(WsAction),
    WsReady(Result<WsResponse, Error>),
}
impl From<WsAction> for Msg {
    fn from(action: WsAction) -> Self {
        Msg::WsAction(action)
    }
}
// /// This type is used to parse data from `./static/data.json` file and
// /// have to correspond the data layout from that file.
// #[derive(Deserialize, Debug)]
// pub struct DataFromFile {
//     value: u32,
// }
/// This type is used as a request which sent to websocket connection.
#[derive(Serialize, Debug)]
struct WsRequest {
    value: u32,
}
/// This type is an expected response from a websocket connection.
#[derive(Deserialize, Debug)]
pub struct WsResponse {
    value: Option<u32>,
}
/// Root component state for the websocket demo.
pub struct Model {
    link: ComponentLink<Model>,
    // Most recently received value from the websocket, if any.
    data: Option<u32>,
    // Active websocket task; `None` while disconnected.
    ws: Option<WebSocketTask>,
}
impl Model {
    /// Renders the fetched value, or a placeholder while nothing has arrived.
    fn view_data(&self) -> Html {
        if let Some(value) = self.data {
            html! {
                <p>{ value }</p>
            }
        } else {
            html! {
                <p>{ "Data hasn't fetched yet." }</p>
            }
        }
    }
}
impl Component for Model {
    type Message = Msg;
    type Properties = ();
    /// Creates the component with no data and no open websocket.
    fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
        Self {
            link,
            data: None,
            ws: None,
        }
    }
    /// Handles websocket lifecycle actions and incoming responses.
    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        match msg {
            Msg::WsAction(action) => match action {
                WsAction::Connect => {
                    let callback = self.link.callback(|Json(data)| Msg::WsReady(data));
                    // Map socket status changes onto component messages; a
                    // clean open produces no message at all.
                    let notification = self.link.batch_callback(|status| match status {
                        WebSocketStatus::Opened => vec![],
                        WebSocketStatus::Closed | WebSocketStatus::Error => {
                            vec![WsAction::Lost.into()]
                        }
                    });
                    let task = WebSocketService::connect(
                        "ws://localhost:8000/ws/",
                        callback,
                        notification,
                    )
                    .unwrap();
                    self.ws = Some(task);
                    true
                }
                WsAction::SendData(binary) => {
                    let request = WsRequest { value: 321 };
                    if binary {
                        self.ws.as_mut().unwrap().send_binary(Json(&request));
                    } else {
                        self.ws.as_mut().unwrap().send(Json(&request));
                    }
                    false
                }
                WsAction::Disconnect => {
                    self.ws.take();
                    true
                }
                WsAction::Lost => {
                    self.ws = None;
                    true
                }
            },
            Msg::WsReady(response) => {
                // `console::log_1` is a safe web-sys binding; the previous
                // `unsafe` blocks were unnecessary (unused_unsafe warnings).
                console::log_1(&"hier".into());
                match response {
                    Ok(ws_response) => {
                        console::log_1(&ws_response.value.into());
                        self.data = ws_response.value;
                    }
                    Err(error) => {
                        let kek = format!("{}", error);
                        console::log_1(&kek.into());
                        self.data = None;
                    }
                }
                true
            }
        }
    }
    fn change(&mut self, _: Self::Properties) -> ShouldRender {
        false
    }
    /// Renders the fetched value plus the websocket control buttons.
    fn view(&self) -> Html {
        html! {
            <div class="bg-white space-x-2">
                { self.view_data() }
                <button class="bg-blue-500" disabled=self.ws.is_some()
                    onclick=self.link.callback(|_| WsAction::Connect)>
                    { "Connect To WebSocket" }
                </button>
                <button class="bg-blue-500" disabled=self.ws.is_none()
                    onclick=self.link.callback(|_| WsAction::SendData(false))>
                    { "Send To WebSocket" }
                </button>
                <button class="bg-blue-500" disabled=self.ws.is_none()
                    onclick=self.link.callback(|_| WsAction::SendData(true))>
                    { "Send To WebSocket [binary]" }
                </button>
                <button class="bg-blue-500" disabled=self.ws.is_none()
                    onclick=self.link.callback(|_| WsAction::Disconnect)>
                    { "Close WebSocket connection" }
                </button>
            </div>
        }
    }
}
|
// @TODO remove
use crate::app::MessageMapper;
use std::fmt;
type _HookFn = Box<dyn FnMut(&web_sys::Node)>; // todo
/// Formats the presence of a hook handler (closures themselves aren't Debug).
pub(crate) fn fmt_hook_fn<T>(h: &Option<T>) -> &'static str {
    if h.is_some() {
        "Some(.. a dynamic handler ..)"
    } else {
        "None"
    }
}
/// Optional callbacks fired at element lifecycle points (mount / update /
/// unmount), each optionally emitting a message of type `Ms`.
pub struct LifecycleHooks<Ms> {
    pub did_mount: Option<DidMount<Ms>>,
    pub did_update: Option<DidUpdate<Ms>>,
    pub will_unmount: Option<WillUnmount<Ms>>,
}
impl<Ms> LifecycleHooks<Ms> {
    /// All hooks absent; `const` so it can be used in static contexts.
    pub const fn new() -> Self {
        Self {
            did_mount: None,
            did_update: None,
            will_unmount: None,
        }
    }
}
impl<Ms> fmt::Debug for LifecycleHooks<Ms> {
    /// Debug-formats each hook as a short presence marker via `fmt_hook_fn`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // All three fields use `{}`: `fmt_hook_fn` returns a plain &str marker,
        // so the previous `{:?}` on the first two added inconsistent quoting.
        write!(
            f,
            "LifecycleHooks {{ did_mount:{}, did_update:{}, will_unmount:{} }}",
            fmt_hook_fn(&self.did_mount),
            fmt_hook_fn(&self.did_update),
            fmt_hook_fn(&self.will_unmount)
        )
    }
}
// Maps the message type of every hook from `Ms` to `OtherMs`, keeping the
// DOM-node action closures untouched.
impl<Ms: 'static, OtherMs: 'static> MessageMapper<Ms, OtherMs> for LifecycleHooks<Ms> {
    type SelfWithOtherMs = LifecycleHooks<OtherMs>;
    fn map_msg(self, f: impl FnOnce(Ms) -> OtherMs + 'static + Clone) -> Self::SelfWithOtherMs {
        LifecycleHooks {
            did_mount: self.did_mount.map(|d| DidMount {
                actions: d.actions,
                message: d.message.map(f.clone()),
            }),
            did_update: self.did_update.map(|d| DidUpdate {
                actions: d.actions,
                message: d.message.map(f.clone()),
            }),
            will_unmount: self.will_unmount.map(|d| WillUnmount {
                actions: d.actions,
                message: d.message.map(f),
            }),
        }
    }
}
/// Hook run after the element is mounted; `actions` receives the DOM node,
/// `message` is optionally dispatched afterwards.
pub struct DidMount<Ms> {
    pub actions: Box<dyn FnMut(&web_sys::Node)>,
    pub message: Option<Ms>,
}
/// Hook run after the element is updated (same shape as `DidMount`).
pub struct DidUpdate<Ms> {
    pub actions: Box<dyn FnMut(&web_sys::Node)>,
    pub message: Option<Ms>,
}
/// Hook run just before the element is removed (same shape as `DidMount`).
pub struct WillUnmount<Ms> {
    pub actions: Box<dyn FnMut(&web_sys::Node)>,
    pub message: Option<Ms>,
}
/// A constructor for `DidMount`, to be used in the API
pub fn did_mount<Ms>(actions: impl FnMut(&web_sys::Node) + 'static) -> DidMount<Ms> {
    // `actions` already has the boxed closure's exact signature; box it
    // directly instead of wrapping it in an identical intermediate closure.
    DidMount {
        actions: Box::new(actions),
        message: None,
    }
}
/// A constructor for `DidUpdate`, to be used in the API
pub fn did_update<Ms>(actions: impl FnMut(&web_sys::Node) + 'static) -> DidUpdate<Ms> {
    DidUpdate {
        actions: Box::new(actions),
        message: None,
    }
}
/// A constructor for `WillUnmount`, to be used in the API
pub fn will_unmount<Ms>(actions: impl FnMut(&web_sys::Node) + 'static) -> WillUnmount<Ms> {
    WillUnmount {
        actions: Box::new(actions),
        message: None,
    }
}
|
use ::MessageHandler;
use std::sync::mpsc::{Sender, Receiver,TryIter};
use errors::*;
use mio::{Evented, Poll, Token, Ready, PollOpt, Registration,SetReadiness};
use std::io::Result as IOResult;
/// Builds the write side of the pipeline: a pollable `MessageSource` and the
/// matching `MessageSink` that wakes pollers when a message is queued.
pub fn write_pipeline(sender: Sender<MsgBuf>, receiver: Receiver<MsgBuf>) -> (MessageSource, MessageSink) {
    // Pair a mio Registration (readable side) with its SetReadiness handle
    // (writable side).
    let (registration, set_readiness) = Registration::new2();
    let source = MessageSource { registration, receiver };
    let sink = MessageSink { sender, set_readiness };
    (source, sink)
}
/// A payload plus the index of the connection it belongs to.
#[derive(Debug, Clone)]
pub struct MsgBuf {
    pub conn_idx: usize,
    pub buf: Vec<u8>,
}
impl MsgBuf {
    /// Bundles a payload with its connection index.
    pub fn new(conn_idx: usize, buf: Vec<u8>) -> MsgBuf {
        MsgBuf { conn_idx, buf }
    }
    /// Sentinel message (connection index 111_111, empty payload) used to
    /// signal shutdown through the pipeline.
    pub fn shutdown_msg() -> MsgBuf {
        MsgBuf {
            conn_idx: 111_111,
            buf: Vec::new(),
        }
    }
}
/// Sending half of the write pipeline: queues a message and flags the paired
/// `MessageSource` as writable so pollers wake up.
#[derive(Debug, Clone)]
pub struct MessageSink {
    sender: Sender<MsgBuf>,
    set_readiness: SetReadiness,
}
impl MessageSink {
    /// Queues `msg` and marks the registration ready; either step may fail
    /// if the other end has gone away.
    pub fn send_message(&self, msg: MsgBuf) -> Result<()> {
        self.sender.send(msg)?;
        self.set_readiness.set_readiness(Ready::writable())?;
        Ok(())
    }
}
/// Receiving half of the write pipeline; pollable via its mio registration.
pub struct MessageSource {
    receiver: Receiver<MsgBuf>,
    registration: Registration,
}
impl MessageSource {
    /// Drains currently queued messages without blocking.
    pub fn try_iter(&self) -> TryIter<MsgBuf> {
        self.receiver.try_iter()
    }
}
// Delegate mio event registration to the inner Registration so the source
// can participate in a Poll loop.
impl Evented for MessageSource {
    fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> IOResult<()> {
        self.registration.register(poll, token, interest, opts)
    }
    fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> IOResult<()> {
        self.registration.reregister(poll, token, interest, opts)
    }
    fn deregister(&self, poll: &Poll) -> IOResult<()> {
        #[allow(deprecated)]
        self.registration.deregister(poll)
    }
}
/// Blocking worker: receives raw buffers, runs them through a
/// deserialize → process → serialize chain, and writes replies to the sink.
pub struct Worker<'a, I: 'a, O: 'a> {
    handler: &'a MessageHandler<Req=I, Resp=O>,
    read_rx: Receiver<MsgBuf>,
    sink: MessageSink,
}
impl<'a, I, O> Worker<'a, I, O> {
    pub fn new(handler: &'a MessageHandler<Req=I, Resp=O>, read_rx: Receiver<MsgBuf>,
               sink: MessageSink) -> Worker<'a, I, O> {
        Worker {
            handler: handler,
            read_rx: read_rx,
            sink: sink,
        }
    }
    /// Runs until a channel closes; per-message handler errors are logged
    /// and skipped, only channel failures end the loop.
    pub fn run(&mut self) -> Result<()> {
        loop {
            if !self.readloop() {
                info!("worker exiting");
                return Ok(());
            }
        }
    }
    // Each step below returns `false` only on a fatal (channel) error;
    // `true` means "keep going", even after a bad message.
    fn readloop(&self) -> bool {
        match self.read_rx.recv() {
            Ok(buf) => {
                self.handle_input(buf)
            },
            Err(e) => {
                info!("error receiving input in worker: {:?}. presuming shutdown", e);
                false
            }
        }
    }
    fn handle_input(&self, buf: MsgBuf) -> bool {
        match self.handler.deserialize(buf.buf) {
            Ok(req) => {
                self.process_and_reply(buf.conn_idx, req)
            },
            Err(e) => {
                warn!("unable to deserialize message: {:?}", e);
                true
            }
        }
    }
    fn process_and_reply(&self, conn_idx: usize, req: I) -> bool {
        match self.handler.process(req) {
            Ok(resp) => {
                self.serialize_and_write(conn_idx, resp)
            },
            Err(e) => {
                warn!("unable to process message: {:?}", e);
                true
            }
        }
    }
    fn serialize_and_write(&self, conn_idx: usize, resp: O) -> bool {
        match self.handler.serialize(resp) {
            Ok(buf) => {
                self.write_response(MsgBuf::new(conn_idx, buf))
            },
            Err(e) => {
                warn!("unable to serialize response: {:?}", e);
                true
            }
        }
    }
    fn write_response(&self, buf: MsgBuf) -> bool {
        match self.sink.send_message(buf) {
            Ok(()) => true,
            Err(e) => {
                info!("error sending output in worker: {:?}. presuming shutdown", e);
                false
            }
        }
    }
}
mod connect;
mod disconnect;
mod level_room;
mod push_message;
pub use connect::Connect;
pub use disconnect::Disconnect;
pub use push_message::MessagePayload;
pub use push_message::MessagePayloadHeader;
mod chat_server;
pub use chat_server::ChatServer;
|
use rppal::gpio::*;
use std::time::*;
use std::thread::sleep;
/// Drains the capacitor by driving both pins low, then re-arms the inputs:
/// `gnd` floats (no pull) to avoid recharging, `pin` gets a pull-down so a
/// button push can be detected as a high level.
fn discharge(gnd: &mut IoPin, pin: &mut IoPin) {
    // pull to ground
    gnd.set_mode(Mode::Output);
    gnd.set_low();
    pin.set_mode(Mode::Output);
    pin.set_low();
    // approx discharge time <1ms (220nF)
    sleep(Duration::from_millis(10));
    // no pull on ground to avoid charging
    gnd.set_mode(Mode::Input);
    gnd.set_pullupdown(PullUpDown::Off);
    // pulldown on pin to detect push
    pin.set_mode(Mode::Input);
    pin.set_pullupdown(PullUpDown::PullDown);
}
/// Charges the capacitor through the button's resistor and returns the
/// elapsed charge time in microseconds — a proxy for the resistor value.
///
/// NOTE(review): the `us` counter only bounds loop iterations; each 1µs
/// sleep takes far longer in practice, so the real timeout exceeds 10ms and
/// the returned value is wall-clock `elapsed`, not `us` — TODO confirm.
fn measure_resistor(gnd: &mut IoPin, pin: &mut IoPin) -> u128 {
    // start charging condensator
    gnd.set_mode(Mode::Output);
    gnd.set_low();
    pin.set_pullupdown(PullUpDown::Off);
    let start = Instant::now();
    // wait for charge
    let mut us = 0;
    while pin.is_low() {
        sleep(Duration::from_micros(1));
        us += 1;
        if us > 10000 { break }
    }
    return start.elapsed().as_micros();
}
/// Measures one button push: returns `(charge_time_us, hold_time_ms)`, or
/// `None` when the level dropped again immediately (bounce / fake push).
fn measure_push(gnd: &mut IoPin, pin: &mut IoPin) -> Option<(u128,u128)> {
    // detect fake push and let time for current to establish
    sleep(Duration::from_millis(1));
    if pin.is_low() { return None }
    let start = Instant::now();
    // which button ?
    let resistor = measure_resistor(gnd, pin);
    // wait for release
    pin.set_pullupdown(PullUpDown::PullDown);
    let mut ms = 0;
    while pin.is_high() {
        sleep(Duration::from_millis(1));
        ms += 1;
        // give up after ~10s held down
        if ms > 10000 { break }
    }
    let duration = start.elapsed().as_millis();
    // leave the pins re-armed for the next push
    discharge(gnd, pin);
    return Some((resistor, duration));
}
/// True when `value` is within ±20% of `expected`; tiny expected values
/// (< 10) just require `value` below twice the expectation.
fn in_range(value: u128, expected: u128) -> bool {
    if expected < 10 {
        value < expected * 2
    } else {
        value > (expected * 8 / 10) && value < (expected * 12 / 10)
    }
}
/// Physical buttons on the device, identified by their series resistor.
#[derive(Debug)]
enum Button {
    Snooze, B1, B2, Time, SpkrLow, SpkrHigh, Left, Right, OnOff
}
/// Identifies which button was pushed by matching the measured charge time
/// (µs) against per-button expected values; each input pin carries its own
/// group of buttons. Returns `None` for bounces or unrecognised values.
fn detect_button(gnd: &mut IoPin, pin1: &mut IoPin, pin2: &mut IoPin) -> Option<Button> {
    if pin1.is_high() {
        let (resistor, time) = measure_push(gnd, pin1)?;
        let button =
            if in_range(resistor,1700) { Some(Button::Right) }
            else if in_range(resistor, 750) { Some(Button::Left) }
            else if in_range(resistor, 190) { Some(Button::SpkrHigh) }
            else if in_range(resistor, 5) { Some(Button::SpkrLow) }
            else { println!("resistor {}", resistor); None};
        println!("Pushed for {} ms", time);
        return button;
    }
    else if pin2.is_high() {
        let (resistor, time) = measure_push(gnd, pin2)?;
        let button =
            if in_range(resistor, 1680) { Some(Button::B1) }
            else if in_range(resistor, 3480) { Some(Button::B2) }
            else if in_range(resistor, 190) { Some(Button::Time) }
            else if in_range(resistor, 750) { Some(Button::Snooze) }
            else { println!("resistor {}", resistor); None };
        println!("Pushed for {} ms", time);
        return button;
    }
    return None;
}
/// Polls the two button sense lines and reports each detected press.
fn main() {
    println!("Hello, world!");
    let gpio = Gpio::new().unwrap();
    // Three GPIO lines: two sense lines (pin_g, pin_j — presumably one per
    // resistor ladder; TODO confirm wiring) and pin_b, used by `discharge`
    // as the shared ground/charge line.
    let mut pin_g = gpio.get(22).unwrap().into_io(Mode::Input);
    let mut pin_j = gpio.get(23).unwrap().into_io(Mode::Input);
    let mut pin_b = gpio.get(24).unwrap().into_io(Mode::Input);
    discharge(&mut pin_b, &mut pin_j);
    discharge(&mut pin_b, &mut pin_g);
    // Remember the idle level of each sense line so only edges trigger a
    // measurement.
    let mut j_mem = pin_j.is_high();
    let mut g_mem = pin_g.is_high();
    // NOTE(review): busy-wait loop with no sleep — pegs one CPU core.
    loop {
        let j = pin_j.is_high();
        let g = pin_g.is_high();
        if j != j_mem || g != g_mem {
            let btn = detect_button(&mut pin_b, &mut pin_j, &mut pin_g);
            println!("Pushed ci {:?}", btn);
            j_mem = j;
            g_mem = g;
        }
    }
}
|
pub use self::dao_user::*;
mod dao_user; |
use regex::Regex;
/// One field rule, parsed from "<name>: a-b or c-d". A value satisfies the
/// rule when it falls inside either inclusive range.
#[derive(Debug, PartialEq)]
pub struct Rule {
    field_name: String,
    // First inclusive range: [fst_valid_first_idx, fst_valid_last_idx].
    fst_valid_first_idx: usize,
    fst_valid_last_idx: usize,
    // Second inclusive range: [snd_valid_first_idx, snd_valid_last_idx].
    snd_valid_first_idx: usize,
    snd_valid_last_idx: usize,
}
/// A ticket: its raw field values, in printed order (positions are mapped
/// to rules in part 2).
#[derive(Debug, PartialEq)]
pub struct Ticket {
    fields: Vec<usize>,
}
/// Parses the day-16 input into `(tickets, rules)`. Every line after the
/// first section break is treated as a ticket, so "your ticket" ends up as
/// `tickets[0]` followed by the nearby tickets.
#[aoc_generator(day16)]
pub fn input_generator(input: &str) -> (Vec<Ticket>, Vec<Rule>) {
    // "name: a-b or c-d"
    let rules_regex = Regex::new(r"(.*): (\d+)-(\d+) or (\d+)-(\d+)").unwrap();
    let mut rules = Vec::new();
    let mut tickets = Vec::new();
    let mut tickets_stage = false;
    for line in input.lines() {
        match line {
            // Any header or blank line flips (and keeps) us in ticket mode.
            "your ticket:" | "nearby tickets:" | "" => tickets_stage = true,
            _ if tickets_stage => {
                let fields = line
                    .split(',')
                    .map(|n| n.parse::<usize>().unwrap())
                    .collect();
                tickets.push(Ticket { fields });
            }
            _ => {
                let caps = rules_regex.captures(line).unwrap();
                rules.push(Rule {
                    field_name: caps[1].to_string(),
                    fst_valid_first_idx: caps[2].parse().unwrap(),
                    fst_valid_last_idx: caps[3].parse().unwrap(),
                    snd_valid_first_idx: caps[4].parse().unwrap(),
                    snd_valid_last_idx: caps[5].parse().unwrap(),
                });
            }
        }
    }
    (tickets, rules)
}
/// Part 1: the ticket-scanning error rate (sum of all field values that
/// match no rule at all).
#[aoc(day16, part1)]
pub fn part1(input: &(Vec<Ticket>, Vec<Rule>)) -> usize {
    let (tickets, rules) = input;
    remove_invalid(tickets, rules).0
}
/// Part 2: deduce which rule labels each field position using the valid
/// tickets, then multiply "your ticket"'s values for every rule whose name
/// contains "departure".
#[aoc(day16, part2)]
pub fn part2(input: &(Vec<Ticket>, Vec<Rule>)) -> usize {
    let valid_idxs = remove_invalid(&input.0, &input.1).1;
    // possible[field_position][rule_index]: true while that rule could
    // still label that field position.
    let mut possible: Vec<Vec<bool>> = vec![vec![true; input.1.len()]; input.0[0].fields.len()];
    // NOTE(review): starts at 1, so the ticket at valid_idxs[0] (normally
    // "your ticket", tickets[0]) is excluded from the elimination pass —
    // presumably intentional; confirm.
    for i in 1..valid_idxs.len() {
        let ticket = &input.0[valid_idxs[i]];
        for (field_num, field) in ticket.fields.iter().enumerate() {
            for (rule_num, rule) in input.1.iter().enumerate() {
                // Rule out any rule whose two ranges both miss this value.
                if !((field >= &rule.fst_valid_first_idx && field <= &rule.fst_valid_last_idx)
                    || (field >= &rule.snd_valid_first_idx && field <= &rule.snd_valid_last_idx))
                {
                    // We could terminate this early
                    possible[field_num][rule_num] = false;
                }
            }
        }
    }
    // Constraint propagation: whenever a position has exactly one candidate
    // rule, clear that rule from every other position; repeat until every
    // position reports a single candidate.
    loop {
        let mut match_count = 0;
        for i in 0..possible.len() {
            let (is_single, idx) = single_true(&possible[i]);
            if is_single {
                match_count += 1;
                for (curr_idx, p) in possible.iter_mut().enumerate() {
                    // Do not clear the field itself
                    if curr_idx == i {
                        continue;
                    }
                    p[idx] = false;
                }
            }
        }
        if match_count >= possible.len() {
            break;
        }
    }
    // After this point there needs to be only one true per field in possible
    let mut result = 1;
    for (i, r) in input.1.iter().enumerate() {
        if r.field_name.contains("departure") {
            for (dep_idx, f) in possible.iter().enumerate() {
                // dep_idx is the field position labelled by rule i; take
                // that value from your ticket (tickets[0]).
                if f[i] == true {
                    result *= input.0[0].fields[dep_idx];
                }
            }
        }
    }
    result
}
// Helpers
/// Scans `tickets` against `rules`.
///
/// Returns `(error_rate, valid_indices)`: the sum of every field value that
/// matches no rule at all (part 1's "ticket scanning error rate"), and the
/// indices of tickets whose fields all match at least one rule.
///
/// Takes slices instead of `&Vec<_>` (Clippy `ptr_arg`); existing callers
/// passing `&Vec<_>` still work via deref coercion.
fn remove_invalid(tickets: &[Ticket], rules: &[Rule]) -> (usize, Vec<usize>) {
    let mut error_count = 0;
    let mut idx_vec = Vec::new();
    for (idx, ticket) in tickets.iter().enumerate() {
        let mut is_ticket_valid = true;
        for &field in ticket.fields.iter() {
            // A field is valid if any rule's inclusive ranges contain it.
            let is_field_valid = rules.iter().any(|rule| {
                (rule.fst_valid_first_idx..=rule.fst_valid_last_idx).contains(&field)
                    || (rule.snd_valid_first_idx..=rule.snd_valid_last_idx).contains(&field)
            });
            if !is_field_valid {
                error_count += field;
                is_ticket_valid = false;
            }
        }
        if is_ticket_valid {
            idx_vec.push(idx);
        }
    }
    (error_count, idx_vec)
}
/// Checks whether `input` has at most one `true` entry.
///
/// Returns `(true, idx)` with the position of the lone `true` entry — or
/// `(true, 0)` when there is none at all — and `(false, 0)` as soon as a
/// second `true` is seen. (The all-false case deliberately reports `true`,
/// which `part2`'s fixed-point loop relies on to terminate.)
///
/// Takes a slice instead of `&Vec<bool>` (Clippy `ptr_arg`); callers
/// passing `&Vec<bool>` coerce automatically.
fn single_true(input: &[bool]) -> (bool, usize) {
    let mut true_count = 0;
    let mut true_idx = 0;
    for (i, &b) in input.iter().enumerate() {
        if b {
            true_count += 1;
            true_idx = i;
            if true_count > 1 {
                return (false, 0);
            }
        }
    }
    (true, true_idx)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Parses the worked example from the puzzle statement: three rules,
    // then "your ticket" (7,1,14) followed by four nearby tickets — all
    // five end up in the ticket list, your ticket first.
    #[test]
    fn test_input() {
        assert_eq!(
            input_generator(
                "class: 1-3 or 5-7\n\
                 row: 6-11 or 33-44\n\
                 seat: 13-40 or 45-50\n\n\
                 your ticket:\n\
                 7,1,14\n\n\
                 nearby tickets:\n\
                 7,3,47\n\
                 40,4,50\n\
                 55,2,20\n\
                 38,6,12"
            ),
            (
                vec![
                    Ticket {
                        fields: vec![7, 1, 14],
                    },
                    Ticket {
                        fields: vec![7, 3, 47],
                    },
                    Ticket {
                        fields: vec![40, 4, 50],
                    },
                    Ticket {
                        fields: vec![55, 2, 20],
                    },
                    Ticket {
                        fields: vec![38, 6, 12],
                    }
                ],
                vec![
                    Rule {
                        field_name: "class".to_string(),
                        fst_valid_first_idx: 1,
                        fst_valid_last_idx: 3,
                        snd_valid_first_idx: 5,
                        snd_valid_last_idx: 7,
                    },
                    Rule {
                        field_name: "row".to_string(),
                        fst_valid_first_idx: 6,
                        fst_valid_last_idx: 11,
                        snd_valid_first_idx: 33,
                        snd_valid_last_idx: 44,
                    },
                    Rule {
                        field_name: "seat".to_string(),
                        fst_valid_first_idx: 13,
                        fst_valid_last_idx: 40,
                        snd_valid_first_idx: 45,
                        snd_valid_last_idx: 50,
                    },
                ]
            )
        );
    }

    // Example error rate from the puzzle: 4 + 55 + 12 = 71.
    #[test]
    fn test_part1() {
        assert_eq!(
            part1(&input_generator(
                "class: 1-3 or 5-7\n\
                 row: 6-11 or 33-44\n\
                 seat: 13-40 or 45-50\n\n\
                 your ticket:\n\
                 7,1,14\n\n\
                 nearby tickets:\n\
                 7,3,47\n\
                 40,4,50\n\
                 55,2,20\n\
                 38,6,12"
            )),
            71
        );
    }

    // Only your ticket (index 0) and the first nearby ticket (index 1)
    // are fully valid in the example.
    #[test]
    fn test_idx_vec() {
        let (tickets, rules) = input_generator(
            "class: 1-3 or 5-7\n\
             row: 6-11 or 33-44\n\
             seat: 13-40 or 45-50\n\n\
             your ticket:\n\
             7,1,14\n\n\
             nearby tickets:\n\
             7,3,47\n\
             40,4,50\n\
             55,2,20\n\
             38,6,12",
        );
        assert_eq!(remove_invalid(&tickets, &rules).1, vec![0, 1]);
    }
}
|
use std::error::Error;
#[allow(unused_imports, dead_code)]
mod minitrace_generated;
use minitrace_generated::*;
/// Serializes a `minitrace::TraceDetails` into the flatbuffers schema from
/// `minitrace_generated`, returning the finished byte slice borrowed from
/// `builder`.
///
/// Spans are converted first (mapping each minitrace state onto the
/// generated `State` enum), then the three property vectors, then the
/// nested tables — flatbuffers requires children to be created before the
/// tables that reference them.
///
/// NOTE(review): this does not reset `builder`; presumably callers pass a
/// fresh/reset builder each time — confirm at call sites.
pub fn serialize_to_fbs<'a>(
    builder: &'a mut flatbuffers::FlatBufferBuilder,
    minitrace::TraceDetails {
        start_time_ns,
        elapsed_ns,
        cycles_per_second,
        spans,
        properties:
            minitrace::Properties {
                span_ids,
                property_lens,
                payload,
            },
    }: minitrace::TraceDetails,
) -> Result<&'a [u8], Box<dyn Error>> {
    let mut spans_buf = Vec::with_capacity(spans.len());
    for minitrace::Span {
        id,
        state,
        related_id,
        begin_cycles,
        elapsed_cycles,
        event,
    } in spans
    {
        spans_buf.push(Span::new(
            id,
            // 1:1 mapping between the crate's span states and the
            // generated flatbuffers enum.
            match state {
                minitrace::State::Root => State::Root,
                minitrace::State::Local => State::Local,
                minitrace::State::Spawning => State::Spawning,
                minitrace::State::Scheduling => State::Scheduling,
                minitrace::State::Settle => State::Settle,
            },
            related_id,
            begin_cycles,
            elapsed_cycles,
            event,
        ))
    }
    let span_ids = Some(builder.create_vector_direct(&span_ids));
    let property_lens = Some(builder.create_vector_direct(&property_lens));
    let payload = Some(builder.create_vector_direct(&payload));
    let properties = Properties::create(
        builder,
        &PropertiesArgs {
            span_ids,
            property_lens,
            payload,
        },
    );
    let spans = builder.create_vector_direct(&spans_buf);
    let trace_details = TraceDetails::create(
        builder,
        &TraceDetailsArgs {
            start_time_ns,
            elapsed_ns,
            cycles_per_second,
            spans: Some(spans),
            properties: Some(properties),
        },
    );
    builder.finish(trace_details, None);
    Ok(builder.finished_data())
}
|
use crate::{
command::{LapceUICommand, LAPCE_UI_COMMAND},
editor::Editor,
editor::EditorState,
editor::{EditorLocation, EditorView},
scroll::LapceScroll,
state::LapceTabState,
state::LapceUIState,
state::LAPCE_APP_STATE,
};
use std::{cmp::Ordering, sync::Arc};
use druid::{
kurbo::{Line, Rect},
widget::IdentityWrapper,
Command, Target, WidgetId, WindowId,
};
use druid::{
theme, BoxConstraints, Cursor, Data, Env, Event, EventCtx, LayoutCtx, LifeCycle,
LifeCycleCtx, PaintCtx, Point, RenderContext, Size, UpdateCtx, Widget,
WidgetExt, WidgetPod,
};
/// Direction in which focus moves between split children
/// (see `LapceUICommand::SplitMove` handling in `LapceSplit::event`).
#[derive(Debug)]
pub enum SplitMoveDirection {
    Up,
    Down,
    Right,
    Left,
}
/// A container widget that lays its children out side by side and lets the
/// user drag the bars between them to resize.
pub struct LapceSplit {
    window_id: WindowId,
    tab_id: WidgetId,
    // Optional explicit widget id, set via `with_id`.
    id: Option<WidgetId>,
    // Split orientation; NOTE(review): layout/painting below only handle
    // the x axis — confirm how non-vertical splits are meant to work.
    vertical: bool,
    pub children: Vec<ChildWidget>,
    // Index of the bar being dragged; only meaningful while active.
    current_bar_hover: usize,
}
/// One child of a `LapceSplit` plus its sizing information.
pub struct ChildWidget {
    pub widget: WidgetPod<LapceUIState, Box<dyn Widget<LapceUIState>>>,
    // true: `params` is a flex weight; false: `params` is an absolute width.
    flex: bool,
    params: f64,
    // Rectangle assigned during the most recent layout pass.
    layout_rect: Rect,
}
impl LapceSplit {
pub fn new(window_id: WindowId, tab_id: WidgetId, vertical: bool) -> Self {
LapceSplit {
window_id,
tab_id,
id: None,
vertical,
children: Vec::new(),
current_bar_hover: 0,
}
}
pub fn with_id(mut self, id: WidgetId) -> Self {
self.id = Some(id);
self
}
/// Appends a fixed-size child; `params` is its absolute width in pixels.
pub fn with_child(
    mut self,
    child: impl Widget<LapceUIState> + 'static,
    params: f64,
) -> Self {
    self.children.push(ChildWidget {
        widget: WidgetPod::new(child).boxed(),
        flex: false,
        params,
        layout_rect: Rect::ZERO,
    });
    self
}
/// Appends a flexible child; `params` is its share of the leftover width
/// relative to the other flex children.
pub fn with_flex_child(
    mut self,
    child: impl Widget<LapceUIState> + 'static,
    params: f64,
) -> Self {
    self.children.push(ChildWidget {
        widget: WidgetPod::new(child).boxed(),
        flex: true,
        params,
        layout_rect: Rect::ZERO,
    });
    self
}
/// Resets every flex child's weight to 1.0 so the flexible space is shared
/// evenly (used after inserting or removing a split).
pub fn even_flex_children(&mut self) {
    self.children
        .iter_mut()
        .filter(|child| child.flex)
        .for_each(|child| child.params = 1.0);
}
/// Moves the split bar at `current_bar_hover` to the mouse's x position,
/// adjusting the sizing params of the two neighbouring children.
///
/// Drags that would leave either neighbour narrower than 50 px are ignored.
fn update_split_point(&mut self, size: Size, mouse_pos: Point) {
    let limit = 50.0;
    let left = self.children[self.current_bar_hover - 1].layout_rect.x0;
    let right = self.children[self.current_bar_hover].layout_rect.x1;
    if mouse_pos.x < left + limit || mouse_pos.x > right - limit {
        return;
    }
    // A non-flex left neighbour stores its absolute width directly.
    if !self.children[self.current_bar_hover - 1].flex {
        self.children[self.current_bar_hover - 1].params = mouse_pos.x - left;
    } else {
        if !self.children[self.current_bar_hover].flex {
            self.children[self.current_bar_hover].params = right - mouse_pos.x;
        }
        // Refresh every flex child's weight. NOTE(review): weights are set
        // to width / size.width here but consumed in `layout` as a share of
        // the *flex* space — consistent only while all children are flex;
        // confirm mixed flex/non-flex resizing behaves as intended.
        for (i, child) in self.children.iter_mut().enumerate() {
            if child.flex {
                if i == self.current_bar_hover - 1 {
                    child.params = (mouse_pos.x - left) / size.width;
                } else if i == self.current_bar_hover {
                    child.params = (right - mouse_pos.x) / size.width;
                } else {
                    child.params = child.layout_rect.width() / size.width;
                }
            }
        }
    }
}
/// Returns the index of the split bar within 3 px of the mouse, if any.
/// The bar before child `i` sits at that child's left edge, so valid
/// indices start at 1. (`size` is unused but kept for signature parity.)
fn bar_hit_test(&self, size: Size, mouse_pos: Point) -> Option<usize> {
    if self.children.len() <= 1 {
        return None;
    }
    (1..self.children.len()).find(|&i| {
        let x = self.children[i].layout_rect.x0;
        (x - 3.0..=x + 3.0).contains(&mouse_pos.x)
    })
}
/// Draws a 1 px vertical separator at each internal child boundary.
fn paint_bar(&mut self, ctx: &mut PaintCtx, env: &Env) {
    if self.children.len() <= 1 {
        return;
    }
    let size = ctx.size();
    let color = env.get(theme::BORDER_LIGHT);
    for child in &self.children[1..] {
        let x = child.layout_rect.x0;
        let line = Line::new(Point::new(x, 0.0), Point::new(x, size.height));
        ctx.stroke(line, &color, 1.0);
    }
}
}
impl Widget<LapceUIState> for LapceSplit {
/// Routes events to children and handles the split-management UI commands
/// (split / close / exchange / move focus) plus drag-resizing of the bars.
fn event(
    &mut self,
    ctx: &mut EventCtx,
    event: &Event,
    data: &mut LapceUIState,
    env: &Env,
) {
    match event {
        // Internal events go to every child unconditionally, then we stop.
        Event::Internal(_) => {
            for child in self.children.as_mut_slice() {
                child.widget.event(ctx, event, data, env);
            }
            return;
        }
        Event::Command(cmd) => match cmd {
            _ if cmd.is(LAPCE_UI_COMMAND) => {
                let command = cmd.get_unchecked(LAPCE_UI_COMMAND);
                match command {
                    LapceUICommand::RequestLayout => {
                        ctx.request_layout();
                    }
                    // Apply queued edits only if the active editor's cursor
                    // is still at the offset they were computed for;
                    // otherwise drop them as stale.
                    LapceUICommand::ApplyEdits(offset, rev, edits) => {
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        if *offset
                            != editor_split
                                .editors
                                .get(&editor_split.active)
                                .unwrap()
                                .selection
                                .get_cursor_offset()
                        {
                            return;
                        }
                        editor_split.apply_edits(ctx, data, *rev, edits);
                    }
                    LapceUICommand::ApplyEditsAndSave(offset, rev, result) => {
                        LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id)
                            .editor_split
                            .lock()
                            .apply_edits_and_save(
                                ctx, data, *offset, *rev, result,
                            );
                    }
                    LapceUICommand::GotoLocation(location) => {
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        editor_split.go_to_location(ctx, data, location, env);
                    }
                    // Split the active editor: clone its state under fresh
                    // widget ids and insert the new view next to it.
                    LapceUICommand::Split(vertical) => {
                        // With 0 or 1 children the orientation can still be
                        // changed freely.
                        if self.children.len() <= 1 {
                            self.vertical = *vertical;
                        }
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        let active = editor_split.active;
                        if &self.vertical != vertical {
                            // NOTE(review): splitting against the current
                            // orientation is unimplemented — the loop body
                            // below is empty.
                            for child in &self.children {
                                if child.widget.id() == active {}
                            }
                        } else {
                            // Locate the active child so the clone can be
                            // inserted right after it.
                            let mut index = 0;
                            for (i, child) in self.children.iter().enumerate() {
                                if child.widget.id() == active {
                                    index = i;
                                }
                            }
                            let mut new_editor = editor_split
                                .editors
                                .get(&active)
                                .unwrap()
                                .clone();
                            new_editor.view_id = WidgetId::next();
                            new_editor.editor_id = WidgetId::next();
                            let scroll_offset = new_editor.scroll_offset;
                            let new_editor_id = new_editor.editor_id.clone();
                            let new_view_id = new_editor.view_id.clone();
                            let split_id = new_editor.split_id.clone();
                            let tab_id = new_editor.tab_id.clone();
                            editor_split
                                .editors
                                .insert(new_view_id.clone(), new_editor);
                            // Mirror the UI-side editor state as well.
                            let new_editor_ui = data.get_editor(&active).clone();
                            Arc::make_mut(&mut data.editors)
                                .insert(new_view_id.clone(), new_editor_ui);
                            let mut new_editor_view = EditorView::new(
                                self.window_id,
                                tab_id,
                                split_id,
                                new_view_id,
                                new_editor_id,
                            );
                            // Keep the clone scrolled to the same spot.
                            new_editor_view.editor.widget_mut().force_scroll_to(
                                scroll_offset.x,
                                scroll_offset.y,
                            );
                            let new_child = ChildWidget {
                                widget: WidgetPod::new(new_editor_view).boxed(),
                                flex: true,
                                params: 1.0,
                                layout_rect: Rect::ZERO,
                            };
                            self.children.insert(index + 1, new_child);
                            self.even_flex_children();
                            ctx.children_changed();
                        }
                    }
                    // Close the active split and hand focus to a neighbour.
                    LapceUICommand::SplitClose => {
                        // Never close the last remaining child.
                        if self.children.len() == 1 {
                            return;
                        }
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        let active = editor_split.active;
                        let buffer_id = editor_split
                            .editors
                            .get(&active)
                            .unwrap()
                            .buffer_id
                            .clone();
                        let mut index = 0;
                        for (i, child) in self.children.iter().enumerate() {
                            if child.widget.id() == active {
                                index = i;
                            }
                        }
                        // Prefer the right-hand neighbour; fall back to the
                        // left one when closing the last child.
                        let new_index = if index >= self.children.len() - 1 {
                            index - 1
                        } else {
                            index + 1
                        };
                        let new_active = self.children[new_index].widget.id();
                        self.children.remove(index);
                        editor_split.editors.remove(&active);
                        Arc::make_mut(&mut data.editors).remove(&active);
                        editor_split.active = new_active;
                        if let Some(buffer_id) = buffer_id {
                            editor_split
                                .clear_buffer_text_layouts(data, buffer_id);
                        }
                        self.even_flex_children();
                        ctx.children_changed();
                    }
                    // Swap the active split with its right-hand neighbour.
                    LapceUICommand::SplitExchange => {
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        let active = editor_split.active;
                        let mut index = 0;
                        for (i, child) in self.children.iter().enumerate() {
                            if child.widget.id() == active {
                                index = i;
                            }
                        }
                        // No-op when the active child is already rightmost.
                        if index >= self.children.len() - 1 {
                        } else {
                            editor_split.active =
                                self.children[index + 1].widget.id();
                            self.children.swap(index, index + 1);
                            ctx.request_layout();
                        }
                    }
                    // Move focus to the neighbouring split; only Left and
                    // Right are handled (splits are laid out horizontally).
                    LapceUICommand::SplitMove(direction) => {
                        let state = LAPCE_APP_STATE
                            .get_tab_state(&self.window_id, &self.tab_id);
                        let mut editor_split = state.editor_split.lock();
                        let active = editor_split.active;
                        let mut index = 0;
                        for (i, child) in self.children.iter().enumerate() {
                            if child.widget.id() == active {
                                index = i;
                            }
                        }
                        match direction {
                            SplitMoveDirection::Left => {
                                if index == 0 {
                                    return;
                                }
                                editor_split.active =
                                    self.children[index - 1].widget.id();
                            }
                            SplitMoveDirection::Right => {
                                if index >= self.children.len() - 1 {
                                    return;
                                }
                                editor_split.active =
                                    self.children[index + 1].widget.id();
                            }
                            _ => (),
                        }
                        let editor = editor_split
                            .editors
                            .get(&editor_split.active)
                            .unwrap();
                        let buffer = editor_split
                            .buffers
                            .get(editor.buffer_id.as_ref().unwrap())
                            .unwrap();
                        editor.ensure_cursor_visible(ctx, buffer, env, None);
                        ctx.request_paint();
                    }
                    _ => (),
                }
            }
            _ => (),
        },
        _ => (),
    }
    // Active children see the event first and may mark it handled.
    for child in self.children.as_mut_slice() {
        if child.widget.is_active() {
            if child.widget.is_initialized() {
                child.widget.event(ctx, event, data, env);
            }
            if ctx.is_handled() {
                return;
            }
        }
    }
    // Bar-drag handling: press starts the drag, move resizes, release ends.
    match event {
        Event::MouseDown(mouse) => {
            if mouse.button.is_left() {
                if let Some(bar_number) =
                    self.bar_hit_test(ctx.size(), mouse.pos)
                {
                    self.current_bar_hover = bar_number;
                    ctx.set_active(true);
                    ctx.set_handled();
                }
            }
        }
        Event::MouseUp(mouse) => {
            if mouse.button.is_left() && ctx.is_active() {
                ctx.set_active(false);
                self.update_split_point(ctx.size(), mouse.pos);
                ctx.request_paint();
            }
        }
        Event::MouseMove(mouse) => {
            if ctx.is_active() {
                self.update_split_point(ctx.size(), mouse.pos);
                ctx.request_layout();
            }
            // Show a resize cursor while hovering a bar or dragging.
            if ctx.is_hot() && self.bar_hit_test(ctx.size(), mouse.pos).is_some()
                || ctx.is_active()
            {
                match self.vertical {
                    true => ctx.set_cursor(&Cursor::ResizeLeftRight),
                    false => ctx.set_cursor(&Cursor::ResizeUpDown),
                }
            }
        }
        _ => (),
    }
    // Finally, non-active children get the event too.
    for child in self.children.as_mut_slice() {
        if !child.widget.is_active() {
            if child.widget.is_initialized() {
                child.widget.event(ctx, event, data, env);
            }
        }
    }
}
/// Forwards every lifecycle event to all children unconditionally.
fn lifecycle(
    &mut self,
    ctx: &mut LifeCycleCtx,
    event: &LifeCycle,
    data: &LapceUIState,
    env: &Env,
) {
    self.children
        .iter_mut()
        .for_each(|child| child.widget.lifecycle(ctx, event, data, env));
}
/// Forwards data updates to all children; `old_data` is unused because
/// each child `WidgetPod` diffs the data itself.
fn update(
    &mut self,
    ctx: &mut UpdateCtx,
    old_data: &LapceUIState,
    data: &LapceUIState,
    env: &Env,
) {
    for child in self.children.iter_mut() {
        child.widget.update(ctx, data, env);
    }
}
/// Lays children out left-to-right: non-flex children get their fixed
/// width, then the remaining width is shared among flex children in
/// proportion to their weights.
///
/// NOTE(review): only the x axis is split; `self.vertical` is not
/// consulted here — confirm horizontal (stacked) splits are unsupported.
fn layout(
    &mut self,
    ctx: &mut LayoutCtx,
    bc: &BoxConstraints,
    data: &LapceUIState,
    env: &Env,
) -> Size {
    let my_size = bc.max();
    let children_len = self.children.len();
    if children_len == 0 {
        return my_size;
    }
    // First pass: size the non-flex children (`params` is an absolute
    // width) and total the width they consume.
    let mut non_flex_total = 0.0;
    for child in self.children.iter_mut() {
        if !child.flex {
            let size = Size::new(child.params, my_size.height);
            let child_size = child.widget.layout(
                ctx,
                &BoxConstraints::new(size, size),
                data,
                env,
            );
            child.layout_rect = child.layout_rect.with_size(child_size);
            non_flex_total += child_size.width;
        }
    }
    // Sum of the flex weights, for proportional distribution.
    let mut flex_sum = 0.0;
    for child in &self.children {
        if child.flex {
            flex_sum += child.params;
        }
    }
    let flex_total = my_size.width - non_flex_total;
    // Second pass: assign origins left-to-right and size flex children as
    // their share of the leftover width. (`y` is always 0.)
    let mut x = 0.0;
    let mut y = 0.0;
    for child in self.children.iter_mut() {
        child.layout_rect = child.layout_rect.with_origin(Point::new(x, y));
        if !child.flex {
            x += child.layout_rect.width();
        } else {
            let width = flex_total / flex_sum * child.params;
            let size = Size::new(width, my_size.height);
            child.widget.layout(
                ctx,
                &BoxConstraints::new(size, size),
                data,
                env,
            );
            child.layout_rect = child.layout_rect.with_size(size);
            x += width;
        }
        child
            .widget
            .set_layout_rect(ctx, data, env, child.layout_rect);
    }
    my_size
}
/// Paints all children first, then draws the split bars on top.
fn paint(&mut self, ctx: &mut PaintCtx, data: &LapceUIState, env: &Env) {
    for child in self.children.iter_mut() {
        child.widget.paint(ctx, data, env);
    }
    self.paint_bar(ctx, env);
}
/// The explicit widget id assigned via `with_id`, if any.
fn id(&self) -> Option<WidgetId> {
    self.id
}
}
|
use std::fs::File;
use std::io::Read;
use regex::Regex;
use std::collections::HashSet;
/// Parses all "R<n>"/"L<n>" instructions out of `input`, ignoring any
/// separators, and returns them as (turn letter, distance) pairs.
fn get_directions(input: &str) -> Vec<(char, i32)> {
    let re = Regex::new(r"([RL])(\d+)").unwrap();
    re.captures_iter(input)
        .map(|cap| {
            let turn = cap.get(1).unwrap().as_str().chars().next().unwrap();
            let dist = cap.get(2).unwrap().as_str().parse::<i32>().unwrap();
            (turn, dist)
        })
        .collect()
}
/// Walks the turn/step instructions from the origin, initially facing
/// "north" (aim = (0, 1)), and returns the final position.
///
/// A right turn maps the heading (x, y) -> (y, -x); a left turn maps it to
/// (-y, x).
///
/// Takes a slice instead of `&Vec` (Clippy `ptr_arg`); `&Vec` callers
/// still coerce.
fn follow_directions(directions: &[(char, i32)]) -> (i32, i32) {
    let mut pos = (0, 0);
    let mut aim = (0, 1);
    for &(dir, amount) in directions {
        aim = if dir == 'R' { (aim.1, -aim.0) } else { (-aim.1, aim.0) };
        pos.0 += aim.0 * amount;
        pos.1 += aim.1 * amount;
    }
    pos
}
/// Walks the instructions one unit step at a time and returns the first
/// position that is visited twice (part 2's "first location visited
/// twice").
///
/// # Panics
/// Panics if the path never revisits a position.
///
/// Takes a slice instead of `&Vec` (Clippy `ptr_arg`); `&Vec` callers
/// still coerce.
fn follow_with_intersections(directions: &[(char, i32)]) -> (i32, i32) {
    let mut pos = (0, 0);
    let mut aim = (0, 1);
    let mut visited: HashSet<(i32, i32)> = HashSet::new();
    visited.insert(pos);
    for &(dir, amount) in directions {
        aim = if dir == 'R' { (aim.1, -aim.0) } else { (-aim.1, aim.0) };
        // Step one unit at a time so crossings mid-leg are detected.
        for _ in 0..amount {
            pos.0 += aim.0;
            pos.1 += aim.1;
            if visited.contains(&pos) {
                return pos;
            }
            visited.insert(pos);
        }
    }
    panic!("No place visited twice");
}
/// Part 1: Manhattan distance from the origin to the final position.
fn part1(directions: &Vec<(char, i32)>) -> i32 {
    let (x, y) = follow_directions(directions);
    x.abs() + y.abs()
}
/// Part 2: Manhattan distance to the first position visited twice.
fn part2(directions: &Vec<(char, i32)>) -> i32 {
    let (x, y) = follow_with_intersections(directions);
    x.abs() + y.abs()
}
/// Reads the puzzle input, parses the instruction list, and prints both
/// answers.
fn main() {
    let mut text = String::new();
    File::open("input01.txt")
        .unwrap()
        .read_to_string(&mut text)
        .unwrap();
    let directions = get_directions(&text);
    println!("Part1 result: {}", part1(&directions));
    println!("Part2 result: {}", part2(&directions));
}
// --- tests ---
// R2, L3 -> 5
// R2, R2, R2 -> 2
// R5, L5, R5, R3 -> 12
// Parsing keeps only the R/L letter and the number; separators are skipped.
#[test]
fn test_directions() {
    assert_eq!(
        get_directions("R2, L3, R100, L15"),
        vec![
            ('R', 2),
            ('L', 3),
            ('R', 100),
            ('L', 15)
        ]
    )
}
// Puzzle example: R2, L3 -> distance 5.
#[test]
fn test1() {
    assert_eq!(part1(&get_directions(&"R2, L3")), 5);
}
// Puzzle example: R2, R2, R2 -> distance 2.
#[test]
fn test2() {
    assert_eq!(part1(&get_directions(&"R2, R2, R2")), 2);
}
// Puzzle example: R5, L5, R5, R3 -> distance 12.
#[test]
fn test3() {
    assert_eq!(part1(&get_directions(&"R5, L5, R5, R3")), 12);
}
// Part 2 example: first position visited twice is 4 blocks away.
#[test]
fn test4() {
    assert_eq!(part2(&get_directions(&"R8, R4, R4, R8")), 4);
}
// This file is part of Substrate.
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Weights for pallet_democracy
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0
//! DATE: 2020-09-24, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: []
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;
pub struct WeightInfo<T>(PhantomData<T>);
// Auto-generated benchmark results (see the file header): each weight is a
// measured base cost plus per-item slopes, plus DB read/write costs from
// the runtime's `DbWeight`. Do not hand-tune these constants — re-run the
// benchmark CLI instead. The single-letter parameters are the benchmarked
// complexity inputs (presumably s = seconders, r = referenda/votes,
// p = proposals, v = vetoers/voters, b = preimage bytes — per the pallet's
// benchmarking code; confirm there).
impl<T: frame_system::Trait> pallet_democracy::WeightInfo for WeightInfo<T> {
    fn propose() -> Weight {
        (96_316_000 as Weight)
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn second(s: u32) -> Weight {
        (58_386_000 as Weight)
            .saturating_add((259_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn vote_new(r: u32) -> Weight {
        (70_374_000 as Weight)
            .saturating_add((291_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn vote_existing(r: u32) -> Weight {
        (70_097_000 as Weight)
            .saturating_add((296_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn emergency_cancel() -> Weight {
        (41_731_000 as Weight)
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn blacklist(p: u32) -> Weight {
        (117_847_000 as Weight)
            .saturating_add((871_000 as Weight).saturating_mul(p as Weight))
            .saturating_add(T::DbWeight::get().reads(5 as Weight))
            .saturating_add(T::DbWeight::get().writes(6 as Weight))
    }
    fn external_propose(v: u32) -> Weight {
        (20_972_000 as Weight)
            .saturating_add((114_000 as Weight).saturating_mul(v as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn external_propose_majority() -> Weight {
        (5_030_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn external_propose_default() -> Weight {
        (4_981_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn fast_track() -> Weight {
        (42_801_000 as Weight)
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn veto_external(v: u32) -> Weight {
        (44_115_000 as Weight)
            .saturating_add((194_000 as Weight).saturating_mul(v as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn cancel_proposal(p: u32) -> Weight {
        (73_937_000 as Weight)
            .saturating_add((962_000 as Weight).saturating_mul(p as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn cancel_referendum() -> Weight {
        (25_233_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn cancel_queued(r: u32) -> Weight {
        (48_251_000 as Weight)
            .saturating_add((3_590_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn on_initialize_base(r: u32) -> Weight {
        (17_597_000 as Weight)
            .saturating_add((7_248_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(5 as Weight))
            .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
    }
    fn delegate(r: u32) -> Weight {
        (93_916_000 as Weight)
            .saturating_add((10_794_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(4 as Weight))
            .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
            .saturating_add(T::DbWeight::get().writes(4 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
    }
    fn undelegate(r: u32) -> Weight {
        (47_855_000 as Weight)
            .saturating_add((10_805_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
    }
    fn clear_public_proposals() -> Weight {
        (4_864_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn note_preimage(b: u32) -> Weight {
        (66_754_000 as Weight)
            .saturating_add((4_000 as Weight).saturating_mul(b as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn note_imminent_preimage(b: u32) -> Weight {
        (44_664_000 as Weight)
            .saturating_add((3_000 as Weight).saturating_mul(b as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn reap_preimage(b: u32) -> Weight {
        (59_968_000 as Weight)
            .saturating_add((3_000 as Weight).saturating_mul(b as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn unlock_remove(r: u32) -> Weight {
        (58_573_000 as Weight)
            .saturating_add((131_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn unlock_set(r: u32) -> Weight {
        (53_831_000 as Weight)
            .saturating_add((324_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
    }
    fn remove_vote(r: u32) -> Weight {
        (31_846_000 as Weight)
            .saturating_add((327_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn remove_other_vote(r: u32) -> Weight {
        (31_880_000 as Weight)
            .saturating_add((222_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
}
|
/// PullRequest represents a pull request.
///
/// Every field is optional because the API may omit any of them; timestamp
/// fields are kept as raw strings as returned by the server.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct PullRequest {
    pub assignee: Option<crate::user::User>,
    pub assignees: Option<Vec<crate::user::User>>,
    // Target branch information.
    pub base: Option<crate::pr_branch_info::PrBranchInfo>,
    pub body: Option<String>,
    pub closed_at: Option<String>,
    pub comments: Option<i64>,
    pub created_at: Option<String>,
    pub diff_url: Option<String>,
    pub due_date: Option<String>,
    // Source branch information.
    pub head: Option<crate::pr_branch_info::PrBranchInfo>,
    pub html_url: Option<String>,
    pub id: Option<i64>,
    pub is_locked: Option<bool>,
    pub labels: Option<Vec<crate::label::Label>>,
    pub merge_base: Option<String>,
    pub merge_commit_sha: Option<String>,
    pub mergeable: Option<bool>,
    pub merged: Option<bool>,
    pub merged_at: Option<String>,
    pub merged_by: Option<crate::user::User>,
    pub milestone: Option<crate::milestone::Milestone>,
    // The PR number within the repository (distinct from the global `id`).
    pub number: Option<i64>,
    pub patch_url: Option<String>,
    pub state: Option<String>,
    pub title: Option<String>,
    pub updated_at: Option<String>,
    pub url: Option<String>,
    pub user: Option<crate::user::User>,
}
impl PullRequest {
    /// Create a builder for this object.
    #[inline]
    pub fn builder() -> PullRequestBuilder {
        PullRequestBuilder {
            body: Default::default(),
        }
    }

    /// Builder for the "list repository pull requests" call; the phantom
    /// type parameters track which required path parameters (owner, repo)
    /// are still unset.
    #[inline]
    pub fn repo_list_pull_requests() -> PullRequestGetBuilder<crate::generics::MissingOwner, crate::generics::MissingRepo> {
        PullRequestGetBuilder {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
        }
    }

    /// Builder for the "get a single pull request" call; owner, repo and
    /// the PR index are all still required.
    #[inline]
    pub fn repo_get_pull_request() -> PullRequestGetBuilder1<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingIndex> {
        PullRequestGetBuilder1 {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_index: core::marker::PhantomData,
        }
    }
}
/// Finalizes the builder, yielding the accumulated [`PullRequest`].
///
/// Implemented as `From` rather than a hand-written `Into` (Clippy's
/// `from_over_into`): the standard blanket impl still provides
/// `Into<PullRequest>` for the builder, so existing `.into()` call sites
/// keep working.
impl From<PullRequestBuilder> for PullRequest {
    fn from(builder: PullRequestBuilder) -> Self {
        builder.body
    }
}
/// Builder for [`PullRequest`](./struct.PullRequest.html) object.
#[derive(Debug, Clone)]
pub struct PullRequestBuilder {
    // Accumulates the fields set so far; surrendered by the
    // `Into<PullRequest>` conversion.
    body: self::PullRequest,
}
impl PullRequestBuilder {
#[inline]
pub fn assignee(mut self, value: crate::user::User) -> Self {
self.body.assignee = Some(value.into());
self
}
#[inline]
pub fn assignees(mut self, value: impl Iterator<Item = crate::user::User>) -> Self {
self.body.assignees = Some(value.map(|value| value.into()).collect::<Vec<_>>().into());
self
}
#[inline]
pub fn base(mut self, value: crate::pr_branch_info::PrBranchInfo) -> Self {
self.body.base = Some(value.into());
self
}
#[inline]
pub fn body(mut self, value: impl Into<String>) -> Self {
self.body.body = Some(value.into());
self
}
#[inline]
pub fn closed_at(mut self, value: impl Into<String>) -> Self {
self.body.closed_at = Some(value.into());
self
}
#[inline]
pub fn comments(mut self, value: impl Into<i64>) -> Self {
self.body.comments = Some(value.into());
self
}
#[inline]
pub fn created_at(mut self, value: impl Into<String>) -> Self {
self.body.created_at = Some(value.into());
self
}
#[inline]
pub fn diff_url(mut self, value: impl Into<String>) -> Self {
self.body.diff_url = Some(value.into());
self
}
#[inline]
pub fn due_date(mut self, value: impl Into<String>) -> Self {
self.body.due_date = Some(value.into());
self
}
#[inline]
pub fn head(mut self, value: crate::pr_branch_info::PrBranchInfo) -> Self {
self.body.head = Some(value.into());
self
}
#[inline]
pub fn html_url(mut self, value: impl Into<String>) -> Self {
self.body.html_url = Some(value.into());
self
}
#[inline]
pub fn id(mut self, value: impl Into<i64>) -> Self {
self.body.id = Some(value.into());
self
}
#[inline]
pub fn is_locked(mut self, value: impl Into<bool>) -> Self {
self.body.is_locked = Some(value.into());
self
}
#[inline]
pub fn labels(mut self, value: impl Iterator<Item = crate::label::Label>) -> Self {
self.body.labels = Some(value.map(|value| value.into()).collect::<Vec<_>>().into());
self
}
#[inline]
pub fn merge_base(mut self, value: impl Into<String>) -> Self {
self.body.merge_base = Some(value.into());
self
}
#[inline]
pub fn merge_commit_sha(mut self, value: impl Into<String>) -> Self {
self.body.merge_commit_sha = Some(value.into());
self
}
#[inline]
pub fn mergeable(mut self, value: impl Into<bool>) -> Self {
self.body.mergeable = Some(value.into());
self
}
#[inline]
pub fn merged(mut self, value: impl Into<bool>) -> Self {
self.body.merged = Some(value.into());
self
}
#[inline]
pub fn merged_at(mut self, value: impl Into<String>) -> Self {
self.body.merged_at = Some(value.into());
self
}
#[inline]
pub fn merged_by(mut self, value: crate::user::User) -> Self {
self.body.merged_by = Some(value.into());
self
}
#[inline]
pub fn milestone(mut self, value: crate::milestone::Milestone) -> Self {
self.body.milestone = Some(value.into());
self
}
#[inline]
pub fn number(mut self, value: impl Into<i64>) -> Self {
self.body.number = Some(value.into());
self
}
#[inline]
pub fn patch_url(mut self, value: impl Into<String>) -> Self {
self.body.patch_url = Some(value.into());
self
}
#[inline]
pub fn state(mut self, value: impl Into<String>) -> Self {
self.body.state = Some(value.into());
self
}
#[inline]
pub fn title(mut self, value: impl Into<String>) -> Self {
self.body.title = Some(value.into());
self
}
#[inline]
pub fn updated_at(mut self, value: impl Into<String>) -> Self {
self.body.updated_at = Some(value.into());
self
}
#[inline]
pub fn url(mut self, value: impl Into<String>) -> Self {
self.body.url = Some(value.into());
self
}
#[inline]
pub fn user(mut self, value: crate::user::User) -> Self {
self.body.user = Some(value.into());
self
}
}
/// Builder created by [`PullRequest::repo_list_pull_requests`](./struct.PullRequest.html#method.repo_list_pull_requests) method for a `GET` operation associated with `PullRequest`.
//
// `Owner`/`Repo` are zero-sized type-state markers; `repr(transparent)`
// guarantees the layout is exactly that of the container, which is what makes
// the `transmute`-based state transitions in the impl below sound.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct PullRequestGetBuilder<Owner, Repo> {
    inner: PullRequestGetBuilderContainer,
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
}

// Actual parameter storage; path parameters are `Option` even though required,
// because the type-state machinery (not the container) enforces presence.
#[derive(Debug, Default, Clone)]
struct PullRequestGetBuilderContainer {
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_state: Option<String>,
    param_sort: Option<String>,
    param_milestone: Option<i64>,
    param_labels: Option<crate::util::Delimited<i64, crate::util::Multi>>,
    param_page: Option<i64>,
    param_limit: Option<i64>,
}
impl<Owner, Repo> PullRequestGetBuilder<Owner, Repo> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> PullRequestGetBuilder<crate::generics::OwnerExists, Repo> {
        self.inner.param_owner = Some(value.into());
        // SAFETY: the struct is #[repr(transparent)] over the container and
        // the phantom parameters are zero-sized, so both instantiations have
        // identical layout; the transmute only changes the type-state marker.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> PullRequestGetBuilder<Owner, crate::generics::RepoExists> {
        self.inner.param_repo = Some(value.into());
        // SAFETY: same layout argument as in `owner` above.
        unsafe { std::mem::transmute(self) }
    }
    /// State of pull request: open or closed (optional)
    #[inline]
    pub fn state(mut self, value: impl Into<String>) -> Self {
        self.inner.param_state = Some(value.into());
        self
    }
    /// Type of sort
    #[inline]
    pub fn sort(mut self, value: impl Into<String>) -> Self {
        self.inner.param_sort = Some(value.into());
        self
    }
    /// ID of the milestone
    #[inline]
    pub fn milestone(mut self, value: impl Into<i64>) -> Self {
        self.inner.param_milestone = Some(value.into());
        self
    }
    /// Label IDs
    #[inline]
    pub fn labels(mut self, value: impl Iterator<Item = impl Into<i64>>) -> Self {
        self.inner.param_labels = Some(value.map(|value| value.into()).collect::<Vec<_>>().into());
        self
    }
    /// page number of results to return (1-based)
    #[inline]
    pub fn page(mut self, value: impl Into<i64>) -> Self {
        self.inner.param_page = Some(value.into());
        self
    }
    /// page size of results
    #[inline]
    pub fn limit(mut self, value: impl Into<i64>) -> Self {
        self.inner.param_limit = Some(value.into());
        self
    }
}
// `GET /repos/{owner}/{repo}/pulls` — only implemented for the fully-supplied
// type state (`OwnerExists`, `RepoExists`), so an unfinished builder cannot
// be sent.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for PullRequestGetBuilder<crate::generics::OwnerExists, crate::generics::RepoExists> {
    type Output = Vec<PullRequest>;
    const METHOD: http::Method = http::Method::GET;

    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        // The `expect`s encode an invariant: the `*Exists` type parameters
        // prove both path parameters were set before this impl applied.
        format!("/repos/{owner}/{repo}/pulls", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?")).into()
    }

    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
            // Scalar query parameters; `None`s are skipped by the client.
            .query(&[
                ("state", self.inner.param_state.as_ref().map(std::string::ToString::to_string)),
                ("sort", self.inner.param_sort.as_ref().map(std::string::ToString::to_string)),
                ("milestone", self.inner.param_milestone.as_ref().map(std::string::ToString::to_string)),
                ("page", self.inner.param_page.as_ref().map(std::string::ToString::to_string)),
                ("limit", self.inner.param_limit.as_ref().map(std::string::ToString::to_string))
            ])
            // `labels` is multi-valued: emitted as repeated `labels=<id>` pairs.
            .query({
                &self.inner.param_labels.as_ref().map(|v| {
                    v.iter().map(|v| ("labels", v.to_string())).collect::<Vec<_>>()
                }).unwrap_or_default()
            }))
    }
}
/// Builder created by [`PullRequest::repo_get_pull_request`](./struct.PullRequest.html#method.repo_get_pull_request) method for a `GET` operation associated with `PullRequest`.
//
// Same type-state pattern as `PullRequestGetBuilder`, with an extra `Index`
// marker for the PR number; `repr(transparent)` backs the transmutes below.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct PullRequestGetBuilder1<Owner, Repo, Index> {
    inner: PullRequestGetBuilder1Container,
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
    _param_index: core::marker::PhantomData<Index>,
}

#[derive(Debug, Default, Clone)]
struct PullRequestGetBuilder1Container {
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_index: Option<i64>,
}
impl<Owner, Repo, Index> PullRequestGetBuilder1<Owner, Repo, Index> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> PullRequestGetBuilder1<crate::generics::OwnerExists, Repo, Index> {
        self.inner.param_owner = Some(value.into());
        // SAFETY: #[repr(transparent)] over the container with zero-sized
        // phantom markers — both instantiations share one layout.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> PullRequestGetBuilder1<Owner, crate::generics::RepoExists, Index> {
        self.inner.param_repo = Some(value.into());
        // SAFETY: same layout argument as in `owner` above.
        unsafe { std::mem::transmute(self) }
    }
    /// index of the pull request to get
    #[inline]
    pub fn index(mut self, value: impl Into<i64>) -> PullRequestGetBuilder1<Owner, Repo, crate::generics::IndexExists> {
        self.inner.param_index = Some(value.into());
        // SAFETY: same layout argument as in `owner` above.
        unsafe { std::mem::transmute(self) }
    }
}
// `GET /repos/{owner}/{repo}/pulls/{index}` — sendable only once all three
// path parameters have been supplied (enforced by the type state).
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for PullRequestGetBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists> {
    type Output = PullRequest;
    const METHOD: http::Method = http::Method::GET;

    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        // `expect`s cannot fire: the `*Exists` markers prove all parameters set.
        format!("/repos/{owner}/{repo}/pulls/{index}", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), index=self.inner.param_index.as_ref().expect("missing parameter index?")).into()
    }
}
|
// svd2rust-generated accessors for the I2C FLTR (filter) register.
#[doc = "Register `FLTR` reader"]
pub type R = crate::R<FLTR_SPEC>;
#[doc = "Register `FLTR` writer"]
pub type W = crate::W<FLTR_SPEC>;
#[doc = "Field `DNF` reader - Digital noise filter"]
pub type DNF_R = crate::FieldReader<DNF_A>;

// Enumerated values of the 4-bit DNF field: 0 disables the digital filter,
// N (1..=15) filters glitches up to N * tI2CCLK.
#[doc = "Digital noise filter\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum DNF_A {
    #[doc = "0: Digital filter disabled"]
    NoFilter = 0,
    #[doc = "1: Digital filter enabled and filtering capability up to 1 tI2CCLK"]
    Filter1 = 1,
    #[doc = "2: Digital filter enabled and filtering capability up to 2 tI2CCLK"]
    Filter2 = 2,
    #[doc = "3: Digital filter enabled and filtering capability up to 3 tI2CCLK"]
    Filter3 = 3,
    #[doc = "4: Digital filter enabled and filtering capability up to 4 tI2CCLK"]
    Filter4 = 4,
    #[doc = "5: Digital filter enabled and filtering capability up to 5 tI2CCLK"]
    Filter5 = 5,
    #[doc = "6: Digital filter enabled and filtering capability up to 6 tI2CCLK"]
    Filter6 = 6,
    #[doc = "7: Digital filter enabled and filtering capability up to 7 tI2CCLK"]
    Filter7 = 7,
    #[doc = "8: Digital filter enabled and filtering capability up to 8 tI2CCLK"]
    Filter8 = 8,
    #[doc = "9: Digital filter enabled and filtering capability up to 9 tI2CCLK"]
    Filter9 = 9,
    #[doc = "10: Digital filter enabled and filtering capability up to 10 tI2CCLK"]
    Filter10 = 10,
    #[doc = "11: Digital filter enabled and filtering capability up to 11 tI2CCLK"]
    Filter11 = 11,
    #[doc = "12: Digital filter enabled and filtering capability up to 12 tI2CCLK"]
    Filter12 = 12,
    #[doc = "13: Digital filter enabled and filtering capability up to 13 tI2CCLK"]
    Filter13 = 13,
    #[doc = "14: Digital filter enabled and filtering capability up to 14 tI2CCLK"]
    Filter14 = 14,
    #[doc = "15: Digital filter enabled and filtering capability up to 15 tI2CCLK"]
    Filter15 = 15,
}

// Discriminants equal the hardware encoding, so the cast is lossless.
impl From<DNF_A> for u8 {
    #[inline(always)]
    fn from(variant: DNF_A) -> Self {
        variant as _
    }
}

impl crate::FieldSpec for DNF_A {
    type Ux = u8;
}
// Reader helpers for the DNF field: `variant()` decodes the raw bits, and
// one generated `is_*` predicate exists per enumerated value.
impl DNF_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DNF_A {
        match self.bits {
            0 => DNF_A::NoFilter,
            1 => DNF_A::Filter1,
            2 => DNF_A::Filter2,
            3 => DNF_A::Filter3,
            4 => DNF_A::Filter4,
            5 => DNF_A::Filter5,
            6 => DNF_A::Filter6,
            7 => DNF_A::Filter7,
            8 => DNF_A::Filter8,
            9 => DNF_A::Filter9,
            10 => DNF_A::Filter10,
            11 => DNF_A::Filter11,
            12 => DNF_A::Filter12,
            13 => DNF_A::Filter13,
            14 => DNF_A::Filter14,
            15 => DNF_A::Filter15,
            // Sound: the reader is constructed from the 4-bit field
            // (`self.bits & 0x0f` in `R::dnf`), so bits is always 0..=15.
            _ => unreachable!(),
        }
    }
    #[doc = "Digital filter disabled"]
    #[inline(always)]
    pub fn is_no_filter(&self) -> bool {
        *self == DNF_A::NoFilter
    }
    #[doc = "Digital filter enabled and filtering capability up to 1 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter1(&self) -> bool {
        *self == DNF_A::Filter1
    }
    #[doc = "Digital filter enabled and filtering capability up to 2 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter2(&self) -> bool {
        *self == DNF_A::Filter2
    }
    #[doc = "Digital filter enabled and filtering capability up to 3 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter3(&self) -> bool {
        *self == DNF_A::Filter3
    }
    #[doc = "Digital filter enabled and filtering capability up to 4 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter4(&self) -> bool {
        *self == DNF_A::Filter4
    }
    #[doc = "Digital filter enabled and filtering capability up to 5 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter5(&self) -> bool {
        *self == DNF_A::Filter5
    }
    #[doc = "Digital filter enabled and filtering capability up to 6 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter6(&self) -> bool {
        *self == DNF_A::Filter6
    }
    #[doc = "Digital filter enabled and filtering capability up to 7 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter7(&self) -> bool {
        *self == DNF_A::Filter7
    }
    #[doc = "Digital filter enabled and filtering capability up to 8 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter8(&self) -> bool {
        *self == DNF_A::Filter8
    }
    #[doc = "Digital filter enabled and filtering capability up to 9 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter9(&self) -> bool {
        *self == DNF_A::Filter9
    }
    #[doc = "Digital filter enabled and filtering capability up to 10 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter10(&self) -> bool {
        *self == DNF_A::Filter10
    }
    #[doc = "Digital filter enabled and filtering capability up to 11 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter11(&self) -> bool {
        *self == DNF_A::Filter11
    }
    #[doc = "Digital filter enabled and filtering capability up to 12 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter12(&self) -> bool {
        *self == DNF_A::Filter12
    }
    #[doc = "Digital filter enabled and filtering capability up to 13 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter13(&self) -> bool {
        *self == DNF_A::Filter13
    }
    #[doc = "Digital filter enabled and filtering capability up to 14 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter14(&self) -> bool {
        *self == DNF_A::Filter14
    }
    #[doc = "Digital filter enabled and filtering capability up to 15 tI2CCLK"]
    #[inline(always)]
    pub fn is_filter15(&self) -> bool {
        *self == DNF_A::Filter15
    }
}
#[doc = "Field `DNF` writer - Digital noise filter"]
// 4-bit safe writer ("Safe" = every 4-bit value is a valid variant, so no
// `unsafe` is needed to write); `O` is the field's bit offset.
pub type DNF_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 4, O, DNF_A>;

// One generated setter per enumerated value; each delegates to `variant`.
impl<'a, REG, const O: u8> DNF_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "Digital filter disabled"]
    #[inline(always)]
    pub fn no_filter(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::NoFilter)
    }
    #[doc = "Digital filter enabled and filtering capability up to 1 tI2CCLK"]
    #[inline(always)]
    pub fn filter1(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter1)
    }
    #[doc = "Digital filter enabled and filtering capability up to 2 tI2CCLK"]
    #[inline(always)]
    pub fn filter2(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter2)
    }
    #[doc = "Digital filter enabled and filtering capability up to 3 tI2CCLK"]
    #[inline(always)]
    pub fn filter3(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter3)
    }
    #[doc = "Digital filter enabled and filtering capability up to 4 tI2CCLK"]
    #[inline(always)]
    pub fn filter4(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter4)
    }
    #[doc = "Digital filter enabled and filtering capability up to 5 tI2CCLK"]
    #[inline(always)]
    pub fn filter5(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter5)
    }
    #[doc = "Digital filter enabled and filtering capability up to 6 tI2CCLK"]
    #[inline(always)]
    pub fn filter6(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter6)
    }
    #[doc = "Digital filter enabled and filtering capability up to 7 tI2CCLK"]
    #[inline(always)]
    pub fn filter7(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter7)
    }
    #[doc = "Digital filter enabled and filtering capability up to 8 tI2CCLK"]
    #[inline(always)]
    pub fn filter8(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter8)
    }
    #[doc = "Digital filter enabled and filtering capability up to 9 tI2CCLK"]
    #[inline(always)]
    pub fn filter9(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter9)
    }
    #[doc = "Digital filter enabled and filtering capability up to 10 tI2CCLK"]
    #[inline(always)]
    pub fn filter10(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter10)
    }
    #[doc = "Digital filter enabled and filtering capability up to 11 tI2CCLK"]
    #[inline(always)]
    pub fn filter11(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter11)
    }
    #[doc = "Digital filter enabled and filtering capability up to 12 tI2CCLK"]
    #[inline(always)]
    pub fn filter12(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter12)
    }
    #[doc = "Digital filter enabled and filtering capability up to 13 tI2CCLK"]
    #[inline(always)]
    pub fn filter13(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter13)
    }
    #[doc = "Digital filter enabled and filtering capability up to 14 tI2CCLK"]
    #[inline(always)]
    pub fn filter14(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter14)
    }
    #[doc = "Digital filter enabled and filtering capability up to 15 tI2CCLK"]
    #[inline(always)]
    pub fn filter15(self) -> &'a mut crate::W<REG> {
        self.variant(DNF_A::Filter15)
    }
}
#[doc = "Field `ANOFF` reader - Analog noise filter"]
pub type ANOFF_R = crate::BitReader<ANOFF_A>;

// Note the inverted sense: this is an analog-filter-OFF bit, so 0 means the
// filter is enabled and 1 means disabled.
#[doc = "Analog noise filter\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ANOFF_A {
    #[doc = "0: Analog noise filter enabled"]
    Enabled = 0,
    #[doc = "1: Analog noise filter disabled"]
    Disabled = 1,
}

impl From<ANOFF_A> for bool {
    #[inline(always)]
    fn from(variant: ANOFF_A) -> Self {
        variant as u8 != 0
    }
}

impl ANOFF_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ANOFF_A {
        match self.bits {
            false => ANOFF_A::Enabled,
            true => ANOFF_A::Disabled,
        }
    }
    #[doc = "Analog noise filter enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == ANOFF_A::Enabled
    }
    #[doc = "Analog noise filter disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == ANOFF_A::Disabled
    }
}

#[doc = "Field `ANOFF` writer - Analog noise filter"]
pub type ANOFF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ANOFF_A>;

impl<'a, REG, const O: u8> ANOFF_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Analog noise filter enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(ANOFF_A::Enabled)
    }
    #[doc = "Analog noise filter disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(ANOFF_A::Disabled)
    }
}
impl R {
    #[doc = "Bits 0:3 - Digital noise filter"]
    #[inline(always)]
    pub fn dnf(&self) -> DNF_R {
        // Mask the low nibble so the reader only ever sees 0..=15.
        DNF_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bit 4 - Analog noise filter"]
    #[inline(always)]
    pub fn anoff(&self) -> ANOFF_R {
        ANOFF_R::new(((self.bits >> 4) & 1) != 0)
    }
}
impl W {
    #[doc = "Bits 0:3 - Digital noise filter"]
    #[inline(always)]
    #[must_use]
    pub fn dnf(&mut self) -> DNF_W<FLTR_SPEC, 0> {
        DNF_W::new(self)
    }
    #[doc = "Bit 4 - Analog noise filter"]
    #[inline(always)]
    #[must_use]
    pub fn anoff(&mut self) -> ANOFF_W<FLTR_SPEC, 4> {
        ANOFF_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    // Safety: caller must not set reserved bits to values the hardware
    // forbids; the typed field writers above are the checked alternative.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "FLTR register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`fltr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fltr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Zero-sized marker type tying the generic register machinery to FLTR.
pub struct FLTR_SPEC;

impl crate::RegisterSpec for FLTR_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}

#[doc = "`read()` method returns [`fltr::R`](R) reader structure"]
impl crate::Readable for FLTR_SPEC {}

#[doc = "`write(|w| ..)` method takes [`fltr::W`](W) writer structure"]
impl crate::Writable for FLTR_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}

#[doc = "`reset()` method sets FLTR to value 0"]
impl crate::Resettable for FLTR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use helpers::Class;
use super::{Artifact, Build, BuildStatus, ShortBuild};
use action::CommonAction;
use changeset;
use user::ShortUser;
// Project macro: injects the fields/impls shared by all Jenkins build types
// ahead of the type-specific fields declared here.
build_with_common_fields_and_impl!(/// A `Build` from a MatrixProject
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MatrixBuild {
    /// Change set for this build
    pub change_set: changeset::CommonChangeSetList,
    /// Runs of each configuration
    pub runs: Vec<ShortBuild>,
    /// Which slave it was built on
    pub built_on: String,
    /// List of user ids who made a change since the last non-broken build
    pub culprits: Vec<ShortUser>,
});
// Maps the Jenkins `_class` discriminator string to this Rust type.
register_class!("hudson.matrix.MatrixBuild" => MatrixBuild);
// Intentionally empty: placeholder for MatrixBuild-specific methods.
impl MatrixBuild {}
// Same macro as above: a `MatrixRun` is one configuration's run of a
// `MatrixBuild`, so it shares the common build fields.
build_with_common_fields_and_impl!(/// A `Build` from a MatrixConfiguration
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MatrixRun {
    /// Change set for this build
    pub change_set: changeset::CommonChangeSetList,
    /// Which slave it was built on
    pub built_on: String,
    /// List of user ids who made a change since the last non-broken build
    pub culprits: Vec<ShortUser>,
});
// Maps the Jenkins `_class` discriminator string to this Rust type.
register_class!("hudson.matrix.MatrixRun" => MatrixRun);
// Intentionally empty: placeholder for MatrixRun-specific methods.
impl MatrixRun {}
|
pub mod english;
pub mod break_xor_cipher;
pub mod frequency_analysis;
#[cfg(test)]
mod tests;
|
use std::num::Wrapping;
/// Wrapping (two's-complement) addition of two `i32`s; overflow wraps
/// instead of panicking in debug builds.
pub fn test(a1: i32, a2: i32) -> i32 {
    let (x, y) = (Wrapping(a1), Wrapping(a2));
    (x + y).0
}

/// Holds a single value supplied at construction time.
/// (Non-snake-case names preserved: they are part of this file's public API.)
pub struct Foo {
    Field: i32,
}

impl Foo {
    /// Construct a `Foo` wrapping `field`.
    pub fn new(field: i32) -> Foo {
        Foo { Field: field }
    }
}

/// Unit-like type whose trait value is a fixed constant.
pub struct Bar {}

impl Bar {
    /// Construct an (empty) `Bar`.
    pub fn new() -> Bar {
        Bar {}
    }
}

/// Anything that can report an `i32`, plus a helper to upcast a concrete
/// value to a trait-object reference.
pub trait GetValueTrait {
    fn GetValue(&self) -> i32;

    /// Coerce `&self` into a `&dyn GetValueTrait` (requires a sized `Self`).
    fn ToDyn(&self) -> &dyn GetValueTrait
    where
        Self: std::marker::Sized,
    {
        self
    }
}

impl GetValueTrait for Foo {
    /// Reports the stored field.
    fn GetValue(&self) -> i32 {
        self.Field
    }
}

impl GetValueTrait for Bar {
    /// Always reports the constant 233.
    fn GetValue(&self) -> i32 {
        233
    }
}

/// Dynamic-dispatch entry point: read the value through the vtable.
pub fn GetValue(o: &dyn GetValueTrait) -> i32 {
    o.GetValue()
}

/// Exported entry point. The misspelled name is kept verbatim: the function
/// is `#[no_mangle]`, so renaming it would change the exported symbol.
#[no_mangle]
pub fn rust_mian() -> i32 {
    let foo = Foo::new(123);
    let bar = Bar::new();
    test(GetValue(foo.ToDyn()), GetValue(bar.ToDyn()))
}
|
/// Next physical address to hand out. Atomic so concurrent callers cannot be
/// handed overlapping ranges — the previous `static mut` bump pointer was an
/// unsynchronized read-modify-write (a data race if ever called from more
/// than one context).
static FIRST_FREE_PAGE: core::sync::atomic::AtomicUsize =
    core::sync::atomic::AtomicUsize::new(0x800000);

/// Bump-allocates `num` contiguous 4 KiB pages and zeroes them through the
/// higher-half mapping at `0xFFFFFFFF_80000000`, returning the physical
/// address of the first page. Memory is never reclaimed.
pub fn alloc_page(num: usize) -> usize {
    use core::sync::atomic::Ordering;
    let bytes = num * 4096;
    // fetch_add returns the previous value, i.e. the start of our exclusive
    // range; no other caller can observe the same start address.
    let result = FIRST_FREE_PAGE.fetch_add(bytes, Ordering::SeqCst);
    unsafe {
        // SAFETY: [result, result + bytes) was exclusively reserved by the
        // fetch_add above; the constant offset maps it into kernel space.
        ::mem::memset(result + 0xFFFFFFFF_80000000, 0, bytes);
    }
    result
}
|
use std::ops::Mul;
/// Shapes that can report their area as a value of type `T`.
trait HasArea<T> {
    fn area(&self) -> T;
}

/// An axis-aligned square: `(x, y)` is its position, `side` its edge length.
struct Square<T> {
    x: T,
    y: T,
    side: T,
}

impl<T> HasArea<T> for Square<T>
where
    T: Mul<Output = T> + Copy,
{
    fn area(&self) -> T {
        // A square's area is side², independent of its position.
        // (Previously computed `self.y * self.x`, i.e. the position
        // coordinates, leaving `side` unused — a bug.)
        self.side * self.side
    }
}
/// Build an example square and report its area.
fn main() {
    let square = Square {
        x: 3.0f64,
        y: 1.0f64,
        side: 3.0f64,
    };
    let area = square.area();
    println!("Area of s: {}", area);
}
|
use trillium::http_types::{auth::BasicAuth as AuthHeader, StatusCode};
use trillium::{async_trait, Conn, Handler};
/// Trillium handler enforcing HTTP Basic authentication against a single
/// fixed credential pair.
pub struct BasicAuth {
    username: String,
    password: String,
    // Optional realm advertised in the `www-authenticate` challenge.
    realm: Option<String>,
}
impl BasicAuth {
    /// Build a handler accepting exactly this username/password pair, with
    /// no realm in the challenge.
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
            realm: None,
        }
    }

    /// Set the realm reported in the `www-authenticate` challenge.
    /// (Added: the `realm` field previously could never be populated.)
    pub fn with_realm(mut self, realm: impl Into<String>) -> Self {
        self.realm = Some(realm.into());
        self
    }

    /// True when the request carries a Basic `Authorization` header whose
    /// credentials match exactly; missing or malformed headers are rejected.
    ///
    /// NOTE(review): `==` on secrets is not constant-time; consider a
    /// constant-time comparison if timing attacks are in scope.
    pub fn is_allowed(&self, conn: &Conn) -> bool {
        if let Ok(Some(auth)) = AuthHeader::from_headers(conn.headers()) {
            auth.username() == self.username && auth.password() == self.password
        } else {
            false
        }
    }

    /// The `www-authenticate` challenge value. Per RFC 7617 the realm
    /// parameter is a quoted-string, so it is emitted in double quotes
    /// (previously emitted unquoted, which is not spec-conformant).
    pub fn www_authenticate(&self) -> String {
        match self.realm {
            Some(ref realm) => format!("Basic realm=\"{}\"", realm),
            None => String::from("Basic"),
        }
    }

    /// Respond 401 with a challenge header and halt the handler chain.
    /// (Fixed: the parameter list previously contained a stray
    /// `clippy::dbg_macro` token sequence, which does not compile.)
    pub fn deny(&self, conn: Conn) -> Conn {
        conn.with_status(StatusCode::Unauthorized)
            .with_header(("www-authenticate", &*self.www_authenticate()))
            .halt()
    }
}
#[async_trait]
impl Handler for BasicAuth {
    /// Pass authorized requests through untouched; everything else receives
    /// a 401 challenge and the handler chain is halted.
    async fn run(&self, conn: Conn) -> Conn {
        if self.is_allowed(&conn) {
            conn
        } else {
            // Fixed: stray `clippy::dbg_macro` argument removed from the call.
            self.deny(conn)
        }
    }
}
|
use serde::Deserialize;
use crate::{bson::Document, options::ClientOptions, test::run_spec_test};
/// One spec file of the MongoDB read/write-concern connection-string tests.
#[derive(Debug, Deserialize)]
struct TestFile {
    pub tests: Vec<TestCase>,
}

/// A single case: parse `uri` and, when present, compare the resulting
/// read/write concern documents against the expectations.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TestCase {
    pub description: String,
    pub uri: String,
    // Whether `ClientOptions::parse` is expected to succeed at all.
    pub valid: bool,
    pub read_concern: Option<Document>,
    pub write_concern: Option<Document>,
}
/// Rename legacy write-concern keys from the spec files to the forms the
/// driver produces: `wtimeout` -> `wtimeoutMS` and `j` -> `journal`.
/// Values are moved unchanged; keys that are absent are left alone.
fn normalize_write_concern_doc(mut write_concern_doc: Document) -> Document {
    for &(legacy, canonical) in &[("wtimeout", "wtimeoutMS"), ("j", "journal")] {
        if let Some(value) = write_concern_doc.remove(legacy) {
            write_concern_doc.insert(canonical, value);
        }
    }
    write_concern_doc
}
/// Run every case in one spec file: parse the URI, check validity matches the
/// expectation, and compare read/write concern documents when the case
/// specifies them.
async fn run_connection_string_test(test_file: TestFile) {
    for test_case in test_file.tests {
        match ClientOptions::parse(&test_case.uri).await {
            Ok(options) => {
                // A URI the spec marks invalid must not parse successfully.
                assert!(test_case.valid);
                if let Some(ref expected_read_concern) = test_case.read_concern {
                    // Rebuild a document from the parsed read concern; when
                    // none was parsed the empty document must equal the
                    // expectation.
                    let mut actual_read_concern = Document::new();
                    if let Some(client_read_concern) = options.read_concern {
                        actual_read_concern.insert("level", client_read_concern.level.as_str());
                    }
                    assert_eq!(
                        &actual_read_concern, expected_read_concern,
                        "{}",
                        test_case.description
                    );
                }
                if let Some(ref write_concern) = test_case.write_concern {
                    // Serialize the parsed write concern (empty doc when
                    // absent) and normalize legacy key names before comparing.
                    assert_eq!(
                        &normalize_write_concern_doc(
                            options
                                .write_concern
                                .map(|w| super::write_concern_to_document(&w)
                                    .expect(&test_case.description))
                                .unwrap_or_default()
                        ),
                        write_concern,
                        "{}",
                        test_case.description
                    );
                }
            }
            Err(_) => {
                // Parse failures are only acceptable for invalid cases.
                assert!(!test_case.valid, "{}", test_case.description);
            }
        };
    }
}
/// Execute the "read-write-concern/connection-string" spec files under
/// whichever async runtime feature the crate was built with.
#[cfg_attr(feature = "tokio-runtime", tokio::test)]
#[cfg_attr(feature = "async-std-runtime", async_std::test)]
async fn run() {
    run_spec_test(
        &["read-write-concern", "connection-string"],
        run_connection_string_test,
    )
    .await;
}
|
/// Error returned when a jump-condition encoding is out of range.
pub enum JumpConditionError {
    InvalidLookupInput,
}

type JumpConditionValue = &'static str;
type JumpConditionLookupResult = Result<JumpConditionValue, JumpConditionError>;

// Mnemonics for the four CPU jump conditions.
pub const NZ: JumpConditionValue = "NZ";
pub const Z: JumpConditionValue = "Z";
pub const NC: JumpConditionValue = "NC";
pub const C: JumpConditionValue = "C";

/// Decode a two-bit jump-condition field (`0b00..=0b11`) to its mnemonic;
/// any larger value yields `InvalidLookupInput`.
pub fn lookup_jump_condition(input: u8) -> JumpConditionLookupResult {
    // Table indexed by the encoding itself; out-of-range indices miss and
    // fall through to the error.
    const TABLE: [JumpConditionValue; 4] = [NZ, Z, NC, C];
    TABLE
        .get(usize::from(input))
        .copied()
        .ok_or(JumpConditionError::InvalidLookupInput)
}
|
use ash::version::DeviceV1_0;
use ash::{vk, Device};
use rustc_hash::FxHashMap;
use std::ffi::CString;
use anyhow::Result;
use crate::vulkan::render_pass::Framebuffers;
use crate::vulkan::texture::{Gradients, Texture};
use crate::vulkan::GfaestusVk;
use super::create_shader_module;
/// Vulkan resources for rendering the egui GUI: two graphics pipelines (one
/// for the single-channel egui font texture, one for RGBA user textures),
/// the descriptor machinery to bind those textures, and the vertex buffers.
pub struct GuiPipeline {
    descriptor_pool: vk::DescriptorPool,
    descriptor_set_layout: vk::DescriptorSetLayout,
    texture_sets: Vec<vk::DescriptorSet>,
    sampler: vk::Sampler,
    // Descriptor set + texture for the egui font atlas; the texture starts
    // as a null placeholder and is filled by `upload_egui_texture`.
    egui_texture_set: vk::DescriptorSet,
    egui_texture: Texture,
    // egui's texture version, used to detect when a re-upload is needed.
    egui_texture_version: u64,
    // Maps egui user-texture ids to their descriptor sets.
    texture_set_map: FxHashMap<u64, vk::DescriptorSet>,
    pub vertices: GuiVertices,
    tex_2d_pipeline_layout: vk::PipelineLayout,
    tex_2d_pipeline: vk::Pipeline,
    tex_rgba_pipeline_layout: vk::PipelineLayout,
    tex_rgba_pipeline: vk::Pipeline,
    device: Device,
}
impl GuiPipeline {
    /// Create the GUI pipeline: descriptor pool/layout, the two pipelines
    /// (differing only in fragment shader), a nearest-filtered sampler, and
    /// empty vertex state. The egui font texture itself is uploaded later via
    /// `upload_egui_texture`.
    pub fn new(
        app: &super::super::GfaestusVk,
        render_pass: vk::RenderPass,
    ) -> Result<Self> {
        let device = app.vk_context().device();
        let desc_set_layout = Self::create_descriptor_set_layout(device)?;
        // Upper bound on concurrently bound GUI textures (font + user images).
        let max_texture_count = 64;
        let descriptor_pool = {
            let sampler_size = vk::DescriptorPoolSize {
                ty: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
                descriptor_count: max_texture_count,
            };
            let pool_sizes = [sampler_size];
            let pool_info = vk::DescriptorPoolCreateInfo::builder()
                .pool_sizes(&pool_sizes)
                .max_sets(max_texture_count)
                .build();
            unsafe { device.create_descriptor_pool(&pool_info, None) }
        }?;
        // Allocate the single descriptor set used for the egui font texture.
        let egui_texture_sets = {
            let layouts = vec![desc_set_layout];
            let alloc_info = vk::DescriptorSetAllocateInfo::builder()
                .descriptor_pool(descriptor_pool)
                .set_layouts(&layouts)
                .build();
            unsafe { device.allocate_descriptor_sets(&alloc_info) }
        }?;
        // Two pipelines over the same layout, differing only in the fragment
        // shader used to sample the bound texture.
        let (tex_2d_pipeline, tex_2d_pipeline_layout) = Self::create_pipeline(
            device,
            render_pass,
            desc_set_layout,
            crate::load_shader!("gui/gui_2d.frag.spv"),
        );
        let (tex_rgba_pipeline, tex_rgba_pipeline_layout) =
            Self::create_pipeline(
                device,
                render_pass,
                desc_set_layout,
                crate::load_shader!("gui/gui_rgba.frag.spv"),
            );
        // Nearest filtering with clamped edges: pixel-exact UI sampling.
        let sampler = {
            let sampler_info = vk::SamplerCreateInfo::builder()
                .mag_filter(vk::Filter::NEAREST)
                .min_filter(vk::Filter::NEAREST)
                .address_mode_u(vk::SamplerAddressMode::CLAMP_TO_EDGE)
                .address_mode_v(vk::SamplerAddressMode::CLAMP_TO_EDGE)
                .address_mode_w(vk::SamplerAddressMode::CLAMP_TO_EDGE)
                .anisotropy_enable(false)
                .border_color(vk::BorderColor::INT_OPAQUE_BLACK)
                .unnormalized_coordinates(false)
                .mipmap_mode(vk::SamplerMipmapMode::NEAREST)
                .mip_lod_bias(0.0)
                .min_lod(0.0)
                .max_lod(1.0)
                .build();
            unsafe { device.create_sampler(&sampler_info, None) }
        }?;
        // Placeholder; replaced by the first `upload_egui_texture` call.
        let egui_texture = Texture::null();
        let vertices = GuiVertices::new(device);
        let texture_set_map = FxHashMap::default();
        Ok(Self {
            descriptor_pool,
            descriptor_set_layout: desc_set_layout,
            texture_sets: Vec::new(),
            sampler,
            egui_texture_set: egui_texture_sets[0],
            egui_texture,
            egui_texture_version: 0,
            texture_set_map,
            vertices,
            tex_2d_pipeline_layout,
            tex_2d_pipeline,
            tex_rgba_pipeline_layout,
            tex_rgba_pipeline,
            device: device.clone(),
        })
    }
    /// Record the GUI render pass into `cmd_buf`: one indexed draw per
    /// pre-uploaded mesh range in `self.vertices`, with per-range scissor
    /// rectangles and the pipeline/descriptor set chosen by the range's
    /// egui texture id.
    ///
    /// The caller must have begun `cmd_buf` and later submits it; panics if a
    /// mesh references a user texture missing from `texture_set_map`.
    pub fn draw(
        &self,
        cmd_buf: vk::CommandBuffer,
        render_pass: vk::RenderPass,
        framebuffers: &Framebuffers,
        viewport_dims: [f32; 2],
    ) -> Result<()> {
        let device = &self.device;
        // No clears: the GUI is composited over the existing frame contents.
        let clear_values = [];
        let extent = vk::Extent2D {
            width: viewport_dims[0] as u32,
            height: viewport_dims[1] as u32,
        };
        let render_pass_begin_info = vk::RenderPassBeginInfo::builder()
            .render_pass(render_pass)
            .framebuffer(framebuffers.gui)
            .render_area(vk::Rect2D {
                offset: vk::Offset2D { x: 0, y: 0 },
                extent,
            })
            .clear_values(&clear_values)
            .build();
        unsafe {
            device.cmd_begin_render_pass(
                cmd_buf,
                &render_pass_begin_info,
                vk::SubpassContents::INLINE,
            )
        };
        let vx_bufs = [self.vertices.vertex_buffer];
        // Screen-size push constants shared by every draw below.
        let pc_bytes = {
            let push_constants = GuiPushConstants::new(viewport_dims);
            push_constants.bytes()
        };
        for (ix, &(start, ix_count)) in self.vertices.ranges.iter().enumerate()
        {
            // Skip empty mesh ranges.
            if ix_count == 0 {
                continue;
            }
            let vx_offset = self.vertices.vertex_offsets[ix];
            // Clip rect from egui, applied as a scissor for this range.
            let clip = self.vertices.clips[ix];
            let offset = vk::Offset2D {
                x: clip.min.x as i32,
                y: clip.min.y as i32,
            };
            let extent = vk::Extent2D {
                width: (clip.max.x - clip.min.x) as u32,
                height: (clip.max.y - clip.min.y) as u32,
            };
            let scissor = vk::Rect2D { offset, extent };
            let scissors = [scissor];
            let texture_id = self.vertices.texture_ids[ix];
            unsafe {
                device.cmd_set_scissor(cmd_buf, 0, &scissors);
                let offsets = [0];
                device.cmd_bind_vertex_buffers(cmd_buf, 0, &vx_bufs, &offsets);
                // `start` is an index count into a u32 index buffer, hence
                // the * 4 byte offset.
                device.cmd_bind_index_buffer(
                    cmd_buf,
                    self.vertices.index_buffer,
                    (start * 4) as vk::DeviceSize,
                    vk::IndexType::UINT32,
                );
                match texture_id {
                    // Font atlas: single-channel pipeline + egui's own set.
                    egui::TextureId::Egui => {
                        device.cmd_bind_pipeline(
                            cmd_buf,
                            vk::PipelineBindPoint::GRAPHICS,
                            self.tex_2d_pipeline,
                        );
                        let desc_sets = [self.egui_texture_set];
                        device.cmd_bind_descriptor_sets(
                            cmd_buf,
                            vk::PipelineBindPoint::GRAPHICS,
                            self.tex_2d_pipeline_layout,
                            0,
                            &desc_sets,
                            &[],
                        );
                        use vk::ShaderStageFlags as Flags;
                        device.cmd_push_constants(
                            cmd_buf,
                            self.tex_2d_pipeline_layout,
                            Flags::VERTEX,
                            0,
                            &pc_bytes,
                        );
                        device.cmd_draw_indexed(
                            cmd_buf,
                            ix_count,
                            1,
                            0,
                            vx_offset as i32,
                            0,
                        )
                    }
                    // Application-registered texture: RGBA pipeline + the set
                    // registered for that id.
                    egui::TextureId::User(texture_id) => {
                        device.cmd_bind_pipeline(
                            cmd_buf,
                            vk::PipelineBindPoint::GRAPHICS,
                            self.tex_rgba_pipeline,
                        );
                        let desc_set = self
                            .texture_set_map
                            .get(&texture_id)
                            .expect("GUI tried to use missing texture");
                        let desc_sets = [*desc_set];
                        device.cmd_bind_descriptor_sets(
                            cmd_buf,
                            vk::PipelineBindPoint::GRAPHICS,
                            self.tex_rgba_pipeline_layout,
                            0,
                            &desc_sets,
                            &[],
                        );
                        use vk::ShaderStageFlags as Flags;
                        device.cmd_push_constants(
                            cmd_buf,
                            self.tex_rgba_pipeline_layout,
                            Flags::VERTEX,
                            0,
                            &pc_bytes,
                        );
                        device.cmd_draw_indexed(
                            cmd_buf,
                            ix_count,
                            1,
                            0,
                            vx_offset as i32,
                            0,
                        )
                    }
                }
            };
        }
        unsafe { device.cmd_end_render_pass(cmd_buf) };
        Ok(())
    }
    /// Destroy the Vulkan objects owned by this pipeline. Call with the
    /// device idle; `self` must not be used afterwards.
    ///
    /// NOTE(review): `self.descriptor_pool` is not destroyed here — confirm
    /// it is freed elsewhere, otherwise it leaks.
    pub fn destroy(&mut self, allocator: &vk_mem::Allocator) {
        let device = &self.device;
        unsafe {
            // SAFETY: every handle below was created from `self.device`, and
            // the caller guarantees no in-flight command buffer still
            // references them.
            device.destroy_descriptor_set_layout(
                self.descriptor_set_layout,
                None,
            );
            device.destroy_sampler(self.sampler, None);
            device.destroy_pipeline(self.tex_2d_pipeline, None);
            device.destroy_pipeline_layout(self.tex_2d_pipeline_layout, None);
            device.destroy_pipeline(self.tex_rgba_pipeline, None);
            device.destroy_pipeline_layout(self.tex_rgba_pipeline_layout, None);
            self.vertices.destroy(allocator);
            // The font texture only exists after the first upload.
            if !self.egui_texture.is_null() {
                self.egui_texture.destroy(device);
            }
        }
    }
/// Version of the most recently uploaded egui font texture; callers compare
/// this against `egui::Texture::version` to decide whether to re-upload.
pub fn egui_texture_version(&self) -> u64 {
    self.egui_texture_version
}
/// True when no egui font texture has been uploaded yet (or it was destroyed).
pub fn egui_texture_is_null(&self) -> bool {
    self.egui_texture.is_null()
}
/// Uploads egui's font texture to the GPU and points the egui descriptor set
/// at it, destroying any previously uploaded version first.
///
/// NOTE(review): the old texture is destroyed immediately — presumably the
/// caller guarantees no in-flight command buffer still samples it; confirm.
pub fn upload_egui_texture(
    &mut self,
    app: &super::super::GfaestusVk,
    command_pool: vk::CommandPool,
    transition_queue: vk::Queue,
    texture: &egui::Texture,
) -> Result<()> {
    // Free the previous font texture before replacing it.
    if !self.egui_texture_is_null() {
        self.egui_texture.destroy(&app.vk_context.device());
    }
    let width = texture.width;
    let height = texture.height;
    let pixels = &texture.pixels;
    let version = texture.version;
    let texture = Texture::from_pixel_bytes(
        app,
        command_pool,
        transition_queue,
        width,
        height,
        pixels,
    )?;
    self.egui_texture = texture;
    // Remember the version so `egui_texture_version` can detect staleness.
    self.egui_texture_version = version;
    // Re-point the dedicated egui descriptor set at the new image view.
    let desc_write = self.egui_descriptor_write();
    let desc_writes = [desc_write];
    let device = app.vk_context().device();
    unsafe { device.update_descriptor_sets(&desc_writes, &[]) }
    Ok(())
}
/// Builds the descriptor write that binds the egui font texture + sampler to
/// binding 0 of `egui_texture_set`.
///
/// NOTE(review): ash builders store raw pointers, so the returned write
/// points at the local `image_infos` array, which is dropped when this
/// function returns — confirm the caller submits the write in a way that
/// doesn't read the dangling pointer, or restructure so the array outlives
/// the `update_descriptor_sets` call.
fn egui_descriptor_write(&self) -> vk::WriteDescriptorSet {
    let image_info = vk::DescriptorImageInfo::builder()
        .image_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)
        .image_view(self.egui_texture.view)
        .sampler(self.sampler)
        .build();
    let image_infos = [image_info];
    let sampler_descriptor_write = vk::WriteDescriptorSet::builder()
        .dst_set(self.egui_texture_set)
        .dst_binding(0)
        .dst_array_element(0)
        .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
        .image_info(&image_infos)
        .build();
    sampler_descriptor_write
}
/// Registers a user texture with the GUI renderer: allocates a descriptor
/// set for it, writes the image/sampler binding, and returns the
/// `egui::TextureId` under which draw calls can reference it.
///
/// NOTE(review): the id is derived from `texture_sets.len()`, so ids are
/// only stable as long as textures are never removed — confirm there is no
/// removal path elsewhere.
pub fn add_texture(
    &mut self,
    app: &GfaestusVk,
    texture: Texture,
) -> Result<egui::TextureId> {
    let device = app.vk_context().device();
    // Next sequential user-texture id.
    let id = self.texture_sets.len() as u64;
    let tex_id = egui::TextureId::User(id);
    // Allocate one descriptor set from the shared pool.
    let texture_sets = {
        let layouts = vec![self.descriptor_set_layout];
        let alloc_info = vk::DescriptorSetAllocateInfo::builder()
            .descriptor_pool(self.descriptor_pool)
            .set_layouts(&layouts)
            .build();
        unsafe { device.allocate_descriptor_sets(&alloc_info) }
    }?;
    let image_info = vk::DescriptorImageInfo::builder()
        .image_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)
        .image_view(texture.view)
        .sampler(self.sampler)
        .build();
    let image_infos = [image_info];
    let sampler_descriptor_write = vk::WriteDescriptorSet::builder()
        .dst_set(texture_sets[0])
        .dst_binding(0)
        .dst_array_element(0)
        .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
        .image_info(&image_infos)
        .build();
    let writes = [sampler_descriptor_write];
    unsafe { device.update_descriptor_sets(&writes, &[]) }
    // Track the set both by insertion order and by id for draw-time lookup.
    self.texture_sets.push(texture_sets[0]);
    self.texture_set_map.insert(id, texture_sets[0]);
    Ok(tex_id)
}
/// Builds a descriptor write for a gradient texture's image/sampler binding.
///
/// NOTE(review): `dst_set` is left as `DescriptorSet::null()`, so this write
/// is not usable as returned — presumably a caller (not visible here) fills
/// in the destination set; confirm. As with `egui_descriptor_write`, the
/// returned write points at the local `image_infos` array.
fn gradient_descriptor_write(
    &self,
    texture_id: egui::TextureId,
    gradients: &Gradients,
) -> vk::WriteDescriptorSet {
    let texture = gradients.gradient_from_id(texture_id).unwrap();
    let image_info = vk::DescriptorImageInfo::builder()
        .image_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)
        .image_view(texture.texture.view)
        .sampler(self.sampler)
        .build();
    let image_infos = [image_info];
    let sampler_descriptor_write = vk::WriteDescriptorSet::builder()
        .dst_set(vk::DescriptorSet::null())
        .dst_binding(0)
        .dst_array_element(0)
        .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
        .image_info(&image_infos)
        .build();
    sampler_descriptor_write
}
fn layout_binding() -> vk::DescriptorSetLayoutBinding {
use vk::ShaderStageFlags as Stages;
vk::DescriptorSetLayoutBinding::builder()
.binding(0)
.descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
.descriptor_count(1)
.stage_flags(Stages::FRAGMENT)
.build()
}
/// Creates the one-binding descriptor set layout used by the GUI pipelines.
fn create_descriptor_set_layout(
    device: &Device,
) -> Result<vk::DescriptorSetLayout> {
    let bindings = [Self::layout_binding()];
    let layout_info = vk::DescriptorSetLayoutCreateInfo::builder()
        .bindings(&bindings)
        .build();
    Ok(unsafe { device.create_descriptor_set_layout(&layout_info, None) }?)
}
/// Builds a GUI graphics pipeline sharing the common `gui.vert.spv` vertex
/// shader, with the fragment shader supplied by the caller (`frag_src`) —
/// used once each for the 2D-font and RGBA-texture pipelines.
///
/// Returns the pipeline together with its freshly created layout; shader
/// modules are destroyed before returning since the pipeline owns compiled
/// copies.
fn create_pipeline(
    device: &Device,
    render_pass: vk::RenderPass,
    descriptor_set_layout: vk::DescriptorSetLayout,
    frag_src: Vec<u32>,
) -> (vk::Pipeline, vk::PipelineLayout) {
    // Shader stages: shared vertex shader + caller-provided fragment shader.
    let vert_src = crate::load_shader!("gui/gui.vert.spv");
    let vert_module = create_shader_module(device, &vert_src);
    let frag_module = create_shader_module(device, &frag_src);
    let entry_point = CString::new("main").unwrap();
    let vert_state_info = vk::PipelineShaderStageCreateInfo::builder()
        .stage(vk::ShaderStageFlags::VERTEX)
        .module(vert_module)
        .name(&entry_point)
        .build();
    let frag_state_info = vk::PipelineShaderStageCreateInfo::builder()
        .stage(vk::ShaderStageFlags::FRAGMENT)
        .module(frag_module)
        .name(&entry_point)
        .build();
    let shader_state_infos = [vert_state_info, frag_state_info];
    // Vertex input matches GuiVertex (position/uv/color, interleaved).
    let vert_binding_descs = [GuiVertex::get_binding_desc()];
    let vert_attr_descs = GuiVertex::get_attribute_descs();
    let vert_input_info = vk::PipelineVertexInputStateCreateInfo::builder()
        .vertex_binding_descriptions(&vert_binding_descs)
        .vertex_attribute_descriptions(&vert_attr_descs)
        .build();
    let input_assembly_info =
        vk::PipelineInputAssemblyStateCreateInfo::builder()
            .topology(vk::PrimitiveTopology::TRIANGLE_LIST)
            .primitive_restart_enable(false)
            .build();
    // Viewport and scissor are dynamic (set per draw in the render loop),
    // so only the counts are declared here.
    let viewport_info = vk::PipelineViewportStateCreateInfo::builder()
        .viewport_count(1)
        .scissor_count(1)
        .build();
    let dynamic_states = {
        use vk::DynamicState as DS;
        [DS::VIEWPORT, DS::SCISSOR]
    };
    let dynamic_state_info = vk::PipelineDynamicStateCreateInfo::builder()
        .dynamic_states(&dynamic_states)
        .build();
    // No culling: egui emits screen-space triangles in arbitrary winding.
    let rasterizer_info =
        vk::PipelineRasterizationStateCreateInfo::builder()
            .depth_clamp_enable(false)
            .rasterizer_discard_enable(false)
            .polygon_mode(vk::PolygonMode::FILL)
            .line_width(1.0)
            .cull_mode(vk::CullModeFlags::NONE)
            .front_face(vk::FrontFace::COUNTER_CLOCKWISE)
            .depth_bias_enable(false)
            .depth_bias_constant_factor(0.0)
            .depth_bias_clamp(0.0)
            .depth_bias_slope_factor(0.0)
            .build();
    let multisampling_info =
        vk::PipelineMultisampleStateCreateInfo::builder()
            .sample_shading_enable(false)
            .rasterization_samples(vk::SampleCountFlags::TYPE_1)
            .min_sample_shading(1.0)
            .alpha_to_coverage_enable(false)
            .alpha_to_one_enable(false)
            .build();
    // Standard premultiplied-style alpha blending for UI compositing.
    let color_blend_attachment =
        vk::PipelineColorBlendAttachmentState::builder()
            .color_write_mask(vk::ColorComponentFlags::all())
            .blend_enable(true)
            .src_color_blend_factor(vk::BlendFactor::SRC_ALPHA)
            .dst_color_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA)
            .color_blend_op(vk::BlendOp::ADD)
            .src_alpha_blend_factor(vk::BlendFactor::SRC_ALPHA)
            .dst_alpha_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA)
            .alpha_blend_op(vk::BlendOp::ADD)
            .build();
    let color_blend_attachments = [color_blend_attachment];
    let color_blending_info =
        vk::PipelineColorBlendStateCreateInfo::builder()
            .logic_op_enable(false)
            .logic_op(vk::LogicOp::COPY)
            .attachments(&color_blend_attachments)
            .blend_constants([0.0, 0.0, 0.0, 0.0])
            .build();
    // Layout: one descriptor set + an 8-byte vertex-stage push constant
    // (viewport width/height, see GuiPushConstants::bytes).
    let layout = {
        let layouts = [descriptor_set_layout];
        let pc_range = vk::PushConstantRange::builder()
            .stage_flags(vk::ShaderStageFlags::VERTEX)
            .offset(0)
            .size(8)
            .build();
        let pc_ranges = [pc_range];
        let layout_info = vk::PipelineLayoutCreateInfo::builder()
            .set_layouts(&layouts)
            .push_constant_ranges(&pc_ranges)
            .build();
        unsafe {
            device.create_pipeline_layout(&layout_info, None).unwrap()
        }
    };
    let pipeline_info = vk::GraphicsPipelineCreateInfo::builder()
        .stages(&shader_state_infos)
        .vertex_input_state(&vert_input_info)
        .input_assembly_state(&input_assembly_info)
        .viewport_state(&viewport_info)
        .dynamic_state(&dynamic_state_info)
        .rasterization_state(&rasterizer_info)
        .multisample_state(&multisampling_info)
        .color_blend_state(&color_blending_info)
        .layout(layout)
        .render_pass(render_pass)
        .subpass(0)
        .build();
    let pipeline_infos = [pipeline_info];
    let pipeline = unsafe {
        device
            .create_graphics_pipelines(
                vk::PipelineCache::null(),
                &pipeline_infos,
                None,
            )
            .unwrap()[0]
    };
    // Modules are no longer needed once the pipeline is created.
    unsafe {
        device.destroy_shader_module(vert_module, None);
        device.destroy_shader_module(frag_module, None);
    }
    (pipeline, layout)
}
}
/// GPU-side storage for one frame's egui meshes, plus the per-mesh metadata
/// needed to draw them (index ranges, base vertices, clip rects, textures).
pub struct GuiVertices {
    // Single shared vertex buffer holding all meshes' vertices.
    vertex_buffer: vk::Buffer,
    vertex_alloc: vk_mem::Allocation,
    vertex_alloc_info: Option<vk_mem::AllocationInfo>,
    // Single shared u32 index buffer.
    index_buffer: vk::Buffer,
    index_alloc: vk_mem::Allocation,
    index_alloc_info: Option<vk_mem::AllocationInfo>,
    // Per-mesh (first index, index count) into the shared index buffer.
    ranges: Vec<(u32, u32)>,
    // Per-mesh base vertex passed to cmd_draw_indexed.
    vertex_offsets: Vec<u32>,
    // Per-mesh clip rectangle, used directly as the scissor at draw time.
    clips: Vec<egui::Rect>,
    // Per-mesh texture binding (egui font texture or user texture).
    texture_ids: Vec<egui::TextureId>,
    device: Device,
}
impl GuiVertices {
    /// Creates an empty store; buffers are allocated lazily by
    /// [`Self::upload_meshes`].
    pub fn new(device: &Device) -> Self {
        Self {
            vertex_buffer: vk::Buffer::null(),
            vertex_alloc: vk_mem::Allocation::null(),
            vertex_alloc_info: None,
            index_buffer: vk::Buffer::null(),
            index_alloc: vk_mem::Allocation::null(),
            index_alloc_info: None,
            ranges: Vec::new(),
            vertex_offsets: Vec::new(),
            clips: Vec::new(),
            texture_ids: Vec::new(),
            device: device.clone(),
        }
    }

    /// True when at least one mesh has been uploaded since the last destroy.
    pub fn has_vertices(&self) -> bool {
        !self.ranges.is_empty()
    }

    /// Replaces the stored meshes: flattens all clipped meshes into one
    /// vertex buffer and one u32 index buffer, recording per-mesh index
    /// ranges, base vertices, clip rects, and texture ids for drawing.
    pub fn upload_meshes(
        &mut self,
        app: &super::super::GfaestusVk,
        meshes: &[egui::ClippedMesh],
    ) -> Result<()> {
        // Release last frame's buffers and metadata before rebuilding.
        self.destroy(&app.allocator);
        let mut vertices: Vec<GuiVertex> = Vec::new();
        let mut indices: Vec<u32> = Vec::new();
        let mut ranges: Vec<(u32, u32)> = Vec::new();
        let mut vertex_offsets: Vec<u32> = Vec::new();
        let mut clips: Vec<egui::Rect> = Vec::new();
        let mut texture_ids: Vec<egui::TextureId> = Vec::new();
        // Running totals locating each mesh inside the flattened buffers.
        let mut offset = 0u32;
        let mut vertex_offset = 0u32;
        for egui::ClippedMesh(clip, mesh) in meshes.iter() {
            let len = mesh.indices.len() as u32;
            let vx_len = mesh.vertices.len() as u32;
            indices.extend(mesh.indices.iter().copied());
            // Convert egui's 0..=255 sRGBA channels to normalized floats.
            vertices.extend(mesh.vertices.iter().map(|vx| {
                let (r, g, b, a) = vx.color.to_tuple();
                GuiVertex {
                    position: [vx.pos.x, vx.pos.y],
                    uv: [vx.uv.x, vx.uv.y],
                    color: [
                        (r as f32) / 255.0,
                        (g as f32) / 255.0,
                        (b as f32) / 255.0,
                        (a as f32) / 255.0,
                    ],
                }
            }));
            clips.push(*clip);
            ranges.push((offset, len));
            vertex_offsets.push(vertex_offset);
            texture_ids.push(mesh.texture_id);
            offset += len;
            vertex_offset += vx_len;
        }
        let (vx_buf, vx_alloc, vx_alloc_info) = app
            .create_buffer_with_data(
                vk::BufferUsageFlags::VERTEX_BUFFER,
                vk_mem::MemoryUsage::GpuOnly,
                false,
                &vertices,
            )?;
        let (ix_buf, ix_alloc, ix_alloc_info) = app
            .create_buffer_with_data(
                vk::BufferUsageFlags::INDEX_BUFFER,
                vk_mem::MemoryUsage::GpuOnly,
                false,
                &indices,
            )?;
        app.set_debug_object_name(vx_buf, "GUI Vertex Buffer")?;
        app.set_debug_object_name(ix_buf, "GUI Index Buffer")?;
        self.vertex_buffer = vx_buf;
        self.vertex_alloc = vx_alloc;
        self.vertex_alloc_info = Some(vx_alloc_info);
        self.index_buffer = ix_buf;
        self.index_alloc = ix_alloc;
        self.index_alloc_info = Some(ix_alloc_info);
        // Move the freshly built metadata in place of cloning it
        // (`clone_from` copied every Vec only to drop the source).
        self.ranges = ranges;
        self.vertex_offsets = vertex_offsets;
        self.clips = clips;
        self.texture_ids = texture_ids;
        Ok(())
    }

    /// Frees the GPU buffers and clears all per-mesh metadata, returning the
    /// store to its post-`new` state.
    pub fn destroy(&mut self, allocator: &vk_mem::Allocator) {
        unsafe {
            self.device.destroy_buffer(self.vertex_buffer, None);
            self.device.destroy_buffer(self.index_buffer, None);
        }
        allocator.free_memory(&self.vertex_alloc).unwrap();
        allocator.free_memory(&self.index_alloc).unwrap();
        self.vertex_buffer = vk::Buffer::null();
        self.vertex_alloc = vk_mem::Allocation::null();
        self.vertex_alloc_info = None;
        self.index_buffer = vk::Buffer::null();
        self.index_alloc = vk_mem::Allocation::null();
        self.index_alloc_info = None;
        self.ranges.clear();
        self.vertex_offsets.clear();
        self.clips.clear();
        // Bug fix: texture_ids was previously left populated after destroy,
        // leaving the metadata vectors out of sync with `ranges`.
        self.texture_ids.clear();
    }
}
use bytemuck::{Pod, Zeroable};
/// One egui vertex in the layout the GUI pipelines expect
/// (see `get_binding_desc` / `get_attribute_descs`).
#[derive(Clone, Copy, Zeroable, Pod)]
#[repr(C)]
pub struct GuiVertex {
    /// Screen-space position (shader location 0, R32G32_SFLOAT).
    pub position: [f32; 2],
    /// Texture coordinates (location 1).
    pub uv: [f32; 2],
    /// RGBA color, each channel normalized to 0.0..=1.0 (location 2).
    pub color: [f32; 4],
}
impl GuiVertex {
    /// Describes the single interleaved, per-vertex buffer binding.
    fn get_binding_desc() -> vk::VertexInputBindingDescription {
        vk::VertexInputBindingDescription::builder()
            .binding(0)
            .input_rate(vk::VertexInputRate::VERTEX)
            .stride(std::mem::size_of::<GuiVertex>() as u32)
            .build()
    }

    /// Attribute layout: position (vec2 @ byte 0), uv (vec2 @ 8),
    /// color (vec4 @ 16) — matching the `GuiVertex` field order.
    fn get_attribute_descs() -> [vk::VertexInputAttributeDescription; 3] {
        let attr = |location: u32, format: vk::Format, offset: u32| {
            vk::VertexInputAttributeDescription::builder()
                .binding(0)
                .location(location)
                .format(format)
                .offset(offset)
                .build()
        };
        [
            attr(0, vk::Format::R32G32_SFLOAT, 0),
            attr(1, vk::Format::R32G32_SFLOAT, 8),
            attr(2, vk::Format::R32G32B32A32_SFLOAT, 16),
        ]
    }
}
/// Push-constant payload for the GUI pipelines: the viewport size in pixels,
/// matching the 8-byte vertex-stage push-constant range declared in
/// `create_pipeline`.
pub struct GuiPushConstants {
    width: f32,
    height: f32,
}

impl GuiPushConstants {
    /// Builds push constants from `[width, height]` viewport dimensions.
    #[inline]
    pub fn new(viewport_dims: [f32; 2]) -> Self {
        let width = viewport_dims[0];
        let height = viewport_dims[1];
        Self { width, height }
    }

    /// Serializes `width` then `height` as native-endian `f32` bytes.
    #[inline]
    pub fn bytes(&self) -> [u8; 8] {
        let mut bytes = [0u8; 8];
        // copy_from_slice replaces the original manual byte-copy loop.
        bytes[..4].copy_from_slice(&self.width.to_ne_bytes());
        bytes[4..].copy_from_slice(&self.height.to_ne_bytes());
        bytes
    }
}
|
// vim: shiftwidth=2
use std::convert::TryInto;
/// Sequentially decodes fixed-width native-endian integers from a byte buffer.
pub struct StructDeserializer<'a> {
    /// Source bytes being decoded (`&Vec` kept for caller compatibility;
    /// only slice operations are used internally).
    src: &'a Vec<u8>,
    /// Read cursor; always <= `src.len()`. Pinned to `src.len()` after a
    /// failed read so all subsequent reads also return `None`.
    ptr: usize,
}

impl<'a> StructDeserializer<'a> {
    /// Starts decoding at the beginning of `src`.
    pub fn new(src: &'a Vec<u8>) -> StructDeserializer<'a> {
        StructDeserializer { src, ptr: 0 }
    }

    /// Takes the next `n` bytes and advances the cursor, or returns `None`
    /// (and exhausts the buffer) when fewer than `n` bytes remain.
    /// Shared by all typed readers, which previously duplicated this logic.
    fn take(&mut self, n: usize) -> Option<&'a [u8]> {
        if self.ptr + n > self.src.len() {
            self.ptr = self.src.len();
            None
        } else {
            let bytes = &self.src[self.ptr..self.ptr + n];
            self.ptr += n;
            Some(bytes)
        }
    }

    /// Reads a native-endian `u16`, or `None` if fewer than 2 bytes remain.
    pub fn read_u16(&mut self) -> Option<u16> {
        self.take(2)
            .map(|b| u16::from_ne_bytes(b.try_into().unwrap()))
    }

    /// Reads a native-endian `i32`, or `None` if fewer than 4 bytes remain.
    pub fn read_i32(&mut self) -> Option<i32> {
        self.take(4)
            .map(|b| i32::from_ne_bytes(b.try_into().unwrap()))
    }

    /// Reads a native-endian `i64`, or `None` if fewer than 8 bytes remain.
    pub fn read_i64(&mut self) -> Option<i64> {
        self.take(8)
            .map(|b| i64::from_ne_bytes(b.try_into().unwrap()))
    }
}
|
use std::u64;
use amethyst::{
core::transform::components::Transform,
ecs::prelude::{Component, VecStorage},
};
use crate::resources::{
ingame::game_world::{ChunkIndex, Planet, GameWorldError, TileIndex},
RenderConfig,
};
/// This component stores the current chunk and tile the player resides on.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Position {
    /// Index of the chunk the entity currently occupies.
    pub chunk: ChunkIndex,
    /// Index of the tile within that chunk.
    pub tile: TileIndex,
}
impl Position {
    /// Builds a position from an explicit chunk + tile index pair.
    pub fn new(chunk_index: ChunkIndex, tile_index: TileIndex,) -> Self {
        Position {
            chunk: chunk_index,
            tile: tile_index,
        }
    }

    /// Resolves a world-space `Transform` into a chunk + tile position,
    /// first locating the chunk, then the tile inside it.
    pub fn from_transform(
        transform: &Transform,
        render_config: &RenderConfig,
        planet: &Planet,
    ) -> Result<Self, GameWorldError,> {
        let chunk = ChunkIndex::from_transform(transform, render_config, planet,)?;
        let tile = TileIndex::from_transform(transform, chunk, render_config, planet,)?;
        let position = Position { chunk, tile };
        #[cfg(feature = "debug")]
        debug!("| Created position from transform. {:?}", position);
        Ok(position,)
    }
}
impl Default for Position {
fn default() -> Self {
#[cfg(feature = "debug")]
debug!("Created default position.");
Self::new(ChunkIndex(u64::MAX, u64::MAX,), TileIndex(0, 0,),)
}
}
// ECS registration: lets `Position` be attached to entities.
impl Component for Position {
    type Storage = VecStorage<Self,>;
}
|
use crate::ray::Ray;
use crate::vec3::Vec3;
use crate::material::Material;
use std::rc::Rc;
/// Data recorded at a ray–surface intersection.
pub struct HitRecord {
    /// Intersection point.
    pub p: Vec3,
    /// Surface normal at `p`, oriented against the incoming ray
    /// (see `set_face_normal`).
    pub n: Vec3,
    /// Material of the surface that was hit.
    pub material_ref: Rc<dyn Material>,
    /// Ray parameter at which the hit occurred.
    pub t: f32,
    /// True when the ray struck the outward-facing side of the surface.
    pub front_face: bool
}
impl HitRecord {
    /// Creates a zeroed record holding `mat` (`t = 0`, `front_face = true`).
    pub fn new_zero(mat: Rc<dyn Material>) -> HitRecord {
        HitRecord {
            p: Vec3::new(),
            n: Vec3::new(),
            material_ref: mat,
            t: 0.0,
            front_face: true,
        }
    }

    /// Sets `front_face` and orients the stored normal against the ray:
    /// the ray hits the front face when its direction opposes the outward
    /// normal, and `n` always ends up pointing against the incoming ray.
    pub fn set_face_normal(&mut self, ray: &Ray, outward_normal: &Vec3) {
        // `if cond { true } else { false }` simplified to the condition.
        self.front_face = outward_normal.dot(&ray.direction) < 0.0;
        self.n = if self.front_face {
            *outward_normal
        } else {
            -*outward_normal
        };
    }
}
/// Anything a ray can intersect.
pub trait Hittable {
    /// Tests for an intersection with `ray` between `t_min` and `t_max`;
    /// on a hit, fills `hit` and returns true.
    fn hit(&self, ray: &Ray, t_min: f32, t_max: f32, hit: &mut HitRecord) -> bool;
}
/// Finalizes a value, consuming it and producing an associated `Log`.
pub trait Commit {
    /// The record produced by committing.
    type Log;
    /// Consumes `self` and returns the resulting log.
    /// `#[must_use]`: silently dropping the log would lose the outcome.
    #[must_use]
    fn commit(self) -> Self::Log;
}
|
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use crate::shared::PackageWithVersion;
/// A path mapping entry.
/// NOTE(review): semantics inferred from field names — `real` appears to be
/// the actual on-disk path and `move_to` its destination; confirm at usage
/// sites.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PathItem {
    pub real: String,
    pub move_to: String,
}
/// Package format version.
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Version {
    V1_0,
    V2_0,
    // Any version string other than the known ones above.
    VUnknown,
}
/**
 * Type doc: <https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-binarycontrolfiles>
 * YAML format
 */
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Control {
    pub package: String,
    pub source: Option<String>,
    pub version: String,
    pub section: Option<String>,
    pub priority: Option<String>,
    pub architecture: String,
    pub essential: Option<String>,
    pub install_size: Option<u64>, // There could be a better value for this, however rust-yaml outputs it as i64
    pub maintainer: String,
    pub description: String,
    pub homepage: Option<String>,
    pub built_using: Option<String>,
    // Depends et al: <https://www.debian.org/doc/debian-policy/ch-relationships.html#s-binarydeps>
    pub depends: Vec<PackageWithVersion>,
    pub pre_depends: Vec<PackageWithVersion>,
    pub recommends: Vec<PackageWithVersion>,
    pub suggests: Vec<PackageWithVersion>,
    pub enhances: Vec<PackageWithVersion>,
    pub breaks: Vec<PackageWithVersion>,
    pub conflicts: Vec<PackageWithVersion>,
}
|
use bigint::{Address, U256};
/// Block-level execution context fields.
/// NOTE(review): field names match Ethereum block-header terminology
/// (beneficiary/coinbase, difficulty, gas limit, number, timestamp) —
/// confirm against the consuming VM code.
pub struct Block {
    pub beneficiary: Address,
    pub difficulty: U256,
    pub gas_limit: U256,
    pub number: U256,
    pub timestamp: U256,
}
|
#[cfg(feature = "default")]
#[test]
// Constructing an Error preserves its kind, and kinds compare distinct.
fn test_error() {
    use relox::{Error, ErrorKind};
    let error = Error::new(ErrorKind::InvalidData);
    assert_eq!(error.kind(), ErrorKind::InvalidData);
    assert_ne!(ErrorKind::NotEnoughData, ErrorKind::InvalidData);
}
#[cfg(feature = "default")]
#[test]
// An all-zero 8-byte ELF32 relocation entry parses with zero offset/type.
fn test_elf32rel() {
    use relox::Elf32Rel;
    use std::io::Cursor;
    let elf32rel = Elf32Rel::from_memory(&mut Cursor::new(&[0; 8])).unwrap();
    assert_eq!(elf32rel.offset(), 0x00);
    assert_eq!(elf32rel.relocation_type(), 0x00);
}
|
#![deny(missing_docs)]
//! # Legion Type Uuid
//!
//! legion-typeuuid provides the `SerializableTypeUuid` type key, which can be used with legion's `Registry` to provide stable component type ID mappings for world serialization.
//!
//! ```rust
//! # use legion::*;
//! # use legion_typeuuid::*;
//! # #[derive(serde::Serialize, serde::Deserialize)]
//! # struct Position;
//! let mut registry = Registry::<SerializableTypeUuid>::default();
//! let uuid = SerializableTypeUuid::parse_str("1d97d71a-76bf-41d1-94c3-fcaac8231f12").unwrap();
//! registry.register::<Position>(uuid);
//! ```
//!
//! ## Feature Flags
//!
//! ### `type-uuid`
//!
//! Allows type UUIDs defined with the `type-uuid` crate to be used with `SerializableTypeUuid`.
//!
//! ```rust
//! # #[cfg(feature = "type-uuid")] {
//! # use legion::*;
//! # use legion_typeuuid::*;
//! # use serde::{Serialize, Deserialize};
//! # use type_uuid::TypeUuid;
//! #[derive(Serialize, Deserialize, TypeUuid)]
//! #[uuid = "1d97d71a-76bf-41d1-94c3-fcaac8231f12"]
//! struct Position;
//!
//! let mut registry = Registry::<SerializableTypeUuid>::default();
//! registry.register_auto_mapped::<Position>();
//! # }
//! ```
//!
//! ### `collect`
//!
//! Allows automatic component type registration with the `register_serialize!` macro and `collect_registry()` function. This feature requires your crate to also declare a dependency to the `inventory` crate.
//!
//! ```rust
//! # #[cfg(feature = "collect")] {
//! # use legion::*;
//! # use legion_typeuuid::*;
//! # use serde::{Serialize, Deserialize};
//! #[derive(Serialize, Deserialize)]
//! struct Position;
//!
//! register_serialize!(Position, "1d97d71a-76bf-41d1-94c3-fcaac8231f12");
//!
//! let registry = collect_registry();
//! # }
//! ```
//!
//! This can be used together with the `type-uuid` feature.
//!
//! ```rust
//! # #[cfg(all(feature = "type-uuid", feature = "collect"))] {
//! # use legion::*;
//! # use legion_typeuuid::*;
//! # use serde::{Serialize, Deserialize};
//! # use type_uuid::TypeUuid;
//! #[derive(Serialize, Deserialize, TypeUuid)]
//! #[uuid = "1d97d71a-76bf-41d1-94c3-fcaac8231f12"]
//! struct Position;
//!
//! register_serialize!(Position);
//!
//! let registry = collect_registry();
//! # }
//! ```
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[cfg(feature = "collect")]
pub use crate::collect::{collect_registry, TypeUuidRegistration};
/// Maps component types to stable UUIDs for (de)serialization via the `type_uuid` crate.
// Newtype over `uuid::Uuid`, so the required serde/ordering traits for a
// registry key live on a crate-local type.
#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct SerializableTypeUuid(Uuid);
impl SerializableTypeUuid {
    /// Parses a UUID string into a `SerializableTypeUuid`, returning `None`
    /// when the string is not a valid UUID.
    pub fn parse_str(uuid: &str) -> Option<Self> {
        Uuid::parse_str(uuid).ok().map(Self)
    }
}
// Builds the id from raw UUID bytes — used by the `type-uuid` integration,
// which exposes `T::UUID` as `[u8; 16]`.
impl From<[u8; 16]> for SerializableTypeUuid {
    fn from(bytes: [u8; 16]) -> Self {
        Self(Uuid::from_bytes(bytes))
    }
}
// With the `type-uuid` feature, components deriving `TypeUuid` can be
// registered without an explicit uuid string (legion's
// `register_auto_mapped` obtains the key via this impl).
#[cfg(feature = "type-uuid")]
impl<T: legion::storage::Component + type_uuid::TypeUuid> legion::serialize::AutoTypeKey<T>
    for SerializableTypeUuid
{
    fn new() -> Self {
        Self::from(T::UUID)
    }
}
#[cfg(feature = "collect")]
mod collect {
    use super::SerializableTypeUuid;
    use legion::Registry;
    /// Holds information required to register a type with legion's Registry.
    pub struct TypeUuidRegistration {
        /// A function which registers a type with the given `Registry`.
        pub builder: fn(&mut Registry<SerializableTypeUuid>),
    }
    // Gathers every `TypeUuidRegistration` submitted anywhere in the binary.
    inventory::collect!(TypeUuidRegistration);
    /// Constructs a `Registry` which is pre-loaded with all types which have been
    /// registered with the `register_serialize` macro.
    pub fn collect_registry() -> Registry<SerializableTypeUuid> {
        let mut registry = Registry::default();
        for registration in inventory::iter::<TypeUuidRegistration> {
            (registration.builder)(&mut registry);
        }
        registry
    }
    /// Registers a component type with a UUID for serialization.
    // Variant used when the `type-uuid` feature is off: an explicit uuid
    // string is always required.
    #[macro_export]
    #[cfg(not(feature = "type-uuid"))]
    macro_rules! register_serialize {
        ($component:ty, $uuid:literal) => {
            ::inventory::submit! {
                ::legion_typeuuid::TypeUuidRegistration {
                    builder: |registry| {
                        let id = ::legion_typeuuid::SerializableTypeUuid::parse_str($uuid)
                            .expect(&format!("could not parse uuid for {}", std::any::type_name::<$component>()));
                        registry.register::<$component>(id);
                    }
                }
            }
        };
    }
    /// Registers a component type with a UUID for serialization.
    // Variant used with `type-uuid`: also accepts a single-argument form
    // that takes the uuid from the component's `TypeUuid::UUID`.
    #[macro_export]
    #[cfg(feature = "type-uuid")]
    macro_rules! register_serialize {
        ($component:ty, $uuid:literal) => {
            ::inventory::submit! {
                ::legion_typeuuid::TypeUuidRegistration {
                    builder: |registry| {
                        let id = ::legion_typeuuid::SerializableTypeUuid::parse_str($uuid)
                            .expect(&format!("could not parse uuid for {}", std::any::type_name::<$component>()));
                        registry.register::<$component>(id);
                    }
                }
            }
        };
        ($component:ty) => {
            ::inventory::submit! {
                ::legion_typeuuid::TypeUuidRegistration {
                    builder: |registry| {
                        let uuid = <$component as ::type_uuid::TypeUuid>::UUID;
                        registry.register::<$component>(uuid.into());
                    }
                }
            }
        };
    }
    // Crate-internal variant used below to pre-register std types; paths go
    // through `crate::` instead of the public `::legion_typeuuid::` paths.
    macro_rules! register_serialize_external {
        ($component:ty, $uuid:literal) => {
            ::inventory::submit! {
                crate::TypeUuidRegistration {
                    builder: |registry| {
                        let uuid = ::uuid::Uuid::parse_str($uuid).unwrap();
                        let id = crate::SerializableTypeUuid(uuid);
                        registry.register::<$component>(id);
                    }
                }
            }
        };
    }
    // Stable uuids for common std types, so they serialize out of the box.
    register_serialize_external!(bool, "abea8c1e-6910-43e4-b579-9ef1b5a95226");
    register_serialize_external!(isize, "0d3b0c08-45ff-43f4-a145-b2bdef69d1d2");
    register_serialize_external!(i8, "92fd5f7b-2102-46cb-9b1b-662df636625a");
    register_serialize_external!(i16, "a02dfda1-8603-4d69-818a-1e1c47b154b6");
    register_serialize_external!(i32, "6dd1ba7e-fa8b-4aa1-ac22-c28773798975");
    register_serialize_external!(i64, "3103622f-fdfa-4ae3-8ede-67b56bd332fd");
    register_serialize_external!(usize, "1d4562ce-b27d-4e99-af44-a40aca248c2e");
    register_serialize_external!(u8, "b0fe47a9-fd37-41c6-b2ab-bed5d385ccde");
    register_serialize_external!(u16, "3ad2a84b-c5a6-414c-8628-75613e11e67e");
    register_serialize_external!(u32, "f6cc80b8-94e8-4c05-80b1-a8fbbaeb67af");
    register_serialize_external!(u64, "da9a3e45-516c-4412-87d2-96ea17bebd21");
    register_serialize_external!(i128, "0dbb7b33-9f27-4b3f-aebc-11426c464323");
    register_serialize_external!(u128, "46eaab86-9268-4e98-ac9f-76eb71a1f0b4");
    register_serialize_external!(f32, "5b1d1734-9fcc-43e7-8cc6-452ba16ff1fd");
    register_serialize_external!(f64, "76b2ebf4-cd06-41de-96dc-2f402ffa46b2");
    register_serialize_external!(char, "9786a9f4-1195-4dd1-875d-3e469454d9c4");
    register_serialize_external!(String, "7edbc10a-2147-499c-af9a-498723c7b35f");
    register_serialize_external!(std::ffi::CString, "d26a39da-d0e2-46b1-aeab-481fe57d0f23");
    #[cfg(any(unix, windows))]
    register_serialize_external!(std::ffi::OsString, "38485fce-f5d0-48df-b5cb-98e510c26a8d");
    register_serialize_external!(std::num::NonZeroU8, "284b98ec-ecb5-463c-9744-23b8669c5553");
    register_serialize_external!(std::num::NonZeroU16, "38f030e4-6046-45c9-96b4-1830b1aa3f35");
    register_serialize_external!(std::num::NonZeroU32, "b32f7cc7-2841-48b3-8d8e-760414b4c4ab");
    register_serialize_external!(std::num::NonZeroU64, "b43c6dad-6608-4f02-817a-8eac8c6345cb");
    register_serialize_external!(std::time::Duration, "449a4224-4665-47ce-88a2-8d0310d20572");
    register_serialize_external!(
        std::time::SystemTime,
        "b8dfc518-faf7-4590-91ba-82acd78b1685"
    );
    register_serialize_external!(std::net::IpAddr, "a3c248b7-94e1-4d4a-8b7e-fd1915f4c81b");
    register_serialize_external!(std::net::Ipv4Addr, "a62542a2-6a38-4980-9467-f093bb546140");
    register_serialize_external!(std::net::Ipv6Addr, "a6ba4f16-f436-4ae2-ae62-69dd08150b33");
    register_serialize_external!(std::net::SocketAddr, "fe76891f-3e0a-49f7-b32e-14fc11768844");
    register_serialize_external!(
        std::net::SocketAddrV4,
        "e951fa30-50d9-4832-8bc9-c49c06037697"
    );
    register_serialize_external!(
        std::net::SocketAddrV6,
        "8840455b-ad6c-41ae-8694-e50873d952c4"
    );
    register_serialize_external!(std::path::PathBuf, "d6db3123-4c95-45de-a28f-5a48d574b9c4");
    // `()` needs a nameable alias for the macro's `$component:ty` position.
    #[allow(dead_code)]
    type Unit = ();
    register_serialize_external!(Unit, "03748d1a-0d0c-472f-9fdd-424856157064");
}
#[cfg(test)]
mod tests {
    #[test]
    #[cfg(feature = "collect")]
    // Round-trips a world through JSON using the auto-collected registry and
    // checks that components survive intact.
    fn serialize_json_uuid() {
        use super::*;
        use legion::*;
        // Four entities sharing the same (usize, bool, isize) archetype.
        let mut world = World::default();
        let entity = world.extend(vec![
            (1usize, false, 1isize),
            (2usize, false, 2isize),
            (3usize, false, 3isize),
            (4usize, false, 4isize),
        ])[0];
        let registry = collect_registry();
        let json = serde_json::to_value(&world.as_serializable(any(), &registry)).unwrap();
        println!("{:#}", json);
        use serde::de::DeserializeSeed;
        let world: World = registry.as_deserialize().deserialize(json).unwrap();
        // The first entity's components survive the round trip.
        let entry = world.entry_ref(entity).unwrap();
        assert_eq!(entry.get_component::<usize>().unwrap(), &1usize);
        assert_eq!(entry.get_component::<bool>().unwrap(), &false);
        assert_eq!(entry.get_component::<isize>().unwrap(), &1isize);
        assert_eq!(4, world.len());
    }
}
|
use crate::read_pattern::ReadPattern;
/// A pattern (`.0`) repeated an exact number of times (`.1`).
#[derive(Copy, Clone, Debug)]
pub struct ManyPattern<T>(pub T, pub u32);
impl<T> ReadPattern for ManyPattern<T>
where
    T: ReadPattern,
{
    /// Matches the inner pattern exactly `self.1` times back-to-back,
    /// returning the total matched length, or `None` on the first miss.
    fn read_pattern(&self, text: &str) -> Option<usize> {
        let mut len = 0;
        for _ in 0..self.1 {
            // `?` replaces the original match-and-return-None plumbing.
            len += self.0.read_pattern(&text[len..])?;
        }
        Some(len)
    }
}
|
use std::os::unix::io::RawFd;
use std::mem;
use nix;
use nix::sys::termios;
use nix::sys::termios::{IGNBRK, BRKINT, PARMRK, ISTRIP, INLCR, IGNCR, ICRNL, IXON};
use nix::sys::termios::{OPOST, ECHO, ECHONL, ICANON, ISIG, IEXTEN, CSIZE, PARENB, CS8};
use nix::sys::termios::{VMIN, VTIME};
use nix::sys::termios::SetArg;
use nix::sys::termios::Termios;
use util::errors::Error;
mod ffi {
    use libc;
    // TIOCGWINSZ ioctl request number; the value is platform-specific.
    #[cfg(target_os = "macos")]
    pub const TIOCGWINSZ: libc::c_ulong = 0x40087468;
    #[cfg(target_os = "linux")]
    pub const TIOCGWINSZ: libc::c_ulong = 0x00005413;
    // Mirror of the kernel's `struct winsize`; `#[repr(C)]` keeps the
    // layout compatible with the ioctl's expectation.
    #[repr(C)]
    #[derive(Debug, Clone)]
    pub struct winsize {
        pub ws_row: u16,
        pub ws_col: u16,
        // Pixel dimensions: unused here, but required for correct layout.
        ws_xpixel: u16,
        ws_ypixel: u16,
    }
    extern {
        pub fn ioctl(fd: libc::c_int, req: libc::c_ulong, ...) -> libc::c_int;
    }
}
/// Controller for low-level interaction with a terminal device.
/// Controller for low-level interaction with a terminal device.
pub struct TermCtl {
    /// File descriptor of the terminal being controlled (not closed here).
    fd: RawFd,
    /// Termios state captured at construction; restored by `reset`.
    orig_tios: Termios,
}
impl TermCtl {
    /// Wraps `fd`, snapshotting its current termios state so `reset` can
    /// restore it later.
    pub fn new(fd: RawFd) -> Result<TermCtl, Error> {
        Ok(TermCtl {
            fd: fd,
            orig_tios: try!(termios::tcgetattr(fd)),
        })
    }

    /// Puts the terminal into raw mode: no echo, no canonical line editing,
    /// no signal keys, 8-bit characters, and non-blocking reads
    /// (VMIN = VTIME = 0).
    pub fn set(&self) -> Result<(), Error> {
        let mut tios = self.orig_tios.clone();
        tios.c_iflag = tios.c_iflag & !(IGNBRK | BRKINT | PARMRK | ISTRIP |
                                        INLCR | IGNCR | ICRNL | IXON);
        tios.c_oflag = tios.c_oflag & !OPOST;
        tios.c_lflag = tios.c_lflag & !(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
        tios.c_cflag = tios.c_cflag & !(CSIZE | PARENB);
        tios.c_cflag = tios.c_cflag | CS8;
        tios.c_cc[VMIN] = 0;
        tios.c_cc[VTIME] = 0;
        try!(termios::tcsetattr(self.fd, SetArg::TCSAFLUSH, &tios));
        Ok(())
    }

    /// Queries the kernel for the terminal size; returns `(cols, rows)`.
    pub fn window_size(&self) -> Result<(usize, usize), Error> {
        // Bug fix: was `mem::uninitialized()`, which is undefined behavior
        // (and the ioctl only fills the struct on success). `winsize` is all
        // integers, so an all-zero value is valid — use `mem::zeroed()`.
        let mut ws: ffi::winsize = unsafe { mem::zeroed() };
        try!(unsafe {
            nix::from_ffi(ffi::ioctl(self.fd, ffi::TIOCGWINSZ, &mut ws))
        });
        Ok((ws.ws_col as usize, ws.ws_row as usize))
    }

    /// Restores the termios state captured when this `TermCtl` was created.
    pub fn reset(&self) -> Result<(), Error> {
        try!(termios::tcsetattr(self.fd, SetArg::TCSAFLUSH, &self.orig_tios));
        Ok(())
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
// Auto-generated umbrella error: one `#[error(transparent)]` variant per
// operation, so any operation's error converts into this single type
// via `#[from]`.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)]
    AuthorizationOperations_List(#[from] authorization_operations::list::Error),
    #[error(transparent)]
    ManagementLocks_GetAtResourceGroupLevel(#[from] management_locks::get_at_resource_group_level::Error),
    #[error(transparent)]
    ManagementLocks_CreateOrUpdateAtResourceGroupLevel(#[from] management_locks::create_or_update_at_resource_group_level::Error),
    #[error(transparent)]
    ManagementLocks_DeleteAtResourceGroupLevel(#[from] management_locks::delete_at_resource_group_level::Error),
    #[error(transparent)]
    ManagementLocks_GetByScope(#[from] management_locks::get_by_scope::Error),
    #[error(transparent)]
    ManagementLocks_CreateOrUpdateByScope(#[from] management_locks::create_or_update_by_scope::Error),
    #[error(transparent)]
    ManagementLocks_DeleteByScope(#[from] management_locks::delete_by_scope::Error),
    #[error(transparent)]
    ManagementLocks_GetAtResourceLevel(#[from] management_locks::get_at_resource_level::Error),
    #[error(transparent)]
    ManagementLocks_CreateOrUpdateAtResourceLevel(#[from] management_locks::create_or_update_at_resource_level::Error),
    #[error(transparent)]
    ManagementLocks_DeleteAtResourceLevel(#[from] management_locks::delete_at_resource_level::Error),
    #[error(transparent)]
    ManagementLocks_GetAtSubscriptionLevel(#[from] management_locks::get_at_subscription_level::Error),
    #[error(transparent)]
    ManagementLocks_CreateOrUpdateAtSubscriptionLevel(#[from] management_locks::create_or_update_at_subscription_level::Error),
    #[error(transparent)]
    ManagementLocks_DeleteAtSubscriptionLevel(#[from] management_locks::delete_at_subscription_level::Error),
    #[error(transparent)]
    ManagementLocks_ListAtResourceGroupLevel(#[from] management_locks::list_at_resource_group_level::Error),
    #[error(transparent)]
    ManagementLocks_ListAtResourceLevel(#[from] management_locks::list_at_resource_level::Error),
    #[error(transparent)]
    ManagementLocks_ListAtSubscriptionLevel(#[from] management_locks::list_at_subscription_level::Error),
    #[error(transparent)]
    ManagementLocks_ListByScope(#[from] management_locks::list_by_scope::Error),
}
pub mod authorization_operations {
use super::{models, API_VERSION};
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.Authorization/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::OperationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod management_locks {
use super::{models, API_VERSION};
pub async fn get_at_resource_group_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockObject, get_at_resource_group_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(get_at_resource_group_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_at_resource_group_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_at_resource_group_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_at_resource_group_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_at_resource_group_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error type for the [`get_at_resource_group_level`] operation.
pub mod get_at_resource_group_level {
    use super::{models, API_VERSION};
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the GET path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn create_or_update_at_resource_group_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
lock_name: &str,
parameters: &models::ManagementLockObject,
subscription_id: &str,
) -> std::result::Result<create_or_update_at_resource_group_level::Response, create_or_update_at_resource_group_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_at_resource_group_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_at_resource_group_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update_at_resource_group_level::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_at_resource_group_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_at_resource_group_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_resource_group_level::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_resource_group_level::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update_at_resource_group_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`create_or_update_at_resource_group_level`] operation.
pub mod create_or_update_at_resource_group_level {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 200 or 201, each with the
    /// resulting lock object in the body.
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::ManagementLockObject),
        Created201(models::ManagementLockObject),
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn delete_at_resource_group_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_at_resource_group_level::Response, delete_at_resource_group_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(delete_at_resource_group_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_at_resource_group_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(delete_at_resource_group_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_at_resource_group_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete_at_resource_group_level::Response::NoContent204),
http::StatusCode::OK => Ok(delete_at_resource_group_level::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| delete_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete_at_resource_group_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`delete_at_resource_group_level`] operation.
pub mod delete_at_resource_group_level {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 204 or 200; neither carries a body.
    #[derive(Debug)]
    pub enum Response {
        NoContent204,
        Ok200,
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the DELETE path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn get_by_scope(
operation_config: &crate::OperationConfig,
scope: &str,
lock_name: &str,
) -> std::result::Result<models::ManagementLockObject, get_by_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
scope,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(get_by_scope::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_by_scope::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_by_scope::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_by_scope::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject =
serde_json::from_slice(rsp_body).map_err(|source| get_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_by_scope::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error type for the [`get_by_scope`] operation.
pub mod get_by_scope {
    use super::{models, API_VERSION};
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the GET path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn create_or_update_by_scope(
operation_config: &crate::OperationConfig,
scope: &str,
lock_name: &str,
parameters: &models::ManagementLockObject,
) -> std::result::Result<create_or_update_by_scope::Response, create_or_update_by_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
scope,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_by_scope::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_by_scope::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update_by_scope::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_by_scope::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_by_scope::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_by_scope::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_by_scope::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update_by_scope::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`create_or_update_by_scope`] operation.
pub mod create_or_update_by_scope {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 200 or 201, each with the
    /// resulting lock object in the body.
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::ManagementLockObject),
        Created201(models::ManagementLockObject),
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn delete_by_scope(
operation_config: &crate::OperationConfig,
scope: &str,
lock_name: &str,
) -> std::result::Result<delete_by_scope::Response, delete_by_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
scope,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(delete_by_scope::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_by_scope::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete_by_scope::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_by_scope::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete_by_scope::Response::NoContent204),
http::StatusCode::OK => Ok(delete_by_scope::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| delete_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete_by_scope::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`delete_by_scope`] operation.
pub mod delete_by_scope {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 204 or 200; neither carries a body.
    #[derive(Debug)]
    pub enum Response {
        NoContent204,
        Ok200,
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the DELETE path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn get_at_resource_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_provider_namespace: &str,
parent_resource_path: &str,
resource_type: &str,
resource_name: &str,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockObject, get_at_resource_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/{}/{}/{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(get_at_resource_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_at_resource_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_at_resource_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_at_resource_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_at_resource_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error type for the [`get_at_resource_level`] operation.
pub mod get_at_resource_level {
    use super::{models, API_VERSION};
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the GET path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn create_or_update_at_resource_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_provider_namespace: &str,
parent_resource_path: &str,
resource_type: &str,
resource_name: &str,
lock_name: &str,
parameters: &models::ManagementLockObject,
subscription_id: &str,
) -> std::result::Result<create_or_update_at_resource_level::Response, create_or_update_at_resource_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/{}/{}/{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_at_resource_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_at_resource_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update_at_resource_level::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_at_resource_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_at_resource_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_resource_level::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_resource_level::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update_at_resource_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`create_or_update_at_resource_level`] operation.
pub mod create_or_update_at_resource_level {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 200 or 201, each with the
    /// resulting lock object in the body.
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::ManagementLockObject),
        Created201(models::ManagementLockObject),
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn delete_at_resource_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_provider_namespace: &str,
parent_resource_path: &str,
resource_type: &str,
resource_name: &str,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_at_resource_level::Response, delete_at_resource_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/{}/{}/{}/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(delete_at_resource_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_at_resource_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(delete_at_resource_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_at_resource_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete_at_resource_level::Response::NoContent204),
http::StatusCode::OK => Ok(delete_at_resource_level::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| delete_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete_at_resource_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for the [`delete_at_resource_level`] operation.
pub mod delete_at_resource_level {
    use super::{models, API_VERSION};
    /// Success outcomes: the service answers 204 or 200; neither carries a body.
    #[derive(Debug)]
    pub enum Response {
        NoContent204,
        Ok200,
    }
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the DELETE path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn get_at_subscription_level(
operation_config: &crate::OperationConfig,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockObject, get_at_subscription_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(get_at_subscription_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_at_subscription_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_at_subscription_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_at_subscription_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_at_subscription_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error type for the [`get_at_subscription_level`] operation.
pub mod get_at_subscription_level {
    use super::{models, API_VERSION};
    /// Ways the call can fail, one variant per stage of the request pipeline.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// Service replied with an unexpected HTTP status; `value` holds the parsed error body.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        /// Not constructed by the GET path in this file (no request body is serialized).
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Creates or updates the management lock `lock_name` at subscription scope.
///
/// Sends `PUT .../Microsoft.Authorization/locks/{lock_name}` with `parameters`
/// as the JSON body. A 201 maps to `Response::Created201` and a 200 to
/// `Response::Ok200`; any other status is returned as `Error::DefaultResponse`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn create_or_update_at_subscription_level(
operation_config: &crate::OperationConfig,
lock_name: &str,
parameters: &models::ManagementLockObject,
subscription_id: &str,
) -> std::result::Result<create_or_update_at_subscription_level::Response, create_or_update_at_subscription_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_at_subscription_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_at_subscription_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update_at_subscription_level::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_at_subscription_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_at_subscription_level::Error::ExecuteRequestError)?;
match rsp.status() {
// 201: the lock was created.
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_subscription_level::Response::Created201(rsp_value))
}
// 200: an existing lock was updated.
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockObject = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_at_subscription_level::Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update_at_subscription_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for [`create_or_update_at_subscription_level`].
pub mod create_or_update_at_subscription_level {
use super::{models, API_VERSION};
/// Distinguishes "created" (201) from "updated" (200); both carry the lock.
#[derive(Debug)]
pub enum Response {
Created201(models::ManagementLockObject),
Ok200(models::ManagementLockObject),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Deletes the management lock `lock_name` at subscription scope.
///
/// Sends `DELETE .../Microsoft.Authorization/locks/{lock_name}`. A 204 maps to
/// `Response::NoContent204` and a 200 to `Response::Ok200` (neither carries a
/// body); any other status becomes `Error::DefaultResponse`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn delete_at_subscription_level(
operation_config: &crate::OperationConfig,
lock_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_at_subscription_level::Response, delete_at_subscription_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Authorization/locks/{}",
operation_config.base_path(),
subscription_id,
lock_name
);
let mut url = url::Url::parse(url_str).map_err(delete_at_subscription_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_at_subscription_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(delete_at_subscription_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_at_subscription_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete_at_subscription_level::Response::NoContent204),
http::StatusCode::OK => Ok(delete_at_subscription_level::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| delete_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete_at_subscription_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Response and error types for [`delete_at_subscription_level`].
pub mod delete_at_subscription_level {
use super::{models, API_VERSION};
/// Bodyless success statuses returned by the delete operation.
#[derive(Debug)]
pub enum Response {
NoContent204,
Ok200,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists the management locks of a resource group, optionally filtered.
///
/// Sends `GET .../resourceGroups/{rg}/providers/Microsoft.Authorization/locks`,
/// appending `$filter` when `filter` is `Some`. A 200 response is decoded as
/// `models::ManagementLockListResult`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn list_at_resource_group_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockListResult, list_at_resource_group_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Authorization/locks",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_at_resource_group_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_at_resource_group_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional OData-style filter.
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_at_resource_group_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_at_resource_group_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_resource_group_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_at_resource_group_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for [`list_at_resource_group_level`].
pub mod list_at_resource_group_level {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists the management locks of a specific resource, optionally filtered.
///
/// The URL nests the full resource path:
/// `.../resourcegroups/{rg}/providers/{ns}/{parent}/{type}/{name}/providers/Microsoft.Authorization/locks`.
/// A 200 response is decoded as `models::ManagementLockListResult`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn list_at_resource_level(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_provider_namespace: &str,
parent_resource_path: &str,
resource_type: &str,
resource_name: &str,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockListResult, list_at_resource_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/{}/{}/{}/{}/providers/Microsoft.Authorization/locks",
operation_config.base_path(),
subscription_id,
resource_group_name,
resource_provider_namespace,
parent_resource_path,
resource_type,
resource_name
);
let mut url = url::Url::parse(url_str).map_err(list_at_resource_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_at_resource_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional OData-style filter.
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_at_resource_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_at_resource_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_resource_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_at_resource_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for [`list_at_resource_level`].
pub mod list_at_resource_level {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists all management locks of a subscription, optionally filtered.
///
/// Sends `GET .../subscriptions/{id}/providers/Microsoft.Authorization/locks`,
/// appending `$filter` when `filter` is `Some`. A 200 response is decoded as
/// `models::ManagementLockListResult`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn list_at_subscription_level(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ManagementLockListResult, list_at_subscription_level::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Authorization/locks",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_at_subscription_level::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_at_subscription_level::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional OData-style filter.
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_at_subscription_level::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_at_subscription_level::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockListResult = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_at_subscription_level::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_at_subscription_level::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for [`list_at_subscription_level`].
pub mod list_at_subscription_level {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Lists management locks at an arbitrary `scope` (e.g. a subscription,
/// resource group, or resource path), optionally filtered.
///
/// Sends `GET {base_path}/{scope}/providers/Microsoft.Authorization/locks`.
/// A 200 response is decoded as `models::ManagementLockListResult`.
///
/// NOTE: machine-generated (OpenAPI) client code — avoid hand-editing the logic.
pub async fn list_by_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<models::ManagementLockListResult, list_by_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/{}/providers/Microsoft.Authorization/locks", operation_config.base_path(), scope);
let mut url = url::Url::parse(url_str).map_err(list_by_scope::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_scope::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional OData-style filter.
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_scope::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_scope::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ManagementLockListResult =
serde_json::from_slice(rsp_body).map_err(|source| list_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_by_scope::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_scope::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
/// Error types for [`list_by_scope`].
pub mod list_by_scope {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
|
//! Contains pre and post processing logic.
// Post-processing stage.
pub mod post;
// Pre-processing stage.
pub mod pre;
|
use std::io;
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::net::TcpStream;
use clap;
use output::Output;
use ::client::*;
use ::misc::*;
/// Connects to the server, issues the `status` protocol command, and streams
/// the server's report to stdout.
///
/// Every I/O failure is surfaced as a prefixed `String` error; a non-`OK`
/// acknowledgement line from the server is reported verbatim. Returns
/// `Ok(true)` on success.
fn do_client_status(
    output: &Output,
    arguments: &ClientStatusArguments,
) -> Result<bool, String> {
    // Open the control connection.
    let mut stream = io_result_with_prefix(
        || "Connection error: ".to_string(),
        TcpStream::connect((
            arguments.server_hostname.as_str(),
            arguments.server_port,
        )),
    )?;
    // Send the command; the server replies with an acknowledgement line
    // followed by the report body.
    io_result_with_prefix(
        || "Communication error: ".to_string(),
        write!(stream, "status\n"),
    )?;
    let mut reader = BufReader::new(stream);
    // Read and validate the acknowledgement line.
    let mut response_line = String::new();
    io_result_with_prefix(
        || "Communication error: ".to_string(),
        reader.read_line(&mut response_line),
    )?;
    if response_line != "OK\n" {
        return Err(format!(
            "Server returned error: {}\n",
            response_line.trim(),
        ));
    }
    // Stream the remainder of the response straight to stdout.
    io_result_with_prefix(
        || "Communication error: ".to_string(),
        io::copy(&mut reader, &mut io::stdout()),
    )?;
    output.message("Status complete");
    Ok(true)
}
// Declarative registration of the `status` subcommand: the `command!` macro
// wires together the clap definition, argument parsing, and the action.
command! (
name = status,
export = client_status_command,
// Parsed arguments passed to the action.
arguments = ClientStatusArguments {
server_hostname: String,
server_port: u16,
},
// clap definition shown in `--help`.
clap_subcommand = {
clap::SubCommand::with_name ("status")
.about ("Shows server status")
.arg (
clap::Arg::with_name ("server-address")
.long ("server-address")
.value_name ("SERVER-ADDRESS")
.required (true)
.help ("Server address, in 'host:port' format")
)
},
// Splits the required `host:port` argument into its components.
clap_arguments_parse = |clap_matches| {
let (server_hostname, server_port) =
parse_server_address (
args::string_required (
clap_matches,
"server-address"),
);
ClientStatusArguments {
server_hostname: server_hostname,
server_port: server_port,
}
},
action = |output, arguments| {
do_client_status (output, arguments)
},
);
// ex: noet ts=4 filetype=rust
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// CheckCanDeleteSloResponse : A service level objective response containing the requested object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckCanDeleteSloResponse {
// Payload describing whether the SLOs can be deleted; omitted from JSON when absent.
#[serde(rename = "data", skip_serializing_if = "Option::is_none")]
pub data: Option<Box<crate::models::CheckCanDeleteSloResponseData>>,
/// A mapping of SLO id to its current usages.
#[serde(rename = "errors", skip_serializing_if = "Option::is_none")]
pub errors: Option<::std::collections::HashMap<String, String>>,
}
impl CheckCanDeleteSloResponse {
/// A service level objective response containing the requested object.
pub fn new() -> CheckCanDeleteSloResponse {
CheckCanDeleteSloResponse {
data: None,
errors: None,
}
}
}
|
use super::super::{
program::MaskProgram,
webgl::{WebGlF32Vbo, WebGlI16Ibo, WebGlRenderingContext},
ModelMatrix,
};
use super::TableBlock;
use crate::{
block::{self, BlockId},
Color,
};
use ndarray::Array2;
use std::collections::HashMap;
/// Renders table "area" blocks into a mask so individual areas can later be
/// identified by color (see the `render` method).
pub struct AreaCollectionRenderer {
    // Unit-quad vertex positions (XYZ), centred on the origin.
    vertexis_buffer: WebGlF32Vbo,
    // Per-vertex texture coordinates for the same quad.
    texture_coord_buffer: WebGlF32Vbo,
    // Triangle indices for the two-triangle quad.
    index_buffer: WebGlI16Ibo,
}
impl AreaCollectionRenderer {
    /// Creates the shared GPU buffers for a unit quad (two triangles) that is
    /// scaled and rotated per area at draw time.
    pub fn new(gl: &WebGlRenderingContext) -> Self {
        let vertexis_buffer = gl.create_vbo_with_f32array(
            &[
                [0.5, 0.5, 0.0],
                [-0.5, 0.5, 0.0],
                [0.5, -0.5, 0.0],
                [-0.5, -0.5, 0.0],
            ]
            .concat(),
        );
        let texture_coord_buffer =
            gl.create_vbo_with_f32array(&[[1.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 0.0]].concat());
        let index_buffer = gl.create_ibo_with_i16array(&[0, 1, 2, 3, 2, 1]);
        Self {
            vertexis_buffer,
            texture_coord_buffer,
            index_buffer,
        }
    }
    /// Draws each listed area into the mask.
    ///
    /// Line areas are drawn as rotated rectangles; all other areas as quads
    /// flagged "rounded" via the `u_flag_round` uniform. Each area is given a
    /// unique opaque color derived from `id_map.len()`, and the color→block
    /// mapping is recorded in `id_map` so a readback can resolve an area from
    /// a mask pixel.
    pub fn render<'a>(
        &self,
        gl: &WebGlRenderingContext,
        program: &MaskProgram,
        vp_matrix: &Array2<f32>,
        block_field: &block::Field,
        areas: impl Iterator<Item = &'a BlockId>,
        id_map: &mut HashMap<u32, TableBlock>,
    ) {
        gl.set_attribute(&self.vertexis_buffer, &program.a_vertex_location, 3, 0);
        gl.set_attribute(
            &self.texture_coord_buffer,
            &program.a_texture_coord_location,
            2,
            0,
        );
        gl.bind_buffer(
            web_sys::WebGlRenderingContext::ELEMENT_ARRAY_BUFFER,
            Some(&self.index_buffer),
        );
        for (area_id, area) in block_field.listed::<block::table_object::Area>(areas.collect()) {
            let (model_matrix, is_rounded) =
                if let block::table_object::area::Type::Line(line_width) = area.type_() {
                    // Line: a rectangle of length |vec| and height `line_width`,
                    // rotated to the vector's angle and centred on the segment midpoint.
                    let line_width = *line_width as f32;
                    let o = area.org().clone();
                    let v = area.vec().clone();
                    let len = (v[0].powi(2) + v[1].powi(2) + v[2].powi(2)).sqrt();
                    let zr = v[1].atan2(v[0]);
                    let mm: Array2<f32> = ModelMatrix::new()
                        .with_scale(&[len, line_width, 0.0])
                        .with_z_axis_rotation(zr)
                        .with_movement(&[o[0] + v[0] / 2.0, o[1] + v[1] / 2.0, o[2] + v[2] / 2.0])
                        .into();
                    (mm, false)
                } else {
                    // Other area types: a square of side 2·|vec| centred on the
                    // origin point; the shader rounds it when `is_rounded` is set.
                    let o = area.org();
                    let v = area.vec().clone();
                    let len = (v[0].powi(2) + v[1].powi(2) + v[2].powi(2)).sqrt() * 2.0;
                    let mm: Array2<f32> = ModelMatrix::new()
                        .with_scale(&[len, len, 0.0])
                        .with_movement(o)
                        .into();
                    (mm, true)
                };
            let mvp_matrix = vp_matrix.dot(&model_matrix);
            // Transposed so the row-by-row extraction below produces the layout
            // the shader expects.
            let mvp_matrix = mvp_matrix.t();
            gl.uniform_matrix4fv_with_f32_array(
                Some(&program.u_translate_location),
                false,
                // Fix: the rows are already `f32`, so the concatenated Vec can be
                // passed directly — the previous `.map(|a| a as f32).collect()`
                // was a no-op cast plus an extra allocation per draw call.
                &[
                    mvp_matrix.row(0).to_vec(),
                    mvp_matrix.row(1).to_vec(),
                    mvp_matrix.row(2).to_vec(),
                    mvp_matrix.row(3).to_vec(),
                ]
                .concat(),
            );
            gl.uniform1i(
                Some(&program.u_flag_round_location),
                if is_rounded { 1 } else { 0 },
            );
            // Unique opaque pick color: the low bits index the area, alpha is
            // forced to 0xFF so the mask pixel is always fully opaque.
            let color = Color::from(id_map.len() as u32 | 0xFF000000);
            gl.uniform4fv_with_f32_array(
                Some(&program.u_mask_color_location),
                &color.to_f32array(),
            );
            gl.draw_elements_with_i32(
                web_sys::WebGlRenderingContext::TRIANGLES,
                6,
                web_sys::WebGlRenderingContext::UNSIGNED_SHORT,
                0,
            );
            id_map.insert(color.to_u32(), TableBlock::new(area_id.clone(), 0));
        }
    }
}
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![deny(warnings)]
extern crate byteorder;
extern crate failure;
#[macro_use] extern crate fdio;
extern crate fidl;
extern crate fidl_fuchsia_wlan_tap as wlantap;
extern crate fuchsia_async as async;
extern crate fuchsia_zircon as zx;
extern crate futures;
use byteorder::{NativeEndian, WriteBytesExt};
use failure::Error;
use fdio::{fdio_sys, ioctl};
use fidl::encoding2::{Encoder};
use zx::AsHandleRef;
use std::fs::{File, OpenOptions};
use std::os::raw;
use std::mem;
use std::path::Path;
/// Handle to the wlantap control device (`/dev/test/wlantapctl`), used to
/// create simulated WLAN PHY devices for testing.
pub struct Wlantap {
    // Open read/write handle to the control device.
    file: File,
}
impl Wlantap {
/// Opens the wlantap control device at `/dev/test/wlantapctl` read/write.
pub fn open() -> Result<Wlantap, Error> {
Ok(Wlantap{
file: OpenOptions::new().read(true).write(true)
.open(Path::new("/dev/test/wlantapctl"))?,
})
}
/// Creates a simulated PHY described by `config` and returns a FIDL proxy
/// for talking to it.
///
/// The ioctl input buffer is the raw channel handle (native-endian u32)
/// followed by the FIDL-encoded config.
pub fn create_phy(&self, mut config: wlantap::WlantapPhyConfig)
-> Result<wlantap::WlantapPhyProxy, Error>
{
let (encoded_config, handles) = (&mut vec![], &mut vec![]);
// NOTE(review): `handles` is assumed to stay empty for this message type —
// confirm the config carries no handles before reusing this pattern.
Encoder::encode(encoded_config, handles, &mut config)?;
let (local, remote) = zx::Channel::create()?;
let mut ioctl_in = vec![];
ioctl_in.write_u32::<NativeEndian>(remote.raw_handle())?;
ioctl_in.append(encoded_config);
// Safe because the length of the buffer is computed from the length of a vector,
// and ioctl() doesn't retain the buffer.
unsafe {
ioctl(&self.file,
IOCTL_WLANTAP_CREATE_WLANPHY,
ioctl_in.as_ptr() as *const std::os::raw::c_void,
ioctl_in.len(),
std::ptr::null_mut(),
0)?;
}
// Release ownership of the remote handle:
// the driver side now owns it, and dropping `remote` here would close it.
mem::forget(remote);
Ok(wlantap::WlantapPhyProxy::new(async::Channel::from_channel(local)?))
}
}
// ioctl number for "create wlanphy": kind SET_HANDLE (the request transfers a
// handle into the driver), family WLANTAP, operation 0.
const IOCTL_WLANTAP_CREATE_WLANPHY: raw::c_int = make_ioctl!(
fdio_sys::IOCTL_KIND_SET_HANDLE,
fdio_sys::IOCTL_FAMILY_WLANTAP,
0
);
|
use crate::common::factories::prelude::*;
use std::net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6};
/// Test factory for building `SocketAddr` values from an IP address and port.
#[derive(Debug, Clone)]
pub struct SocketAddrBuilder {
    /// IP address (V4 or V6) for the resulting socket address.
    pub ip_addr: IpAddr,
    /// Port for the resulting socket address.
    pub port: u16,
}
impl SocketAddrBuilder {
    /// Builder bound to the loopback address with an explicit port.
    pub fn localhost_with_port(port: u16) -> Self {
        let ip_addr = IpAddrBuilder::localhost();
        Self { ip_addr, port }
    }
}
impl Default for SocketAddrBuilder {
    /// Defaults to the IP factory's default address and port 5060
    /// (the standard SIP port).
    fn default() -> Self {
        let ip_addr = IpAddrBuilder::default().build();
        Self {
            ip_addr,
            port: 5060,
        }
    }
}
impl Into<SocketAddr> for SocketAddrBuilder {
fn into(self) -> SocketAddr {
match self.ip_addr {
IpAddr::V4(ipv4_addr) => SocketAddr::V4(SocketAddrV4::new(ipv4_addr, self.port)),
IpAddr::V6(ipv6_addr) => SocketAddr::V6(SocketAddrV6::new(ipv6_addr, self.port, 0, 0)),
}
}
}
impl From<(IpAddr, u16)> for SocketAddrBuilder {
    /// Builds from an `(address, port)` pair.
    fn from((ip_addr, port): (IpAddr, u16)) -> Self {
        Self { ip_addr, port }
    }
}
|
use log::{Level, Log, Metadata, Record};
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::{io, io::Write};
use crate::utils::time::{duration_since_epoch, timestamp_format, PROGRAM_START};
/// Configuration for [`FileLogger`]: where log files live and when they rotate.
pub struct FileLoggerOptions {
    /// Directory the log files are written to.
    pub directory: &'static str,
    /// Base name of the current log file (without the `.log` extension).
    pub filename: &'static str,
    /// Maximum log size in bytes before rotation is triggered.
    pub max_size: u64,
    /// Maximum amount of files to keep in rotation.
    pub max_files: usize,
}
impl FileLoggerOptions {
    /// Options with the project defaults: logs under `logs/`, rotation at
    /// 5 MB, at most 10 files kept.
    pub fn new(filename: &'static str) -> Self {
        const FIVE_MB: u64 = 5 * 1024 * 1024;
        Self {
            filename,
            directory: "logs/",
            max_size: FIVE_MB,
            max_files: 10,
        }
    }
}
/// Rotating file logger; the write path lives in the `Log` impl below.
pub struct FileLogger {
    // Maximum level that is written; records above it are filtered out.
    level: Level,
    // Directory/size/rotation configuration.
    options: FileLoggerOptions,
}
impl FileLogger {
/// Creates a logger that writes records at or below `level` using `options`.
pub fn new(level: Level, options: FileLoggerOptions) -> Self {
Self { level, options }
}
/// path to the current log file
fn log_path(&self) -> PathBuf {
Path::new(self.options.directory).join(format!("{}.log", self.options.filename))
}
/// handle to the current log file, creating it if necessary
fn log_file(&self) -> io::Result<fs::File> {
// Fallback when the append-open fails (typically file or directory missing):
// create the directory tree and then the file. The original open error is
// deliberately discarded.
let create_file = |_| {
let _ = fs::create_dir_all(Path::new(self.options.directory));
fs::File::create(self.log_path())
};
fs::OpenOptions::new()
.append(true)
.open(self.log_path())
.or_else(create_file)
}
/// save current log but only keep n most recent files
fn rotate_logs(&self) {
// backup current log by timestamping it
let new_file_path = Path::new(self.options.directory)
.join(format!("{}.log", duration_since_epoch().as_millis()));
// NOTE(review): panicking inside the logging subsystem aborts the whole
// process on a failed rename — consider degrading gracefully instead.
if let Err(err) = fs::rename(self.log_path(), new_file_path) {
panic!("could not rotate log files - {}", err);
}
// remove oldest log file
// `count` covers every directory entry, not just rotated log files.
let count = fs::read_dir(self.options.directory)
.map(|dir| dir.count())
.ok();
// Option ordering: `None` compares less than any `Some`, so a failed
// read_dir skips cleanup rather than deleting anything.
if count > Some(self.options.max_files) {
let _ = fs::read_dir(self.options.directory).map(|dir_entry: fs::ReadDir| {
// remove first file in lexicographical order (oldest for timestamped files)
// NOTE(review): the lexicographic min also considers the live log's stem
// and unpadded millisecond timestamps — verify the intended rotation order.
dir_entry
.filter_map(|entry| entry.ok())
.flat_map(|entry: fs::DirEntry| {
entry
.path()
.file_stem()
.and_then(|s| s.to_str().map(|s| s.to_string()))
}) // iterator over file stems, as String (representing timestamps)
.min() // gets the first, so the oldest
.and_then(|stem: String| {
fs::remove_file(
Path::new(self.options.directory).join(format!("{}.log", stem)),
)
.ok()
})
});
}
}
}
/// Rotating file implementation for `log`: each record is appended to the
/// current log file, and once the file reaches `max_size` the logs rotate.
impl Log for FileLogger {
    fn enabled(&self, meta: &Metadata<'_>) -> bool {
        // `Level` orders Error < Warn < Info < Debug < Trace, so `<=` accepts
        // everything at or above the configured severity.
        meta.level() <= self.level
    }
    fn log(&self, record: &Record<'_>) {
        if let Ok(file) = self.log_file().as_mut() {
            let data = format!(
                "{} {}: {}\n",
                timestamp_format(PROGRAM_START.elapsed()),
                record.level(),
                record.args()
            );
            // Fix: `write` may perform a partial write and silently drop the
            // tail of a record; `write_all` retries until everything is written.
            // The result is still deliberately ignored — logging stays best-effort.
            let _ = file.write_all(data.as_bytes());
            if let Ok(size) = file.metadata().map(|meta| meta.len()) {
                if size >= self.options.max_size {
                    self.rotate_logs();
                }
            }
        } else {
            println!("ERROR: could not open log file");
        }
    }
    // Writes go straight to the file handle; nothing is buffered here.
    fn flush(&self) {}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// A `Debug` logger accepts everything up to Debug and rejects Trace.
    #[test]
    fn log_enabled_debug() {
        let logger = FileLogger::new(log::Level::Debug, FileLoggerOptions::new("test"));
        for &level in &[
            log::Level::Error,
            log::Level::Warn,
            log::Level::Info,
            log::Level::Debug,
        ] {
            assert!(logger.enabled(&log::Metadata::builder().level(level).build()));
        }
        assert!(!logger.enabled(&log::Metadata::builder().level(log::Level::Trace).build()));
    }
    /// An `Error` logger accepts only Error.
    #[test]
    fn log_enabled_error() {
        let logger = FileLogger::new(log::Level::Error, FileLoggerOptions::new("test"));
        assert!(logger.enabled(&log::Metadata::builder().level(log::Level::Error).build()));
        for &level in &[
            log::Level::Warn,
            log::Level::Info,
            log::Level::Debug,
            log::Level::Trace,
        ] {
            assert!(!logger.enabled(&log::Metadata::builder().level(level).build()));
        }
    }
}
|
use steel::steel_vm::engine::Engine;
// It's possible to add a function that will get run on every instruction call.
// For instance, if you wanted to see how far you were getting in the evaluation
// of a program, you could add the closure using the `on_progress` method.
//
// This is how the repl instruments the CTRL-C handler, by registering a closure
// that listens to the receiving end of an interrupt stream.
/// Demonstrates instrumenting the VM with an instruction-count callback.
pub fn main() {
    let mut vm = Engine::new();
    // The callback receives the number of instructions executed so far and
    // returns whether evaluation should continue.
    vm.on_progress(|count| {
        // Emit a progress log every 1000 operations.
        if count % 1000 == 0 {
            println!("Number of instructions up to this point: {}", count);
        }
        // Returning false would abort evaluation; always continue here.
        true
    });
    // This should end with "Number of instructions up to this point: 4000"
    vm.run(
        r#"
(define (loop x)
(if (equal? x 1000)
x
(loop (+ x 1))))
(displayln (loop 0))
"#,
    )
    .unwrap();
}
|
use std::iter::{Fuse, Peekable};
use std::str::Chars;
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Op {
Add,
Sub,
Mul,
Div,
Eq,
Neq,
Lt,
Gt,
Lte,
Gte,
Not,
And,
Or,
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Token {
If,
Then,
Else,
True,
False,
LeftParen,
RightParen,
LeftBracket,
RightBracket,
Comma,
Assign,
Arrow,
Colon,
Unknown(char),
Number(String),
Ident(String),
Operator(Op),
End,
}
impl Op {
    /// The operator's spelling as it appears in source text.
    pub fn name(self) -> String {
        let text = match self {
            Op::Add => "+",
            Op::Sub => "-",
            Op::Mul => "*",
            Op::Div => "/",
            Op::Eq => "==",
            Op::Neq => "!=",
            Op::Lt => "<",
            Op::Gt => ">",
            Op::Lte => "<=",
            Op::Gte => ">=",
            Op::Not => "not",
            Op::And => "and",
            Op::Or => "or",
        };
        text.to_string()
    }
}
impl Token {
    /// A human-readable rendering of the token, used in diagnostics.
    pub fn name(&self) -> String {
        match self {
            Token::If => "if".to_string(),
            Token::Then => "then".to_string(),
            Token::Else => "else".to_string(),
            Token::True => "true".to_string(),
            Token::False => "false".to_string(),
            Token::LeftParen => "(".to_string(),
            Token::RightParen => ")".to_string(),
            Token::LeftBracket => "[".to_string(),
            Token::RightBracket => "]".to_string(),
            Token::Comma => ",".to_string(),
            Token::Assign => "=".to_string(),
            Token::Arrow => "=>".to_string(),
            Token::Colon => ":".to_string(),
            // Literals and identifiers render as their own text.
            Token::Number(text) => text.clone(),
            Token::Ident(text) => text.clone(),
            Token::Unknown(c) => c.to_string(),
            Token::Operator(op) => op.name(),
            Token::End => "<end>".to_string(),
        }
    }
}
/// Half-open character range `(start, end)` of a token within the input line.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Span(pub usize, pub usize);
/// Character cursor over a single input line, with one-character lookahead.
struct CharStream<'a> {
    // Number of characters consumed so far (drives Span positions).
    index: usize,
    // Fused so iteration keeps yielding None (mapped to '\0') after the end.
    iterator: Peekable<Fuse<Chars<'a>>>,
}
impl<'a> CharStream<'a> {
fn new(line: &'a str) -> CharStream<'a> {
Self {
index: 0,
iterator: line.chars().fuse().peekable(),
}
}
fn next(&mut self) -> char {
self.index += 1;
self.iterator.next().unwrap_or('\0')
}
fn peek(&mut self) -> char {
self.iterator.peek().cloned().unwrap_or('\0')
}
}
/// Eagerly-built token stream with a movable cursor (see `tokenize`).
pub struct Lexer {
    // Cursor into `tokens`; may run past the end, where `peek` yields End.
    index: usize,
    // All tokens of the line, in source order.
    tokens: Vec<Token>,
    // One span per token plus a trailing end-of-input span.
    spans: Vec<Span>,
}
impl Lexer {
    /// Characters allowed in a numeric literal (digits plus '.').
    const DIGITS: &'static str = "0123456789.";
    /// Characters allowed in an identifier or keyword.
    const IDENTS: &'static str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_";

    /// Reads exactly one token from `stream`. The caller must have skipped
    /// whitespace and ensured at least one character remains.
    fn parse_token(stream: &mut CharStream) -> Token {
        let first = stream.peek();

        // Identifier or keyword: greedily consume identifier characters,
        // then check the word against the reserved spellings.
        if Self::IDENTS.contains(first) {
            let mut word = String::new();
            let mut ch = first;
            while Self::IDENTS.contains(ch) {
                word.push(ch);
                stream.next();
                ch = stream.peek();
            }
            return match word.as_str() {
                "and" => Token::Operator(Op::And),
                "or" => Token::Operator(Op::Or),
                "not" => Token::Operator(Op::Not),
                "true" => Token::True,
                "false" => Token::False,
                "if" => Token::If,
                "then" => Token::Then,
                "else" => Token::Else,
                _ => Token::Ident(word),
            };
        }

        // Numeric literal: greedily consume digits and dots. Forms like
        // "3." and ".2" are accepted here; validation is the parser's job.
        if Self::DIGITS.contains(first) {
            let mut digits = String::new();
            let mut ch = first;
            while Self::DIGITS.contains(ch) {
                digits.push(ch);
                stream.next();
                ch = stream.peek();
            }
            return Token::Number(digits);
        }

        // Punctuation: consume the first character, then try to pair it with
        // the following one to form a two-character operator.
        stream.next();
        let double = match (first, stream.peek()) {
            ('=', '=') => Some(Token::Operator(Op::Eq)),
            ('!', '=') => Some(Token::Operator(Op::Neq)),
            ('<', '=') => Some(Token::Operator(Op::Lte)),
            ('>', '=') => Some(Token::Operator(Op::Gte)),
            ('=', '>') => Some(Token::Arrow),
            _ => None,
        };
        if let Some(token) = double {
            stream.next(); // consume the second character of the pair
            return token;
        }

        // Otherwise it is a single-character token.
        match first {
            '[' => Token::LeftBracket,
            ']' => Token::RightBracket,
            '(' => Token::LeftParen,
            ')' => Token::RightParen,
            ',' => Token::Comma,
            '=' => Token::Assign,
            ':' => Token::Colon,
            '+' => Token::Operator(Op::Add),
            '-' => Token::Operator(Op::Sub),
            '*' => Token::Operator(Op::Mul),
            '/' => Token::Operator(Op::Div),
            '>' => Token::Operator(Op::Gt),
            '<' => Token::Operator(Op::Lt),
            other => Token::Unknown(other),
        }
    }

    /// Tokenizes `line` eagerly, recording one span per token plus a
    /// trailing span representing the end-of-input position.
    fn new(line: &str) -> Lexer {
        let mut stream = CharStream::new(line);
        let mut tokens = Vec::new();
        let mut spans = Vec::new();
        loop {
            let ch = stream.peek();
            if ch == '\0' {
                break;
            }
            if ch.is_whitespace() {
                stream.next();
                continue;
            }
            let begin = stream.index;
            let token = Self::parse_token(&mut stream);
            spans.push(Span(begin, stream.index));
            tokens.push(token);
        }
        // Span reported once the cursor sits past the last real token.
        let end = stream.index;
        spans.push(Span(end, end + 1));
        Lexer {
            index: 0,
            tokens,
            spans,
        }
    }

    /// Current token without advancing; `Token::End` once exhausted.
    pub fn peek(&self) -> Token {
        match self.tokens.get(self.index) {
            Some(token) => token.clone(),
            None => Token::End,
        }
    }

    /// Current token, advancing the cursor past it.
    pub fn next(&mut self) -> Token {
        let token = self.peek();
        self.index += 1;
        token
    }

    /// Steps the cursor back one token; panics if already at the start.
    pub fn prev(&mut self) {
        assert!(self.index > 0);
        self.index -= 1;
    }

    /// Source span of the current token, or the end-of-input span.
    pub fn span(&self) -> Span {
        match self.spans.get(self.index) {
            Some(s) if self.index < self.tokens.len() => *s,
            _ => *self.spans.last().unwrap(),
        }
    }
}
/// Tokenizes a single input line into a cursor-style `Lexer`.
pub fn tokenize(line: &str) -> Lexer {
    Lexer::new(line)
}
#[cfg(test)]
mod test {
    use super::{tokenize, CharStream, Op, Token};

    // peek() must not consume; next()/peek() yield '\0' forever at the end.
    #[test]
    fn test_charstream() {
        let line = "abc";
        let mut stream = CharStream::new(line);
        assert_eq!(stream.peek(), 'a');
        assert_eq!(stream.next(), 'a');
        assert_eq!(stream.peek(), 'b');
        assert_eq!(stream.next(), 'b');
        assert_eq!(stream.peek(), 'c');
        assert_eq!(stream.next(), 'c');
        assert_eq!(stream.peek(), '\0');
        assert_eq!(stream.next(), '\0');
    }

    // Helper: lex `string` and require exactly `tokens`, then End.
    fn test_match(string: &str, tokens: impl IntoIterator<Item = Token>) {
        let mut lexer = tokenize(string);
        for tok in tokens {
            assert_eq!(lexer.next(), tok);
        }
        assert_eq!(lexer.next(), Token::End);
    }

    #[test]
    fn test_operators() {
        let string = "+ - * / not and or == != < > <= >=";
        let tokens = vec![
            Op::Add,
            Op::Sub,
            Op::Mul,
            Op::Div,
            Op::Not,
            Op::And,
            Op::Or,
            Op::Eq,
            Op::Neq,
            Op::Lt,
            Op::Gt,
            Op::Lte,
            Op::Gte,
        ]
        .into_iter()
        .map(|x| Token::Operator(x));
        test_match(string, tokens);
    }

    #[test]
    fn test_tokens() {
        let string = "( ) [ ] , = => : ?";
        let tokens = vec![
            Token::LeftParen,
            Token::RightParen,
            Token::LeftBracket,
            Token::RightBracket,
            Token::Comma,
            Token::Assign,
            Token::Arrow,
            Token::Colon,
            Token::Unknown('?'),
        ];
        test_match(string, tokens);
    }

    // Keywords win over identifiers; other words become Ident.
    #[test]
    fn test_idents() {
        let string = "true false if then else or and not foo";
        let tokens = vec![
            Token::True,
            Token::False,
            Token::If,
            Token::Then,
            Token::Else,
            Token::Operator(Op::Or),
            Token::Operator(Op::And),
            Token::Operator(Op::Not),
            Token::Ident("foo".into()),
        ];
        test_match(string, tokens);
    }

    // The lexer accepts bare-dot forms ("3.", ".2") as numbers.
    #[test]
    fn test_numbers() {
        let string = "1 .2 3. 4.5";
        let tokens = vec![
            Token::Number("1".into()),
            Token::Number(".2".into()),
            Token::Number("3.".into()),
            Token::Number("4.5".into()),
        ];
        test_match(string, tokens);
    }

    #[test]
    fn test_basic() {
        let string = "compare(a, ~)";
        let tokens = vec![
            Token::Ident("compare".into()),
            Token::LeftParen,
            Token::Ident("a".into()),
            Token::Comma,
            Token::Unknown('~'),
            Token::RightParen,
        ];
        test_match(string, tokens);
    }

    // Cursor movement: peek is stable, prev rewinds exactly one token.
    #[test]
    fn test_prev_peek_next() {
        let string = "a b c";
        let mut lexer = tokenize(string);
        let a = Token::Ident("a".into());
        let b = Token::Ident("b".into());
        let c = Token::Ident("c".into());
        let end = Token::End;
        assert_eq!(lexer.peek(), a);
        assert_eq!(lexer.next(), a);
        assert_eq!(lexer.peek(), b);
        lexer.prev();
        assert_eq!(lexer.peek(), a);
        assert_eq!(lexer.next(), a);
        assert_eq!(lexer.next(), b);
        assert_eq!(lexer.next(), c);
        lexer.prev();
        assert_eq!(lexer.next(), c);
        // Use the `end` binding (it was previously declared but unused,
        // which triggered an unused-variable warning).
        assert_eq!(lexer.next(), end);
    }
}
|
//! Includes standard bundled widgets.
// One module per bundled widget.
pub mod button;
pub mod scroll;
pub mod list;
pub mod slider;
pub mod edit_text;
pub mod image;
pub mod glcanvas;
pub mod text;

/// Single-import convenience: re-exports the commonly used widget types,
/// styles, and event structs from the modules above.
pub mod prelude {
    pub use super::text::StaticTextStyle;
    pub use super::button::{ButtonStyle, ToggleButtonStyle, ToggleEvent};
    pub use super::edit_text::{EditText, TextUpdated};
    pub use super::slider::{Slider, SetSliderValue, SliderEvent};
    pub use super::list::{List, ListItemSelected, ItemSelected, ListItemHandler};
    pub use super::scroll::ScrollContainer;
    pub use super::image::Image;
    pub use super::glcanvas::{GLCanvasBuilder, GLCanvasState};
}
|
//! Crate for interacting with the Kubernetes API
//!
//! This crate includes the tools for manipulating Kubernetes resources as
//! well as keeping track of those resources as they change over time
//!
//! # Example
//!
//! The following example will create a [`Pod`](k8s_openapi::api::core::v1::Pod)
//! and then watch for it to become available using a manual [`Api::watch`] call.
//!
//! ```rust,no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube_client::api::{Api, ResourceExt, ListParams, PatchParams, Patch};
//! use kube_client::Client;
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Read the environment to find config for kube client.
//! // Note that this tries an in-cluster configuration first,
//! // then falls back on a kubeconfig file.
//! let client = Client::try_default().await?;
//!
//! // Interact with pods in the configured namespace with the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//!
//! // Create a Pod (cheating here with json, but it has to validate against the type):
//! let patch: Pod = serde_json::from_value(serde_json::json!({
//! "apiVersion": "v1",
//! "kind": "Pod",
//! "metadata": {
//! "name": "my-pod"
//! },
//! "spec": {
//! "containers": [
//! {
//! "name": "my-container",
//! "image": "myregistry.azurecr.io/hello-world:v1",
//! },
//! ],
//! }
//! }))?;
//!
//! // Apply the Pod via server-side apply
//! let params = PatchParams::apply("myapp");
//! let result = pods.patch("my-pod", &params, &Patch::Apply(&patch)).await?;
//!
//! // List pods in the configured namespace
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//!
//! Ok(())
//! }
//! ```
//!
//! For more details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Config`](crate::config) for the Kubernetes config abstraction
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi/*/k8s_openapi/) for how to create typed kubernetes objects directly
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
// Nightly clippy (0.1.64) considers Drop a side effect, see https://github.com/rust-lang/rust-clippy/issues/9608
#![allow(clippy::unnecessary_lazy_evaluations)]
// Emits each wrapped item only when the `client` feature is enabled, and
// tags it with `doc(cfg(...))` so docs.rs shows the feature requirement.
macro_rules! cfg_client {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(feature = "client")))]
            #[cfg(feature = "client")]
            $item
        )*
    }
}
// Emits each wrapped item only when the `config` feature is enabled, and
// tags it with `doc(cfg(...))` so docs.rs shows the feature requirement.
macro_rules! cfg_config {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(feature = "config")))]
            #[cfg(feature = "config")]
            $item
        )*
    }
}
// Emits each wrapped item when either `config` or `client` is enabled —
// the error types are shared by both halves of the crate.
macro_rules! cfg_error {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
            #[cfg(any(feature = "config", feature = "client"))]
            $item
        )*
    }
}
// Feature-gated module wiring: each block expands under the matching
// feature flag (see the cfg_* macros above).
cfg_client! {
    pub mod api;
    pub mod discovery;
    pub mod client;

    #[doc(inline)]
    pub use api::Api;
    #[doc(inline)]
    pub use client::Client;
    #[doc(inline)]
    pub use discovery::Discovery;
}
cfg_config! {
    pub mod config;

    #[doc(inline)]
    pub use config::Config;
}
cfg_error! {
    pub mod error;

    #[doc(inline)] pub use error::Error;
    /// Convenient alias for `Result<T, Error>`
    pub type Result<T, E = Error> = std::result::Result<T, E>;
}
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from kube_core
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube-client --lib features=rustls-tls,ws -- --ignored`
#[cfg(all(feature = "client", feature = "config"))]
#[cfg(test)]
mod test {
#![allow(unused_imports)]
use crate::{
api::{AttachParams, AttachedProcess},
client::ConfigExt,
Api, Client, Config, ResourceExt,
};
use futures::{StreamExt, TryStreamExt};
use k8s_openapi::api::core::v1::Pod;
use kube_core::{
params::{DeleteParams, Patch, WatchParams},
response::StatusSummary,
};
use serde_json::json;
use tower::ServiceBuilder;
// hard disabled test atm due to k3d rustls issues: https://github.com/kube-rs/kube/issues?q=is%3Aopen+is%3Aissue+label%3Arustls
#[cfg(feature = "when_rustls_works_with_k3d")]
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
#[cfg(feature = "rustls-tls")]
async fn custom_client_rustls_configuration() -> Result<(), Box<dyn std::error::Error>> {
    // Build a hand-rolled hyper client over the config's rustls connector
    // and verify the typed Api can list pods through it.
    let config = Config::infer().await?;
    let https = config.rustls_https_connector()?;
    let service = ServiceBuilder::new()
        .layer(config.base_uri_layer())
        .service(hyper::Client::builder().build(https));
    let client = Client::new(service, config.default_namespace);
    let pods: Api<Pod> = Api::default_namespaced(client);
    pods.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
#[cfg(feature = "openssl-tls")]
async fn custom_client_openssl_tls_configuration() -> Result<(), Box<dyn std::error::Error>> {
    // Same as the rustls variant above, but via the openssl connector.
    let config = Config::infer().await?;
    let https = config.openssl_https_connector()?;
    let service = ServiceBuilder::new()
        .layer(config.base_uri_layer())
        .service(hyper::Client::builder().build(https));
    let client = Client::new(service, config.default_namespace);
    let pods: Api<Pod> = Api::default_namespaced(client);
    pods.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists api resources)"]
#[cfg(feature = "discovery")]
async fn group_discovery_oneshot() -> Result<(), Box<dyn std::error::Error>> {
    use crate::{core::DynamicObject, discovery};
    // Discover a single API group, then drive an untyped (DynamicObject)
    // Api from its recommended kind.
    let client = Client::try_default().await?;
    let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
    let (ar, _caps) = apigroup.recommended_kind("APIService").unwrap();
    let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
    api.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and edit a pod)"]
async fn pod_can_use_core_apis() -> Result<(), Box<dyn std::error::Error>> {
use kube::api::{DeleteParams, ListParams, Patch, PatchParams, PostParams, WatchEvent};
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 30s
let p: Pod = serde_json::from_value(json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube1",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 30"],
}],
}
}))?;
let pp = PostParams::default();
match pods.create(&pp, &p).await {
Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
Err(e) => return Err(e.into()), // any other case if a failure
}
// Manual watch-api for it to become ready
// NB: don't do this; using conditions (see pod_api example) is easier and less error prone
let wp = WatchParams::default()
.fields(&format!("metadata.name={}", "busybox-kube1"))
.timeout(15);
let mut stream = pods.watch(&wp, "0").await?.boxed();
while let Some(ev) = stream.try_next().await? {
// can debug format watch event
let _ = format!("we: {ev:?}");
match ev {
WatchEvent::Modified(o) => {
let s = o.status.as_ref().expect("status exists on pod");
let phase = s.phase.clone().unwrap_or_default();
if phase == "Running" {
break;
}
}
WatchEvent::Error(e) => panic!("watch error: {e}"),
_ => {}
}
}
// Verify we can get it
let mut pod = pods.get("busybox-kube1").await?;
assert_eq!(p.spec.as_ref().unwrap().containers[0].name, "busybox");
// verify replace with explicit resource version
// NB: don't do this; use server side apply
{
assert!(pod.resource_version().is_some());
pod.spec.as_mut().unwrap().active_deadline_seconds = Some(5);
let pp = PostParams::default();
let patched_pod = pods.replace("busybox-kube1", &pp, &pod).await?;
assert_eq!(patched_pod.spec.unwrap().active_deadline_seconds, Some(5));
}
// Delete it
let dp = DeleteParams::default();
pods.delete("busybox-kube1", &dp).await?.map_left(|pdel| {
assert_eq!(pdel.name_unchecked(), "busybox-kube1");
});
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and attach to a pod)"]
#[cfg(feature = "ws")]
async fn pod_can_exec_and_write_to_stdin() -> Result<(), Box<dyn std::error::Error>> {
    use crate::api::{DeleteParams, ListParams, Patch, PatchParams, WatchEvent};
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::default_namespaced(client);
    // create busybox pod that's alive for at most 30s
    let p: Pod = serde_json::from_value(json!({
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "busybox-kube2",
            "labels": { "app": "kube-rs-test" },
        },
        "spec": {
            "terminationGracePeriodSeconds": 1,
            "restartPolicy": "Never",
            "containers": [{
              "name": "busybox",
              "image": "busybox:1.34.1",
              "command": ["sh", "-c", "sleep 30"],
            }],
        }
    }))?;
    match pods.create(&Default::default(), &p).await {
        Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
        Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
        Err(e) => return Err(e.into()),                         // any other case if a failure
    }
    // Manual watch-api for it to become ready
    // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
    let wp = WatchParams::default()
        .fields(&format!("metadata.name={}", "busybox-kube2"))
        .timeout(15);
    let mut stream = pods.watch(&wp, "0").await?.boxed();
    while let Some(ev) = stream.try_next().await? {
        match ev {
            WatchEvent::Modified(o) => {
                let s = o.status.as_ref().expect("status exists on pod");
                let phase = s.phase.clone().unwrap_or_default();
                if phase == "Running" {
                    break;
                }
            }
            WatchEvent::Error(e) => panic!("watch error: {e}"),
            _ => {}
        }
    }
    // Verify exec works and we can get the output
    {
        let mut attached = pods
            .exec(
                "busybox-kube2",
                vec!["sh", "-c", "for i in $(seq 1 3); do echo $i; done"],
                &AttachParams::default().stderr(false),
            )
            .await?;
        // Collect the whole stdout stream before joining the process.
        let stdout = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
        let out = stdout
            .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
            .collect::<Vec<_>>()
            .await
            .join("");
        attached.join().await.unwrap();
        assert_eq!(out.lines().count(), 3);
        assert_eq!(out, "1\n2\n3\n");
    }
    // Verify we can write to Stdin
    {
        use tokio::io::AsyncWriteExt;
        let mut attached = pods
            .exec(
                "busybox-kube2",
                vec!["sh"],
                &AttachParams::default().stdin(true).stderr(false),
            )
            .await?;
        let mut stdin_writer = attached.stdin().unwrap();
        let mut stdout_stream = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
        // Grab the next-read future *before* writing so the echo isn't missed.
        let next_stdout = stdout_stream.next();
        stdin_writer.write_all(b"echo test string 1\n").await?;
        let stdout = String::from_utf8(next_stdout.await.unwrap().unwrap().to_vec()).unwrap();
        println!("{stdout}");
        assert_eq!(stdout, "test string 1\n");
        // AttachedProcess resolves with status object.
        // Send `exit 1` to get a failure status.
        stdin_writer.write_all(b"exit 1\n").await?;
        let status = attached.take_status().unwrap();
        if let Some(status) = status.await {
            println!("{status:?}");
            assert_eq!(status.status, Some("Failure".to_owned()));
            assert_eq!(status.reason, Some("NonZeroExitCode".to_owned()));
        }
    }
    // Delete it
    let dp = DeleteParams::default();
    pods.delete("busybox-kube2", &dp).await?.map_left(|pdel| {
        assert_eq!(pdel.name_unchecked(), "busybox-kube2");
    });
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and tail logs from a pod)"]
async fn can_get_pod_logs_and_evict() -> Result<(), Box<dyn std::error::Error>> {
    use crate::{
        api::{DeleteParams, EvictParams, ListParams, Patch, PatchParams, WatchEvent},
        core::subresource::LogParams,
    };
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::default_namespaced(client);
    // create busybox pod that's alive for at most 30s
    let p: Pod = serde_json::from_value(json!({
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "busybox-kube3",
            "labels": { "app": "kube-rs-test" },
        },
        "spec": {
            "terminationGracePeriodSeconds": 1,
            "restartPolicy": "Never",
            "containers": [{
              "name": "busybox",
              "image": "busybox:1.34.1",
              "command": ["sh", "-c", "for i in $(seq 1 5); do echo kube $i; sleep 0.1; done"],
            }],
        }
    }))?;
    match pods.create(&Default::default(), &p).await {
        Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
        Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
        Err(e) => return Err(e.into()),                         // any other case if a failure
    }
    // Manual watch-api for it to become ready
    // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
    let wp = WatchParams::default()
        .fields(&format!("metadata.name={}", "busybox-kube3"))
        .timeout(15);
    let mut stream = pods.watch(&wp, "0").await?.boxed();
    while let Some(ev) = stream.try_next().await? {
        match ev {
            WatchEvent::Modified(o) => {
                let s = o.status.as_ref().expect("status exists on pod");
                let phase = s.phase.clone().unwrap_or_default();
                if phase == "Running" {
                    break;
                }
            }
            WatchEvent::Error(e) => panic!("watch error: {e}"),
            _ => {}
        }
    }
    // Get current list of logs
    let lp = LogParams {
        follow: true,
        ..LogParams::default()
    };
    // Open the follow-stream first, then grab the one-shot log dump.
    let mut logs_stream = pods.log_stream("busybox-kube3", &lp).await?.boxed();
    // wait for container to finish
    tokio::time::sleep(std::time::Duration::from_secs(2)).await;
    let all_logs = pods.logs("busybox-kube3", &Default::default()).await?;
    assert_eq!(all_logs, "kube 1\nkube 2\nkube 3\nkube 4\nkube 5\n");
    // individual logs may or may not buffer
    let mut output = String::new();
    while let Some(line) = logs_stream.try_next().await? {
        output.push_str(&String::from_utf8_lossy(&line));
    }
    assert_eq!(output, "kube 1\nkube 2\nkube 3\nkube 4\nkube 5\n");
    // evict the pod
    let ep = EvictParams::default();
    let eres = pods.evict("busybox-kube3", &ep).await?;
    assert_eq!(eres.code, 201); // created
    assert!(eres.is_success());
    Ok(())
}
#[tokio::test]
#[ignore = "requires a cluster"]
async fn can_operate_on_pod_metadata() -> Result<(), Box<dyn std::error::Error>> {
    use crate::{
        api::{DeleteParams, EvictParams, ListParams, Patch, PatchParams, WatchEvent},
        core::subresource::LogParams,
    };
    use kube_core::{ObjectList, ObjectMeta, PartialObjectMeta, PartialObjectMetaExt};
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::default_namespaced(client);
    // create busybox pod that's alive for at most 30s
    let p: Pod = serde_json::from_value(json!({
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "busybox-kube-meta",
            "labels": { "app": "kube-rs-test" },
        },
        "spec": {
            "terminationGracePeriodSeconds": 1,
            "restartPolicy": "Never",
            "containers": [{
              "name": "busybox",
              "image": "busybox:1.34.1",
              "command": ["sh", "-c", "sleep 30s"],
            }],
        }
    }))?;
    match pods.create(&Default::default(), &p).await {
        Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
        Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
        Err(e) => return Err(e.into()),                         // any other case if a failure
    }
    // Test we can get a pod as a PartialObjectMeta and convert to
    // ObjectMeta
    let pod_metadata = pods.get_metadata("busybox-kube-meta").await?;
    assert_eq!("busybox-kube-meta", pod_metadata.name_any());
    assert_eq!(
        Some((&"app".to_string(), &"kube-rs-test".to_string())),
        pod_metadata.labels().get_key_value("app")
    );
    // Test we can get a list of PartialObjectMeta for pods
    let p_list = pods.list_metadata(&ListParams::default()).await?;
    // Find only pod we are concerned with in this test and fail eagerly if
    // name doesn't exist
    let pod_metadata = p_list
        .items
        .into_iter()
        .find(|p| p.name_any() == "busybox-kube-meta")
        .unwrap();
    assert_eq!(
        pod_metadata.labels().get("app"),
        Some(&"kube-rs-test".to_string())
    );
    // Attempt to patch pod metadata
    let patch = ObjectMeta {
        annotations: Some([("test".to_string(), "123".to_string())].into()),
        ..Default::default()
    }
    .into_request_partial::<Pod>();
    let patchparams = PatchParams::default();
    let p_patched = pods
        .patch_metadata("busybox-kube-meta", &patchparams, &Patch::Merge(&patch))
        .await?;
    assert_eq!(p_patched.annotations().get("test"), Some(&"123".to_string()));
    // Metadata-only patches come back typed as PartialObjectMetadata.
    assert_eq!(p_patched.types.as_ref().unwrap().kind, "PartialObjectMetadata");
    assert_eq!(p_patched.types.as_ref().unwrap().api_version, "meta.k8s.io/v1");
    // Clean-up
    let dp = DeleteParams::default();
    pods.delete("busybox-kube-meta", &dp).await?.map_left(|pdel| {
        assert_eq!(pdel.name_any(), "busybox-kube-meta");
    });
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create a CertificateSigningRequest)"]
async fn csr_can_be_approved() -> Result<(), Box<dyn std::error::Error>> {
    use crate::api::PostParams;
    use k8s_openapi::api::certificates::v1::{
        CertificateSigningRequest, CertificateSigningRequestCondition, CertificateSigningRequestStatus,
    };
    let csr_name = "fake";
    // Base64-encoded PEM certificate request fixture.
    let dummy_csr: CertificateSigningRequest = serde_json::from_value(json!({
        "apiVersion": "certificates.k8s.io/v1",
        "kind": "CertificateSigningRequest",
        "metadata": { "name": csr_name },
        "spec": {
            "request": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=",
            "signerName": "kubernetes.io/kube-apiserver-client",
            "expirationSeconds": 86400,
            "usages": ["client auth"]
        }
    }))?;
    let client = Client::try_default().await?;
    let csr: Api<CertificateSigningRequest> = Api::all(client.clone());
    assert!(csr.create(&PostParams::default(), &dummy_csr).await.is_ok());
    // Patch the approval and approve the CSR
    let approval_type = "ApprovedFake";
    let csr_status: CertificateSigningRequestStatus = CertificateSigningRequestStatus {
        certificate: None,
        conditions: Some(vec![CertificateSigningRequestCondition {
            type_: approval_type.to_string(),
            last_update_time: None,
            last_transition_time: None,
            message: Some(format!("{} {}", approval_type, "by kube-rs client")),
            reason: Some("kube-rsClient".to_string()),
            status: "True".to_string(),
        }]),
    };
    let csr_status_patch = Patch::Merge(serde_json::json!({ "status": csr_status }));
    // Approval must go through the dedicated approval subresource.
    let _ = csr
        .patch_approval(csr_name, &Default::default(), &csr_status_patch)
        .await?;
    let csr_after_approval = csr.get_approval(csr_name).await?;
    assert_eq!(
        csr_after_approval
            .status
            .as_ref()
            .unwrap()
            .conditions
            .as_ref()
            .unwrap()[0]
            .type_,
        approval_type.to_string()
    );
    // Clean-up
    csr.delete(csr_name, &DeleteParams::default()).await?;
    Ok(())
}
}
|
// Public submodules of this crate.
pub mod ns;
pub mod omission;
pub mod void;
pub mod whitespace;
|
use async_trait::async_trait;
use hyper::{body::Body, client::connect::HttpConnector, Client, Response, StatusCode};
use hyper_tls::HttpsConnector;
use native_tls::{Certificate, TlsConnector};
use rhodium::{errors::*, request::*, response::*, stack::*, *};
use std::net::{IpAddr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use std::{thread, time};
// Mock implementations
// Mock communication channel: `return_error` lets an upstream handler flip
// the downstream Service into its error path for a single request.
struct Comm {
    return_error: bool,
}
impl CommunicationChannel for Comm {
    // A fresh channel per request, starting on the success path.
    fn new() -> Comm {
        Comm {
            return_error: false,
        }
    }
}
// Mock service: replies with an empty 200 OK, unless the channel was flagged
// by a handler, in which case it surfaces a warning-level RhodError.
struct Service {}
#[async_trait]
impl RhodService<Comm> for Service {
    async fn serve(
        &self,
        _conn: &RhodConnInfo,
        _req: RhodRequest,
        comm: &mut Comm,
    ) -> RhodResult<RhodResponse> {
        if comm.return_error {
            Err(RhodError::from_str("some error", RhodErrorLevel::Warning))
        } else {
            let res = Response::builder()
                .status(StatusCode::OK)
                .body(Body::empty())
                .unwrap();
            let res = RhodResponse::new(res);
            Ok(res)
        }
    }
}
// Mock handler that forces the Service into its error path by setting
// `return_error` on the per-request channel; all other hooks are no-ops.
struct ErrorHandler {}
#[async_trait]
impl RhodHandler<Comm> for ErrorHandler {
    async fn handle_request(
        &self,
        _conn: &RhodConnInfo,
        _req: &mut RhodRequest,
        comm: &mut Comm,
    ) -> RhodResult<()> {
        // Flag the channel so Service::serve returns Err.
        comm.return_error = true;
        Ok(())
    }
    async fn catch_request(
        &self,
        _conn: &RhodConnInfo,
        _req: &RhodRequest,
        _err: &RhodError,
        _comm: &Comm,
    ) {
    }
    async fn handle_response(
        &self,
        _conn: &RhodConnInfo,
        res: RhodResponse,
        _comm: &mut Comm,
    ) -> (RhodResponse, RhodResult<()>) {
        // Pass the response through untouched.
        (res, Ok(()))
    }
    async fn catch_response(
        &self,
        _conn: &RhodConnInfo,
        _res: &RhodResponse,
        _err: &RhodError,
        _comm: &Comm,
    ) {
    }
}
// Runs the given Rhodium server on a dedicated OS thread with its own Tokio
// runtime, then sleeps so the listener is (probably) up before the test's
// client connects.
fn spawn_rhod(rhod: Rhodium<Comm>) {
    //Create new thread for Rhodium
    thread::spawn(move || {
        use tokio::runtime::Runtime;
        // Create the runtime
        let rt = Runtime::new().unwrap();
        // Execute the future, blocking the current thread until completion
        rt.block_on(rhod.run());
    });
    // NOTE(review): fixed 5s sleep is a race guard, not a readiness check —
    // consider polling the port instead; confirm why 5000ms was chosen.
    thread::sleep(time::Duration::from_millis(5000));
}
#[tokio::test]
async fn test_complete_transaction() {
    //create server
    // No handlers in the stack: the request goes straight to Service,
    // which returns 200 OK (see Service above).
    let stack = RhodStack::new(vec![], Box::new(Service {}));
    let rhod = Rhodium::new(
        Arc::new(stack),
        SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 3000),
        protocols::HttpProtocolConf::HTTP,
    );
    spawn_rhod(rhod);
    //Creates client and gets response
    let client = Client::new();
    let uri = "http://127.0.0.1:3000".parse().unwrap();
    client.get(uri).await.unwrap();
}
#[tokio::test]
async fn test_error_handler() {
    //create server
    // ErrorHandler flips the channel flag, so Service returns an error and
    // the client-side GET is expected to fail.
    let stack = RhodStack::new(
        vec![RhodHandlerInStack::RhodHandler(Box::new(ErrorHandler {}))],
        Box::new(Service {}),
    );
    let rhod = Rhodium::new(
        Arc::new(stack),
        SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 3001),
        protocols::HttpProtocolConf::HTTP,
    );
    spawn_rhod(rhod);
    //Creates client and gets response
    let client = Client::new();
    let uri = "http://127.0.0.1:3001".parse().unwrap();
    assert!(client.get(uri).await.is_err());
}
#[tokio::test]
async fn test_ssl() {
    //create server
    // HTTPS server using the test certificate/key pair from assets.
    let stack = RhodStack::new(vec![], Box::new(Service {}));
    let rhod = Rhodium::new(
        Arc::new(stack),
        SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 3002),
        protocols::HttpProtocolConf::HTTPS {
            cert_file: String::from("tests/assets/certs/server.crt"),
            key_file: String::from("tests/assets/certs/server.key"),
        },
    );
    spawn_rhod(rhod);
    //Reading certificate
    const SELF_SIGNED_CERT: &[u8] = include_bytes!("assets/certs/CA.pem");
    let cert = Certificate::from_pem(SELF_SIGNED_CERT).unwrap();
    //Creating HttpsConnector that trust certificate
    let mut http = HttpConnector::new();
    // Allow https:// URIs through the plain HTTP connector underneath TLS.
    http.enforce_http(false);
    let mut tls_builder = TlsConnector::builder();
    tls_builder.add_root_certificate(cert); // Adds the certificate to the set of roots that the connector will trust
    let tls = tls_builder.build().unwrap();
    let https = HttpsConnector::from((http, tls.into()));
    //Creates client and gets response
    let client = Client::builder().build::<_, hyper::Body>(https);
    // "localhost" must match the test certificate's subject name.
    let uri = "https://localhost:3002".parse().unwrap();
    client.get(uri).await.unwrap();
}
|
// NOTE(review): this looks like svd2rust-generated register accessor code
// (PLL3DIVR) — field widths/offsets must match the device SVD; regenerate
// rather than hand-edit. Comments only added below.
#[doc = "Register `PLL3DIVR` reader"]
pub type R = crate::R<PLL3DIVR_SPEC>;
#[doc = "Register `PLL3DIVR` writer"]
pub type W = crate::W<PLL3DIVR_SPEC>;
#[doc = "Field `PLL3N` reader - Multiplication factor for PLL3VCO Set and reset by software to control the multiplication factor of the VCO. These bits can be written only when the PLL is disabled (PLL3ON = 0 and PLL3RDY = 0). ... ... Others: reserved"]
pub type PLL3N_R = crate::FieldReader<u16>;
#[doc = "Field `PLL3N` writer - Multiplication factor for PLL3VCO Set and reset by software to control the multiplication factor of the VCO. These bits can be written only when the PLL is disabled (PLL3ON = 0 and PLL3RDY = 0). ... ... Others: reserved"]
// 9-bit writer (offset encoded in the const generic O at the use site)
pub type PLL3N_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 9, O, u16>;
#[doc = "Field `PLL3P` reader - PLL3 DIVP division factor Set and reset by software to control the frequency of the pll3_p_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3P_R = crate::FieldReader;
#[doc = "Field `PLL3P` writer - PLL3 DIVP division factor Set and reset by software to control the frequency of the pll3_p_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3P_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
#[doc = "Field `PLL3Q` reader - PLL3 DIVQ division factor Set and reset by software to control the frequency of the pll3_q_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3Q_R = crate::FieldReader;
#[doc = "Field `PLL3Q` writer - PLL3 DIVQ division factor Set and reset by software to control the frequency of the pll3_q_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3Q_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
#[doc = "Field `PLL3R` reader - PLL3 DIVR division factor Set and reset by software to control the frequency of the pll3_r_ck clock. These bits can be written only when the PLL1 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3R_R = crate::FieldReader;
#[doc = "Field `PLL3R` writer - PLL3 DIVR division factor Set and reset by software to control the frequency of the pll3_r_ck clock. These bits can be written only when the PLL1 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
pub type PLL3R_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
impl R {
    #[doc = "Bits 0:8 - Multiplication factor for PLL3VCO. Set and reset by software; writable only while PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). Out-of-range values are reserved."]
    #[inline(always)]
    pub fn pll3n(&self) -> PLL3N_R {
        // 9-bit field at bit offset 0.
        let raw = (self.bits & 0x01ff) as u16;
        PLL3N_R::new(raw)
    }
    #[doc = "Bits 9:15 - PLL3 DIVP division factor, controlling the pll3_p_ck clock frequency. Writable only while PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0)."]
    #[inline(always)]
    pub fn pll3p(&self) -> PLL3P_R {
        // 7-bit field at bit offset 9.
        let raw = ((self.bits >> 9) & 0x7f) as u8;
        PLL3P_R::new(raw)
    }
    #[doc = "Bits 16:22 - PLL3 DIVQ division factor, controlling the pll3_q_ck clock frequency. Writable only while PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0)."]
    #[inline(always)]
    pub fn pll3q(&self) -> PLL3Q_R {
        // 7-bit field at bit offset 16.
        let raw = ((self.bits >> 16) & 0x7f) as u8;
        PLL3Q_R::new(raw)
    }
    #[doc = "Bits 24:30 - PLL3 DIVR division factor, controlling the pll3_r_ck clock frequency. Writable only while PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0)."]
    #[inline(always)]
    pub fn pll3r(&self) -> PLL3R_R {
        // 7-bit field at bit offset 24 (bit 23 is a gap in this register).
        let raw = ((self.bits >> 24) & 0x7f) as u8;
        PLL3R_R::new(raw)
    }
}
impl W {
    #[doc = "Bits 0:8 - Multiplication factor for PLL3VCO Set and reset by software to control the multiplication factor of the VCO. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ... ... Others: reserved"]
    #[inline(always)]
    #[must_use]
    pub fn pll3n(&mut self) -> PLL3N_W<PLL3DIVR_SPEC, 0> {
        PLL3N_W::new(self)
    }
    #[doc = "Bits 9:15 - PLL3 DIVP division factor Set and reset by software to control the frequency of the pll3_p_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
    #[inline(always)]
    #[must_use]
    pub fn pll3p(&mut self) -> PLL3P_W<PLL3DIVR_SPEC, 9> {
        PLL3P_W::new(self)
    }
    #[doc = "Bits 16:22 - PLL3 DIVQ division factor Set and reset by software to control the frequency of the pll3_q_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
    #[inline(always)]
    #[must_use]
    pub fn pll3q(&mut self) -> PLL3Q_W<PLL3DIVR_SPEC, 16> {
        PLL3Q_W::new(self)
    }
    #[doc = "Bits 24:30 - PLL3 DIVR division factor Set and reset by software to control the frequency of the pll3_r_ck clock. These bits can be written only when the PLL3 is disabled (PLL3ON = 0 and PLL3RDY = 0). ..."]
    #[inline(always)]
    #[must_use]
    pub fn pll3r(&mut self) -> PLL3R_W<PLL3DIVR_SPEC, 24> {
        PLL3R_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[doc = ""]
    #[doc = "# Safety"]
    #[doc = ""]
    #[doc = "The caller must ensure the value is valid for this register: no field validity checks are performed, so reserved or out-of-range bit patterns are written as-is."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "RCC PLL3 dividers register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pll3divr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pll3divr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PLL3DIVR_SPEC;
// PLL3DIVR is accessed as a full 32-bit word.
impl crate::RegisterSpec for PLL3DIVR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`pll3divr::R`](R) reader structure"]
impl crate::Readable for PLL3DIVR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pll3divr::W`](W) writer structure"]
impl crate::Writable for PLL3DIVR_SPEC {
    // Both modify bitmaps are zero: no field in this register is declared as
    // requiring a fixed 0 or 1 to be written during a read-modify-write.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PLL3DIVR to value 0x0101_0280"]
impl crate::Resettable for PLL3DIVR_SPEC {
    // Hardware reset value of the register (from the SVD).
    const RESET_VALUE: Self::Ux = 0x0101_0280;
}
|
// Strategy sub-modules — presumably appearance-based, 24-hour and solar image
// selection, judging by the exported names; verify against each module's docs.
mod appearance;
mod h24;
mod solar;
// Time helpers, exposed as a public sub-module.
pub mod time;
// Re-export each strategy's entry points at this module's root so callers
// don't need to name the private sub-modules.
pub use appearance::{current_image_index_appearance, get_image_index_order_appearance};
pub use h24::{current_image_index_h24, get_image_index_order_h24, sort_time_items};
pub use solar::{current_image_index_solar, get_image_index_order_solar, sort_solar_items};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.