text stringlengths 8 4.13M |
|---|
#[macro_use]
extern crate iron;
extern crate cookie_fe;
extern crate router;
extern crate time;
use iron::prelude::*;
use iron::status;
use iron::AroundMiddleware;
use router::Router;
use cookie_fe::{Builder as CookieBuilder, Util as CookieUtil, CookiePair};
const KEY: &'static [u8] = b"4b8eee793a846531d6d95dd66ae48319";
fn root(req: &mut Request) -> IronResult<Response> {
let mut res = Response::with((status::Ok));
let jar = iexpect!(req.extensions.get_mut::<CookieUtil>()
.and_then(|x| x.jar()));
let cookie = CookiePair::new("foo".to_string(),
format!("{}", time::now().rfc3339()));
let old = jar.signed().find("foo")
.map(|x| x.value )
.unwrap_or_else(|| "none".to_string() );
jar.signed().add(cookie);
res.set_mut(old);
Ok(res)
}
/// Wire the router into a middleware chain with signed-cookie support and
/// serve it on 0.0.0.0:3000.
fn main() {
    let mut router = Router::new();
    router.get("/", root);
    let handler = CookieBuilder::new(KEY).around(Box::new(Chain::new(router)));
    Iron::new(handler).http("0.0.0.0:3000").unwrap();
}
|
use futures::prelude::*;
use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
/// A stream representing notifications at a fixed interval.
#[must_use = "streams do nothing unless polled"]
pub struct Interval {
    // Boxed, runtime-specific timer implementation; kept pinned because the
    // underlying stream may not be movable once polled.
    inner: Pin<Box<dyn runtime_raw::Interval>>,
}
impl Interval {
    /// Create a stream that fires events at a set interval.
    ///
    /// ## Examples
    /// ```
    /// # use futures::prelude::*;
    /// use runtime::time::Interval;
    /// use std::time::{Duration, Instant};
    ///
    /// # #[runtime::main]
    /// # async fn main () -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
    /// let start = Instant::now();
    /// let mut interval = Interval::new(Duration::from_millis(10)).take(3);
    /// while let Some(now) = interval.next().await {
    ///     println!("{}ms have elapsed", (now - start).as_millis());
    /// }
    ///
    /// assert!(Instant::now() - start >= Duration::from_millis(30));
    /// # Ok(())}
    /// ```
    #[inline]
    pub fn new(dur: Duration) -> Self {
        // Delegate timer construction to whichever runtime backend is installed.
        let inner = runtime_raw::current_runtime().new_interval(dur);
        Self { inner }
    }
}
impl Stream for Interval {
    type Item = Instant;
    // Forward polling to the boxed runtime timer; the box itself is Unpin,
    // so `poll_next_unpin` from `StreamExt` applies.
    #[inline]
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.inner.poll_next_unpin(cx)
    }
}
impl fmt::Debug for Interval {
    // Delegate Debug output to the underlying runtime timer.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        fmt::Debug::fmt(&self.inner, f)
    }
}
|
/// Explicitly lock a [`std::io::Write`]able
pub trait Lockable {
    /// The locked-handle type produced by [`Lockable::lock`].
    type Locked;
    /// Get exclusive access to the `Stream`
    ///
    /// Why?
    /// - Faster performance when writing in a loop
    /// - Avoid other threads interleaving output with the current thread
    fn lock(self) -> Self::Locked;
}
impl Lockable for std::io::Stdout {
    type Locked = std::io::StdoutLock<'static>;

    #[inline]
    fn lock(self) -> Self::Locked {
        // Call the inherent `Stdout::lock` by its fully qualified path so
        // method resolution cannot pick this trait method and recurse.
        std::io::Stdout::lock(&self)
    }
}
impl Lockable for std::io::Stderr {
    type Locked = std::io::StderrLock<'static>;

    #[inline]
    fn lock(self) -> Self::Locked {
        // Fully qualified inherent call; `self.lock()` here would resolve to
        // this trait method and recurse.
        std::io::Stderr::lock(&self)
    }
}
|
extern crate sio;
use self::sio as core;
pub mod plat;
pub mod logger;
pub mod error;
//pub mod notes;
#[cfg(test)]
mod tests {
    // Placeholder smoke test; asserts nothing yet.
    #[test]
    fn it_works() {
    }
}
|
pub mod add_event_listener_5;
pub mod document_1;
pub mod on_submit_1;
pub mod window_0;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::closure::Closure;
use wasm_bindgen::JsCast;
use web_sys::{Event, EventTarget, Window};
use liblumen_alloc::erts::fragment::HeapFragment;
use liblumen_alloc::erts::term::prelude::*;
use crate::event_listener;
use crate::r#async;
use crate::runtime::process::spawn::options::Options;
/// Registers a listener for `event` on `window` that, per event, spawns a
/// Lumen process applying `module:function` via a JS `Promise`.
///
/// NOTE(review): the closure is stored in the `Rc` cell (`f`/`g`) that the
/// closure itself captures, forming a cycle that keeps the listener alive for
/// the page's lifetime — confirm this intentional "leak" ownership model.
pub fn add_event_listener(
    window: &Window,
    event: &'static str,
    module: Atom,
    function: Atom,
    options: Options,
) {
    let f = Rc::new(RefCell::new(None));
    let g = f.clone();
    let event_listener = move |event: &Event| {
        // Suppress the browser's default handling; the spawned process decides.
        event.prevent_default();
        let promise_module = event_listener::module();
        let promise_function = event_listener::apply_4::function();
        // Wrap the listener cell and the DOM event as resource terms, each on
        // its own heap fragment, so they can be passed to the spawned process.
        let (event_listener_boxed_resource, event_listener_non_null_heap_fragment) =
            HeapFragment::new_resource(f.clone()).unwrap();
        let (event_boxed_resource, event_non_null_heap_fragment) =
            HeapFragment::new_resource(event.clone()).unwrap();
        let promise_argument_vec = vec![
            event_listener_boxed_resource.into(),
            event_boxed_resource.into(),
            module.encode().unwrap(),
            function.encode().unwrap(),
        ];
        let promise = r#async::apply_3::promise(
            promise_module,
            promise_function,
            promise_argument_vec,
            options,
        )
        .unwrap();
        // drop heap fragments now that term are cloned to spawned process in
        // reverse order
        std::mem::drop(event_non_null_heap_fragment);
        std::mem::drop(event_listener_non_null_heap_fragment);
        promise
    };
    let event_listener_box: Box<dyn FnMut(&Event) -> js_sys::Promise> = Box::new(event_listener);
    let event_listener_closure = Closure::wrap(event_listener_box);
    // Store the closure in the cell it captures (completing the keep-alive cycle).
    *g.borrow_mut() = Some(event_listener_closure);
    let window_event_target: &EventTarget = window.as_ref();
    window_event_target
        .add_event_listener_with_callback(
            event,
            g.borrow().as_ref().unwrap().as_ref().unchecked_ref(),
        )
        .unwrap();
}
/// Atom naming the Elixir module this native module backs.
pub fn module() -> Atom {
    Atom::from_str("Elixir.Lumen.Web.Window")
}
|
// Copyright 2018 Mattias Cibien
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use rand;
use rand::distributions::range::SampleRange;
use rand::Rng;
use super::DiceData;
use num_traits::int::PrimInt;
use num_traits::zero;
// TODO: not sure if this is correct
use num_iter::range;
use num_traits::one;
/// Roll the dice described by `dice_data` using the thread-local RNG.
pub fn roll<T>(dice_data: DiceData<T>) -> T
where
    T: PrimInt + SampleRange,
{
    let mut rng = rand::thread_rng();
    roll_with_fn(dice_data, |lo, hi| rng.gen_range(lo, hi))
}
/// Roll `num_dice` dice of `num_faces` faces each, drawing every die from
/// `function(lo, hi)` (expected to return a value in `[lo, hi)`), then apply
/// the modifier: added when `modifier` is true, subtracted otherwise.
pub fn roll_with_fn<T, F>(dice_data: DiceData<T>, mut function: F) -> T
where
    T: PrimInt + SampleRange,
    F: FnMut(T, T) -> T,
{
    let mut total: T = zero();
    for _ in range(zero(), dice_data.num_dice) {
        // Each die contributes a value in [1, num_faces].
        total = total + function(zero(), dice_data.num_faces) + one();
    }
    if dice_data.modifier {
        total + dice_data.modifier_val
    } else {
        total - dice_data.modifier_val
    }
}
#[cfg(test)]
mod test;
|
mod client;
mod server;
pub use client::*;
pub use server::*;
|
mod memory;
mod stringref;
pub use self::memory::*;
pub use self::stringref::*;
|
pub mod chip8_mod;
pub mod sdl_mod;
pub mod utils_mod; |
pub mod traits;
pub mod bounded_channel;
pub mod angular_channel;
pub mod free_channel;
pub mod cast;
pub mod scalar;
pub use self::traits::*;
pub use self::bounded_channel::*;
pub use self::angular_channel::*;
pub use self::free_channel::*;
pub use self::cast::*;
pub use self::scalar::*;
|
extern crate cgmath;
extern crate glium;
extern crate time;
use crate::my_game::cgmath::InnerSpace;
#[path = "./drawable/mesh.rs"]
mod drawables;
static MOUSE_SPEED: f32 = 0.005;
/// Per-frame transition state of a keyboard key observed during event polling.
pub enum Keystate {
    Nothing,
    Pressed,
    Released,
}
/// Camera state: world position, view direction, and projection matrix.
///
/// NOTE(review): `DummyGame::input` references `cam.h_angle`, `cam.v_angle`
/// and `cam.adjust_angle()`, none of which exist on this struct as written —
/// confirm whether those members were removed or are still to be added.
pub struct Cam {
    pub pos: cgmath::Point3<f32>,
    pub look_dir: cgmath::Vector3<f32>,
    pub perspective: cgmath::Matrix4<f32>,
}
/// Top-level game state: GL program, window/event loop, scene meshes, camera
/// and per-frame input state.
///
/// NOTE(review): `glutin` is only imported inside `DummyGame::new` (via
/// `use glium::glutin`); the `glutin::...` paths in these field types will
/// not resolve at module scope as written — confirm the intended imports.
pub struct DummyGame {
    pub renderer: glium::Program,
    pub events_loop: glutin::EventsLoop,
    pub display: glium::Display,
    pub closed: bool,
    pub meshes: Vec<drawables::StaticMesh>,
    pub cam: Cam,
    // Index order: [W, S, A, D, X, Y] (see `input`/`update`).
    pub keys_pressed: [bool; 6],
    pub previous_mouse_position: glutin::dpi::LogicalPosition,
}
impl DummyGame {
pub fn new(title: &str, vsync: bool) -> DummyGame {
use glium::glutin;
let events_loop = glutin::EventsLoop::new();
let window = glutin::WindowBuilder::new().with_title(title);
let context = glutin::ContextBuilder::new()
.with_depth_buffer(24)
.with_vsync(vsync);
let mut display = glium::Display::new(window, context, &events_loop).unwrap();
let vertex_shader = std::fs::read_to_string("./res/shader/vs.glsl").unwrap();
let fragment_shader = std::fs::read_to_string("./res/shader/fs.glsl").unwrap();
let vertex_shader_src: &str = vertex_shader.as_ref();
let fragment_shader_src: &str = fragment_shader.as_ref();
let program =
glium::Program::from_source(&display, vertex_shader_src, fragment_shader_src, None)
.unwrap();
let mut ms = Vec::new();
ms.push(drawables::StaticMesh::load_obj(
&mut display,
"./res/teapot.obj",
));
let pos = cgmath::Point3 {
x: 2.0,
y: -1.0,
z: 1.0,
};
let look_dir: cgmath::Vector3<f32> = cgmath::Vector3 {
x: -2.0,
y: 1.0,
z: 1.0,
};
let look_dir = look_dir.normalize();
let fv = cgmath::Rad(std::f64::consts::PI / 3.0);
let perspective: cgmath::Matrix4<f32> =
cgmath::perspective(fv, (4 / 3) as f32, 0.1, 1024.0);
//let kp = Vec::new();
DummyGame {
renderer: (program),
events_loop: (events_loop),
display: (display),
closed: false,
meshes: ms,
cam: Cam {
pos: (pos),
look_dir: (dir),
perspective: (perspective),
},
keys_pressed: [false, false, false, false, false, false],
previous_mouse_position: glutin::dpi::LogicalPosition::new(0.0, 0.0),
}
}
    /// Drain pending window events and fold them into per-frame input state:
    /// close flag, W/S/A/D/X/Y key transitions, window resize (rebuilds the
    /// projection matrix), and ctrl+mouse camera look.
    fn input(&mut self) {
        use glutin::ElementState::Pressed;
        let mut close = false;
        let mut w: Keystate = Keystate::Nothing;
        let mut s: Keystate = Keystate::Nothing;
        let mut a: Keystate = Keystate::Nothing;
        let mut d: Keystate = Keystate::Nothing;
        let mut x: Keystate = Keystate::Nothing;
        let mut y: Keystate = Keystate::Nothing;
        let mut ctrl_pressed = false;
        let mut size_changed = false;
        // NOTE(review): `std::f64::consts::PI` makes `fv` a Rad<f64> while the
        // matrix is Matrix4<f32>, and `(4 / 3) as f32` is integer division
        // (== 1.0) — confirm intended types and aspect ratio.
        let fv = cgmath::Rad(std::f64::consts::PI / 3.0);
        let mut perspective_new: cgmath::Matrix4<f32> =
            cgmath::perspective(fv, (4 / 3) as f32, 0.1, 1024.0);
        let mut current_position: glutin::dpi::LogicalPosition =
            glutin::dpi::LogicalPosition::new(0.0, 0.0);
        self.events_loop.poll_events(|event| match event {
            glutin::Event::WindowEvent { event, .. } => match event {
                glutin::WindowEvent::Resized(size) => {
                    size_changed = true;
                    let fv = cgmath::Rad(std::f64::consts::PI / 3.0);
                    perspective_new=cgmath::perspective(fv, (size.width / size.height)as f32, 0.1, 1024.0);
                },
                glutin::WindowEvent::CloseRequested => close = true,
                glutin::WindowEvent::CursorMoved {
                    position,
                    modifiers,
                    ..
                } => {
                    if modifiers.ctrl {
                        ctrl_pressed = true;
                    }
                    current_position = position;
                }
                glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
                    Some(glutin::VirtualKeyCode::Escape) => close = true,
                    Some(glutin::VirtualKeyCode::W) => {
                        if input.state == Pressed {
                            w = Keystate::Pressed
                        } else {
                            w = Keystate::Released
                        }
                    }
                    Some(glutin::VirtualKeyCode::S) => {
                        if input.state == Pressed {
                            s = Keystate::Pressed
                        } else {
                            s = Keystate::Released
                        }
                    }
                    Some(glutin::VirtualKeyCode::A) => {
                        if input.state == Pressed {
                            a = Keystate::Pressed
                        } else {
                            a = Keystate::Released
                        }
                    }
                    Some(glutin::VirtualKeyCode::D) => {
                        if input.state == Pressed {
                            d = Keystate::Pressed
                        } else {
                            d = Keystate::Released
                        }
                    }
                    Some(glutin::VirtualKeyCode::X) => {
                        if input.state == Pressed {
                            x = Keystate::Pressed
                        } else {
                            x = Keystate::Released
                        }
                    }
                    Some(glutin::VirtualKeyCode::Y) => {
                        if input.state == Pressed {
                            y = Keystate::Pressed
                        } else {
                            y = Keystate::Released
                        }
                    }
                    _ => (),
                },
                _ => (),
            },
            _ => (),
        });
        self.closed = close;
        if size_changed {
            self.cam.perspective = perspective_new;
        }
        if ctrl_pressed {
            let dx: f64 = current_position.x - self.previous_mouse_position.x;
            let dy: f64 = current_position.y - self.previous_mouse_position.y;
            self.previous_mouse_position = current_position;
            // Ignore large jumps (e.g. cursor warps); only small deltas rotate.
            if dx.abs() <= 5.0 || dy.abs() <= 5.0 {
                // NOTE(review): `Cam` defines no `h_angle`/`v_angle` fields and
                // no `adjust_angle` method in this file — this cannot compile as
                // written; confirm missing code.
                self.cam.h_angle += MOUSE_SPEED * -dx as f32;
                self.cam.v_angle += MOUSE_SPEED * -dy as f32;
                self.cam.adjust_angle();
            }
        }
        // Commit the per-frame transitions into the persistent key array
        // (indices: 0=W, 1=S, 2=A, 3=D, 4=X, 5=Y).
        match w {
            Keystate::Pressed => self.keys_pressed[0] = true,
            Keystate::Released => self.keys_pressed[0] = false,
            _ => (),
        }
        match s {
            Keystate::Pressed => self.keys_pressed[1] = true,
            Keystate::Released => self.keys_pressed[1] = false,
            _ => (),
        }
        match a {
            Keystate::Pressed => self.keys_pressed[2] = true,
            Keystate::Released => self.keys_pressed[2] = false,
            _ => (),
        }
        match d {
            Keystate::Pressed => self.keys_pressed[3] = true,
            Keystate::Released => self.keys_pressed[3] = false,
            _ => (),
        }
        match x {
            Keystate::Pressed => self.keys_pressed[4] = true,
            Keystate::Released => self.keys_pressed[4] = false,
            _ => (),
        }
        match y {
            Keystate::Pressed => self.keys_pressed[5] = true,
            Keystate::Released => self.keys_pressed[5] = false,
            _ => (),
        }
    }
    /// Advance the camera by `dt` milliseconds of movement: W/S/A/D pairs
    /// select a heading as a rotation of the look direction about the Y axis,
    /// X/Y move straight up/down. Opposing pairs (W+S, A+D) cancel out.
    fn update(&mut self, dt: i64) {
        use cgmath::Rad;
        let target_speed: f32 = 1.0;
        let mut dir: cgmath::Vector3<f32> = self.cam.look_dir;
        let mut speed = 0.0;
        let mut ang: Rad<f32>;
        ang = Rad(std::f32::consts::PI);
        if self.keys_pressed[0] && self.keys_pressed[2] {
            // W+A: forward-left (1/8 turn).
            ang = Rad(2.0 * std::f32::consts::PI * (1.0 / 8.0));
            speed = target_speed;
        } else if self.keys_pressed[0] && self.keys_pressed[3] {
            // W+D: forward-right (7/8 turn).
            ang = Rad(2.0 * std::f32::consts::PI * (7.0 / 8.0));
            speed = target_speed;
        } else if self.keys_pressed[1] && self.keys_pressed[2] {
            // S+A: back-left; vertical component mirrored for backwards motion.
            ang = Rad(2.0 * std::f32::consts::PI * (3.0 / 8.0));
            dir.y *= -1.0;
            speed = target_speed;
        } else if self.keys_pressed[1] && self.keys_pressed[3] {
            // S+D: back-right.
            ang = Rad(2.0 * std::f32::consts::PI * (5.0 / 8.0));
            dir.y *= -1.0;
            speed = target_speed;
        } else if self.keys_pressed[0] && self.keys_pressed[1] {
            // W+S cancel: no movement.
        } else if self.keys_pressed[2] && self.keys_pressed[3] {
            // A+D cancel: no movement.
        } else if self.keys_pressed[2] {
            // A: strafe left, no vertical movement.
            ang = Rad(2.0 * std::f32::consts::PI * (2.0 / 8.0));
            dir.y = 0.0;
            speed = target_speed;
        } else if self.keys_pressed[3] {
            // D: strafe right.
            ang = Rad(2.0 * std::f32::consts::PI * (3.0 / 4.0));
            dir.y = 0.0;
            speed = target_speed;
        } else if self.keys_pressed[1] {
            // S: straight back.
            ang = Rad(std::f32::consts::PI);
            dir = dir.normalize();
            dir.y *= -1.0;
            speed = target_speed;
        } else if self.keys_pressed[0] {
            // W: straight ahead.
            ang = Rad(0.0);
            dir = dir.normalize();
            speed = target_speed;
        }
        // Scale units/second by elapsed milliseconds.
        speed *= dt as f32;
        speed /= 1000.0;
        if self.keys_pressed[4] {
            self.cam.pos.y += target_speed * dt as f32 / 1000.0;
        }
        if self.keys_pressed[5] {
            self.cam.pos.y -= target_speed * dt as f32 / 1000.0;
        }
        // Rotate the (homogeneous) direction by the chosen heading, then scale.
        let rot: cgmath::Matrix4<f32> = cgmath::Matrix4::from_angle_y(ang);
        let mut dir = dir.extend(1.0);
        dir = rot * dir;
        let mut dir = dir.truncate();
        dir *= speed;
        self.cam.pos += dir;
    }
fn render(&self) {
use cgmath::{conv, Matrix4};
use glium::Surface;
let mut target = self.display.draw();
target.clear_color_and_depth((0.0, 0.0, 1.0, 1.0), 1.0);
let light = [-1.0, 0.4, 0.9f32];
let up_v = cgmath::Vector3 {
x: 0.0,
y: 1.0,
z: 0.0,
};
let view: Matrix4<f32> = Matrix4::look_at_dir(self.cam.pos, self.cam.look_dir, up_v);
let params = glium::DrawParameters {
depth: glium::Depth {
test: glium::draw_parameters::DepthTest::IfLess,
write: true,
..Default::default()
},
//backface_culling: glium::draw_parameters::BackfaceCullingMode::CullClockwise,
..Default::default()
};
for mesh in &self.meshes {
let model = mesh.translation * mesh.rotation * mesh.scale;
target
.draw(
(&mesh.vertices, &mesh.normals),
&mesh.indices,
&self.renderer,
&uniform! { model: conv::array4x4(model), view: conv::array4x4(view),
perspective: conv::array4x4(self.cam.perspective), u_light: light },
¶ms,
)
.unwrap();
}
target.finish().unwrap();
}
fn game_loop(&mut self) {
use time::PreciseTime;
let mut previous = PreciseTime::now();
let mut lag: i64 = 0;
let mcs_per_update: i64 = 10000;
let mut fpsc = 0;
let mut start = PreciseTime::now();
while !self.closed {
self.input();
let current = PreciseTime::now();
let elapsed: i64;
match previous.to(current).num_microseconds() {
Some(x) => elapsed = x,
None => elapsed = std::i64::MAX,
}
previous = current;
lag += elapsed;
while lag >= mcs_per_update {
self.update(10);
lag -= mcs_per_update;
}
let end = PreciseTime::now();
fpsc+=1;
if start.to(end).num_seconds()>=1{
println!("fps: {}",fpsc);
fpsc=0;
start = PreciseTime::now();
}
self.render();
}
}
    /// Run the game loop until the window is closed.
    pub fn start(&mut self) {
        self.game_loop();
    }
}
|
// Copyright (c) 2021 Quark Container Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::alloc::{GlobalAlloc, Layout};
use core::sync::atomic::{AtomicBool, AtomicUsize};
use core::sync::atomic::Ordering;
use core::cmp::max;
use core::mem::size_of;
use core::ptr::NonNull;
use spin::Mutex;
use buddy_system_allocator::Heap;
pub const CLASS_CNT : usize = 16;
pub const FREE_THRESHOLD: usize = 30; // when free size less than 30%, need to free buffer
pub const BUFF_THRESHOLD: usize = 50; // when buff size takes more than 50% of free size, needs to free
pub const FREE_BATCH: usize = 10; // free 10 blocks each time.
pub const ORDER : usize = 33;
/// Global allocator backed by a buddy-system heap plus per-size-class caches
/// of freed blocks.
pub struct ListAllocator {
    // One cached free list per power-of-two size class (class i => 2^i bytes).
    pub bufs: [Mutex<FreeMemBlockMgr>; CLASS_CNT],
    pub heap: Mutex<Heap<ORDER>>,
    // Total bytes donated to the heap.
    pub total: AtomicUsize,
    // Bytes currently accounted as free.
    pub free: AtomicUsize,
    // Bytes currently held in the per-class caches.
    pub bufSize: AtomicUsize,
    //pub errorHandler: Arc<OOMHandler>
    // Set once `Add` has donated memory; checked on the first allocation.
    pub initialized: AtomicBool
}
/// Callback invoked when an allocation request cannot be satisfied.
pub trait OOMHandler {
    fn handleError(&self, a:u64, b:u64) -> ();
}
impl ListAllocator {
pub const fn Empty() -> Self {
let bufs : [Mutex<FreeMemBlockMgr>; CLASS_CNT] = [
Mutex::new(FreeMemBlockMgr::New(0, 0)),
Mutex::new(FreeMemBlockMgr::New(0, 1)),
Mutex::new(FreeMemBlockMgr::New(0, 2)),
Mutex::new(FreeMemBlockMgr::New(128, 3)),
Mutex::new(FreeMemBlockMgr::New(128, 4)),
Mutex::new(FreeMemBlockMgr::New(128, 5)),
Mutex::new(FreeMemBlockMgr::New(64, 6)),
Mutex::new(FreeMemBlockMgr::New(64, 7)),
Mutex::new(FreeMemBlockMgr::New(64, 8)),
Mutex::new(FreeMemBlockMgr::New(32, 9)),
Mutex::new(FreeMemBlockMgr::New(32, 10)),
Mutex::new(FreeMemBlockMgr::New(16, 11)),
Mutex::new(FreeMemBlockMgr::New(1024, 12)),
Mutex::new(FreeMemBlockMgr::New(16, 13)),
Mutex::new(FreeMemBlockMgr::New(8, 14)),
Mutex::new(FreeMemBlockMgr::New(8, 15))
];
return Self {
bufs: bufs,
heap: Mutex::new(Heap::empty()),
total: AtomicUsize::new(0),
free: AtomicUsize::new(0),
bufSize: AtomicUsize::new(0),
initialized: AtomicBool::new(false)
}
}
pub fn AddToHead(&self, start: usize, end: usize) {
unsafe {
self.heap.lock().add_to_heap(start, end);
}
let size = end - start;
self.total.fetch_add(size, Ordering::Release);
self.free.fetch_add(size, Ordering::Release);
}
/// add the chunk of memory (start, start+size) to heap for allocating dynamic memory
pub fn Add(&self, start: usize, size: usize) {
let mut start = start;
let end = start + size;
let size = 1 << 30; // 1GB
// note: we can't add full range (>4GB) to the buddyallocator
while start + size < end {
self.AddToHead(start, start + size);
start += size;
}
if start < end {
self.AddToHead(start, end)
}
self.initialized.store(true, Ordering::Relaxed);
}
pub fn NeedFree(&self) -> bool {
let total = self.total.load(Ordering::Acquire);
let free = self.free.load(Ordering::Acquire);
let bufSize = self.bufSize.load(Ordering::Acquire);
if free > core::usize::MAX / 100 || total > core::usize::MAX / 100 {
error!("total is {:x}, free is {:x}, buffsize is {:x}", total, free, bufSize);
}
if total * FREE_THRESHOLD / 100 > free && // there are too little free memory
free * BUFF_THRESHOLD /100 < bufSize { // there are too much bufferred memory
return true
}
return false
}
// ret: true: free some memory, false: no memory freed
pub fn Free(&self) -> bool {
let mut count = 0;
for i in 0..self.bufs.len() {
if !self.NeedFree() || count == FREE_BATCH {
return count > 0
}
let idx = self.bufs.len() - i - 1; // free from larger size
let cnt = self.bufs[idx].lock().FreeMultiple(&self.heap, FREE_BATCH - count);
self.bufSize.fetch_sub(cnt * self.bufs[idx].lock().size, Ordering::Release);
count += cnt;
}
return count > 0;
}
}
unsafe impl GlobalAlloc for ListAllocator {
    /// Allocate from the matching per-class cache when possible, otherwise
    /// fall back to the buddy heap.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let initialized = self.initialized.load(Ordering::Relaxed);
        if !initialized {
            // NOTE(review): `initialize` is not defined in this file — defined
            // elsewhere in the crate; confirm.
            self.initialize();
        }
        // Round the request up to a power of two, at least align- and word-sized.
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;
        // Classes 3..CLASS_CNT (8 bytes and up) are served from the caches.
        if 3 <= class && class < self.bufs.len() {
            let ret = self.bufs[class].lock().Alloc();
            if ret.is_some() {
                // NOTE(review): the cache-hit path adjusts bufSize but not
                // `free`, unlike the heap path below — confirm the accounting.
                self.bufSize.fetch_sub(size, Ordering::Release);
                return ret.unwrap();
            }
        }
        let ret = self
            .heap
            .lock()
            .alloc(layout)
            .ok()
            .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) as u64;
        if ret == 0 {
            // Out of memory: report and spin — a GlobalAlloc cannot recover here.
            // NOTE(review): `handleError` is not defined in this file.
            self.handleError(size as u64, layout.align() as u64);
            loop {}
        }
        // Subtract when ret != 0 to avoid overflow
        self.free.fetch_sub(size, Ordering::Release);
        return ret as *mut u8;
    }
    /// Return a block to the matching class cache, or to the heap when the
    /// class is larger than any cache.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;
        self.free.fetch_add(size, Ordering::Release);
        // NOTE(review): bufSize is incremented even when the block goes back to
        // the heap below — confirm this asymmetry is intentional.
        self.bufSize.fetch_add(size, Ordering::Release);
        if class < self.bufs.len() {
            return self.bufs[class].lock().Dealloc(ptr, &self.heap);
        }
        self.heap.lock().dealloc(NonNull::new_unchecked(ptr), layout)
    }
}
/// FreeMemoryBlockMgr is used to manage heap memory block allocated by allocator
pub struct FreeMemBlockMgr {
    // Block size in bytes for this class (2^class).
    pub size: usize,
    // Number of blocks currently cached in `list`.
    pub count: usize,
    // Minimum number of cached blocks FreeMultiple will not release.
    pub reserve: usize,
    pub list: MemList,
}
impl FreeMemBlockMgr {
    /// Return a newly created FreeMemBlockMgr
    /// # Arguments
    ///
    /// * `reserve` - number of blocks the Block Manager keeps for itself when FreeMultiple is called.
    /// * `class` - denotes the block size this manager is in charge of. class i means the block is of size 2^i bytes
    pub const fn New(reserve: usize, class: usize) -> Self {
        return Self {
            size: 1<<class,
            reserve: reserve,
            count: 0,
            list: MemList::New(1<<class),
        }
    }
    /// Layout for one block of this class (size == alignment).
    pub fn Layout(&self) -> Layout {
        return Layout::from_size_align(self.size, self.size).unwrap();
    }
    /// Pop a cached block, clearing the intrusive next-pointer stored in its
    /// first word before handing it out.
    pub fn Alloc(&mut self) -> Option<*mut u8> {
        if self.count > 0 {
            self.count -= 1;
            let ret = self.list.Pop();
            let ptr = ret as * mut MemBlock;
            unsafe {
                ptr.write(0)
            }
            return Some(ret as * mut u8)
        } else {
            return None
        }
    }
    /// Push a freed block onto this class's cache; the heap is not touched
    /// here (`_heap` kept for signature symmetry with Free).
    pub fn Dealloc(&mut self, ptr: *mut u8, _heap: &Mutex<Heap<ORDER>>) {
        /*let size = self.size / 8;
        unsafe {
            let toArr = slice::from_raw_parts(ptr as *mut u64, size);
            for i in 0..size {
                assert!(toArr[i] == 0);
            }
        }*/
        self.count += 1;
        self.list.Push(ptr as u64);
    }
    /// Return one cached block to the buddy heap. Panics if the cache is empty.
    fn Free(&mut self, heap: &Mutex<Heap<ORDER>>) {
        assert!(self.count > 0);
        self.count -= 1;
        let addr = self.list.Pop();
        unsafe {
            heap.lock().dealloc(NonNull::new_unchecked(addr as * mut u8), self.Layout());
        }
    }
    /// Return up to `count` cached blocks to the heap, never dropping the
    /// cache below `reserve`; returns how many blocks were actually freed.
    pub fn FreeMultiple(&mut self, heap: &Mutex<Heap<ORDER>>, count: usize) -> usize {
        for i in 0..count {
            if self.count <= self.reserve {
                return i;
            }
            self.Free(heap)
        }
        return count;
    }
}
// A free block is addressed by its u64 address; the block's first word stores
// the address of the next block in the list (0 terminates).
type MemBlock = u64;
/// Intrusive singly-linked list threaded through the free blocks themselves.
pub struct MemList {
    size: u64,
    head: MemBlock,
    tail: MemBlock,
}
impl MemList {
    pub const fn New(size: usize) -> Self {
        return Self {
            size: size as u64,
            head: 0,
            tail: 0,
        }
    }
    /// Append the free block at `addr` to the tail of the list.
    /// `addr` must be a size-aligned, owned block of `self.size` bytes,
    /// since its first word is overwritten as the list link.
    pub fn Push(&mut self, addr: u64) {
        assert!(addr % self.size == 0);
        let newB = addr as * mut MemBlock;
        // Terminate the new tail's link.
        unsafe {
            *newB = 0;
        }
        if self.head == 0 {
            // Empty list: new block becomes both head and tail.
            self.head = addr;
            self.tail = addr;
            return
        }
        // Link the old tail to the new block.
        let tail = self.tail;
        let ptr = tail as * mut MemBlock;
        unsafe {
            *ptr = addr;
        }
        self.tail = addr;
    }
    /// Pop the head block's address, or 0 when the list is empty.
    pub fn Pop(&mut self) -> u64 {
        if self.head == 0 {
            return 0
        }
        let next = self.head;
        if self.head == self.tail {
            // Last element: reset to the empty state.
            self.head = 0;
            self.tail = 0;
            return next;
        }
        let ptr = unsafe {
            &mut *(next as * mut MemBlock)
        };
        // The popped block's first word holds the next block's address.
        self.head = *ptr;
        assert!(next % self.size == 0);
        return next;
    }
}
use common::aoc::{load_input, run_many, run_many_mut, print_result, print_time};
use common::intcode::VM;
use common::grid::{Grid, BigGrid};
/// Driver: parse the intcode program, then time and print part 1 and part 2.
fn main() {
    let input = load_input("day19");
    // Parsing is timed over 1000 runs, part 1 over 100, part 2 once (slow).
    let (mut vm, dur_parse) = run_many(1000, || VM::parse(&input.trim_end_matches("\n")));
    let (res_part1, dur_part1) = run_many_mut(100, || part1(&mut vm));
    let (res_part2, dur_part2) = run_many_mut(1, || part2(&mut vm));
    print_result("P1", res_part1);
    print_result("P2", res_part2);
    print_time("Parse", dur_parse);
    print_time("P1", dur_part1);
    print_time("P2", dur_part2);
}
/// Count beam-affected points in the 50x50 grid by probing the drone program
/// at every coordinate.
fn part1(vm: &mut VM) -> usize {
    let mut count = 0;
    for y in 0..50 {
        let mut found = false;
        for x in 0..50 {
            vm.reset();
            vm.push_input(x);
            vm.push_input(y);
            vm.run();
            // NOTE(review): this uses `read_output()` while part2_check uses
            // `output()` — confirm both VM accessors are equivalent.
            if *vm.read_output().last().unwrap() == 1 {
                count += 1;
                found = true;
            } else if found {
                // The beam is contiguous per row: the first miss after a hit
                // ends the row early.
                break;
            }
        }
    }
    count
}
/// Find the first 100x100 square that fits entirely inside the beam, scanning
/// rows from y = 100 onward; returns -1 only if the (infinite) scan ends.
fn part2(vm: &mut VM) -> i64 {
    (100..).find_map(|y| part2_check(vm, y)).unwrap_or(-1)
}
/// Scan row `y` left-to-right for the beam's left edge; once found, probe the
/// other three corners of a 100x100 square whose bottom-left corner is (x, y).
/// Returns the puzzle answer `x * 10000 + (y - 99)` when the square fits.
fn part2_check(vm: &mut VM, y: i64) -> Option<i64> {
    for x in 0.. {
        vm.reset();
        vm.push_input(x);
        vm.push_input(y);
        vm.run();
        if *vm.output().last().unwrap() == 1 {
            // Top-left corner (x, y - 99).
            vm.reset();
            vm.push_input(x);
            vm.push_input(y-99);
            vm.run();
            if *vm.output().last().unwrap() == 0 {
                return None;
            }
            // Top-right corner (x + 99, y - 99).
            vm.reset();
            vm.push_input(x+99);
            vm.push_input(y-99);
            vm.run();
            if *vm.output().last().unwrap() == 0 {
                return None;
            }
            // Bottom-right corner (x + 99, y).
            vm.reset();
            vm.push_input(x+99);
            vm.push_input(y);
            vm.run();
            if *vm.output().last().unwrap() == 0 {
                return None;
            }
            return Some(x * 10000 + (y - 99));
        }
    }
    None
}
|
use criterion::{criterion_group, criterion_main, Criterion};
use egraph_dataset::dataset_1138_bus;
use petgraph::prelude::*;
use petgraph_algorithm_shortest_path::*;
/// Benchmark three all-pairs shortest-path algorithms on the 1138_bus graph,
/// each with a constant edge weight of 30.
fn criterion_benchmark(c: &mut Criterion) {
    let graph: UnGraph<(), ()> = dataset_1138_bus();
    let mut group = c.benchmark_group("1138_bus");
    group.bench_with_input("all_sources_bfs", &graph, |b, g| {
        b.iter(|| {
            let _ = all_sources_bfs(g, 30.);
        });
    });
    group.bench_with_input("all_sources_dijkstra", &graph, |b, g| {
        b.iter(|| {
            let _ = all_sources_dijkstra(g, &mut |_| 30.);
        });
    });
    group.bench_with_input("warshall_floyd", &graph, |b, g| {
        b.iter(|| {
            let _ = warshall_floyd(g, &mut |_| 30.);
        });
    });
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
|
extern crate rustc_serialize as serialize;
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
use serialize::hex::FromHex;
use std::str;
/// Read hex-encoded candidate ciphertexts from `4.txt`, single-byte-XOR
/// decode each line, and print every line whose best decoding is clean UTF-8.
fn main() {
    // Open the path in read-only mode, returns `io::Result<File>`
    let file = match File::open("4.txt") {
        Err(why) => panic!("couldn't open 4.txt - {}", why),
        Ok(file) => file,
    };
    let reader = BufReader::new(file);
    for (index, line) in reader.lines().enumerate() {
        // tuple = (best XOR key byte, best decoded string).
        let tuple = decode_string(line.unwrap());
        //Filter out lines that contain invalid utf-8 replacement characters.
        if !tuple.1.contains("�") {
            println!("Line {}: {}: {}", index, tuple.0, tuple.1);
        }
    }
}
fn decode_string(hex_string: std::string::String) -> (u8, std::string::String) {
let max_char = 128;
let hex = hex_string.from_hex().unwrap();
let mut best_decoded_string = std::string::String::new();
let mut best_xor_char = 0;
for xor_char in 0..max_char {
let mut xor_vec = Vec::new();
for byte in hex.iter() {
xor_vec.push(byte ^ xor_char);
}
let decoded_str = String::from_utf8_lossy(&xor_vec);
if compare_string(best_decoded_string.to_string(), decoded_str.to_string()) == true {
best_decoded_string = decoded_str.to_string();
best_xor_char = xor_char;
}
}
return (best_xor_char, best_decoded_string);
}
/// Heuristic plaintext score: the number of alphabetic characters plus spaces.
fn score_string(s: &str) -> usize {
    s.chars().filter(|c| c.is_alphabetic() || *c == ' ').count()
}
/// Returns `true` when `s2` scores at least as high as `s1` — i.e. the
/// candidate `s2` should replace the current best `s1`. The owned-String
/// parameters are kept for existing callers; the unused `'a` lifetime
/// parameter has been dropped and the branchy boolean return collapsed.
fn compare_string(s1: std::string::String, s2: std::string::String) -> bool {
    score_string(&s1) <= score_string(&s2)
}
|
use rand::Rng;
use crate::terrain::*;
use rayon::prelude::*;
use std::fmt;
/// A square grid of `ForestFeature` cells stored row-major in a flat vector.
#[derive(Clone, Debug)]
pub struct Forest {
    // layout.len() == size * size; cell (x, y) lives at index x * size + y.
    layout: Vec<ForestFeature>,
    size: usize,
}
impl Forest {
pub fn new(size: usize) -> Self {
let mut rng = rand::thread_rng();
Forest {
layout: (0..(size * size))
.map(|_| match rng.gen_range(0, 100) {
0..=2 => ForestFeature::Bear(BearInfo::new()),
3..=53 => ForestFeature::Tree(FloraVariant::Tree(12)),
54..=64 => ForestFeature::LumberJack(Woodcutter::new()),
_ => ForestFeature::Empty,
})
.collect(),
size,
}
}
///
/// takes an x and y cordiante gives a copy the resulting forestfeature
pub fn get(&self, x: usize, y: usize) -> ForestFeature {
self.layout[x * self.size + y]
}
///
/// Converts an absoluute location into
/// the projected x,y cordinate
pub fn get_x_y(&self, i: usize) -> (usize, usize) {
(i / self.size, i % self.size)
}
pub fn get_absolute_cordinate(&self, x: usize, y: usize) -> usize {
x * self.size + y
}
///
/// Takes an x,y coridnate and a feature to assign then
/// overwrites the location with the new feature
///
pub fn assign_at(&mut self, x: usize, y: usize, f: ForestFeature) {
self.layout[x * self.size + y] = f;
}
///
/// Returns a census struct that
/// has the populations of all
/// the forestfeatures
///
///
pub fn get_terrain_counts(&self) -> Census {
self.layout.iter().fold(
Census {
tree_count: 0,
bear_count: 0,
lumberjack_count: 0,
},
|acc, x| match x {
ForestFeature::Bear(_) => Census {
tree_count: acc.tree_count,
bear_count: acc.bear_count + 1,
lumberjack_count: acc.lumberjack_count,
},
ForestFeature::LumberJack(_) => Census {
tree_count: acc.tree_count,
bear_count: acc.bear_count,
lumberjack_count: acc.lumberjack_count + 1,
},
ForestFeature::Tree(_) => Census {
tree_count: acc.tree_count + 1,
..acc
},
ForestFeature::BearTree(_, _) => Census {
tree_count: acc.tree_count + 1,
bear_count: acc.bear_count + 1,
lumberjack_count: acc.lumberjack_count,
},
ForestFeature::LumberSeedling(_, _) => Census {
tree_count: acc.tree_count + 1,
bear_count: acc.bear_count,
lumberjack_count: acc.lumberjack_count + 1,
},
ForestFeature::Empty => acc,
},
)
}
///
/// gets a vector of interactable neighbours locations
/// interactable is specified by closure.
///
/// Examples of interactable are neighbours to move to or
/// plant to
///
pub fn get_interactable_neighbours_locations(
&self,
x: usize,
y: usize,
criteria: fn(ForestFeature) -> bool,
) -> Vec<(usize, usize)> {
[-1, 0, 1]
.iter()
.cloned()
.map(|delta_x| {
[-1, 0, 1].iter().cloned().filter_map(move |delta_y| {
let new_x = delta_x + x as i32;
let new_y = delta_y + y as i32;
if (delta_x == 0 && delta_y == 0)
|| new_x as usize >= self.size
|| new_y as usize >= self.size
|| new_x < 0
|| new_y < 0
{
None
} else {
let new_x = new_x as usize;
let new_y = new_y as usize;
let neighbour = self.get(new_x, new_y);
if criteria(neighbour) {
Some((new_x, new_y))
} else {
None
}
}
})
})
.flatten()
.collect()
}
///
/// Takes a closure that filters for ceratin Foresfeature types
/// applies the closure to every Forestfeature and
/// returns a vector of all valid locations as tuple (x,y)
///
pub fn get_locations(&self, criteria: fn(ForestFeature) -> bool) -> Vec<(usize, usize)> {
self.layout
.par_iter()
.enumerate()
.filter_map(|(i, x)| {
if criteria(*x) {
Some(self.get_x_y(i))
} else {
None
}
})
.collect()
}
pub fn get_locations_with_info<T>(
&self,
criteria: fn(ForestFeature) -> Option<T>,
) -> Vec<FeatureLocation<T>>
where
T: Send,
{
self.layout
.par_iter()
.enumerate()
.filter_map(|(i, x)| match criteria(*x) {
Some(loc) => {
let (x, y) = self.get_x_y(i);
Some(FeatureLocation { x, y, feature: loc })
}
_ => None,
})
.collect()
}
///
/// changes a forest feature into another by slice of locations
/// forest feature transformation is specified by
/// closure
///
pub fn transform_features(
&mut self,
new_locations: &[(usize, usize)],
transformation: fn(ForestFeature) -> ForestFeature,
) {
for position in new_locations {
let (new_x, new_y) = position;
let new_loc = self.get(*new_x, *new_y);
self.assign_at(*new_x, *new_y, transformation(new_loc));
}
}
    ///
    /// Simulates one batch of movement across the whole forest in a single
    /// parallel pass.
    ///
    /// `moved_info` carries the planned moves; `move_form` clears each
    /// origin square and `move_to` populates each destination square.
    /// `T` is any per-square statistical information that the destination
    /// closure may return; all such values are collected and handed back.
    ///
    pub fn batched_move_to<T, U>(
        &mut self,
        moved_info: &U,
        move_form: fn(ForestFeature, usize, &U) -> ForestFeature,
        move_to: fn(ForestFeature, usize, &U) -> (ForestFeature, T),
    ) -> Vec<T>
    where
        U: Sync,
        T: Send,
    {
        // First vacate origins, then fill destinations, unzipping the new
        // layout from the per-square results in one parallel pipeline.
        let (new, results): (Vec<ForestFeature>, Vec<T>) = self
            .layout
            .par_iter()
            .enumerate()
            .map(|(i, old_loc)| (i, move_form(*old_loc, i, moved_info)))
            .map(|(i, new_loc)| move_to(new_loc, i, moved_info))
            .unzip();
        self.layout = new;
        results
    }
    ///
    /// Calls the aging implementation on all
    /// ForestFeatures in the forest (in parallel).
    ///
    pub fn age_features(&mut self) {
        self.layout = self.layout.par_iter().map(|x| x.age()).collect();
    }
    ///
    /// Resets per-period state on every feature: clears the moved flag on
    /// bears and lumberjacks and zeroes the lumber tally, so the next
    /// round of movement and statistics starts fresh.
    ///
    pub fn reset_feature_state(&mut self) {
        self.layout.par_iter_mut().for_each(|x| match x {
            ForestFeature::Bear(b) => b.finished_moving = false,
            ForestFeature::BearTree(b, _) => b.finished_moving = false,
            ForestFeature::LumberJack(l) => {
                l.finished_moving = false;
                l.lumber_collected = 0
            }
            ForestFeature::LumberSeedling(l, _) => {
                l.finished_moving = false;
                l.lumber_collected = 0
            }
            // Trees and empty squares carry no per-period state.
            _ => (),
        });
    }
}
impl fmt::Display for Forest {
    /// Renders the forest as a grid of emoji, one row per line.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        for row in self.layout.as_slice().chunks(self.size) {
            for &cell in row {
                // One glyph per feature variant.
                let glyph = match cell {
                    ForestFeature::Empty => '⬛',
                    ForestFeature::Tree(FloraVariant::Sapling(_)) => '🎄',
                    ForestFeature::Tree(FloraVariant::Tree(_)) => '🌲',
                    ForestFeature::Tree(FloraVariant::Elder(_)) => '🌳',
                    ForestFeature::LumberJack(_) => '👷',
                    ForestFeature::Bear(_) => '🐻',
                    ForestFeature::BearTree(_, _) => '🍯',
                    ForestFeature::LumberSeedling(_, _) => '🌴',
                };
                write!(f, "{}", glyph)?;
            }
            writeln!(f)?;
        }
        Ok(())
    }
}
pub mod actions {
use super::Forest;
use crate::terrain::*;
use rand::rngs::ThreadRng;
use rand::Rng;
use rayon::prelude::*;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::{
sync::Arc,
sync::{
mpsc::{channel, Sender},
Mutex,
},
};
use threadpool::ThreadPool;
    /// Bundles the per-phase movement plans produced by the three worker
    /// tasks in `collect_movements` (keys are absolute layout indices).
    struct Hashes {
        // Squares that will receive a newly planted sapling.
        trees: HashSet<usize>,
        // Squares lumberjacks are vacating.
        before_lumbers: HashSet<usize>,
        // Destination square -> lumberjack arriving there.
        lumbers: HashMap<usize, Woodcutter>,
        // Squares bears are vacating.
        before_bears: HashSet<usize>,
        // Destination square -> bear arriving there.
        bears: HashMap<usize, BearInfo>,
    }
    /// A single notable outcome of resolving one square's move.
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    enum ChangeEvent {
        SaplingPlanted,
        // Carries the units of wood obtained from the felled tree.
        TreeCutDown(u32),
        // Carries the mauled lumberjack (his wood tally is still reported).
        LumberJackMauled(Woodcutter),
        SaplingPlantedLumberJackMauled(Woodcutter),
    }
    /// Aggregated phase/month statistics.
    #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
    struct Event {
        saplings_planted: u32,
        wood_cut: u32,
        lumberjacks_mauled: u32,
    }
    ///
    /// Runs the full parallel forest simulation for 399 years of 12 months.
    ///
    /// Terrain work runs on `forest_pool`; log formatting runs on
    /// `logging_pool` and is pushed to `logging_channel`. The shared
    /// `(year, month)` pair in `sync_time` gates the log workers so reports
    /// come out in chronological order even though they are produced
    /// concurrently.
    ///
    pub fn simulate(size: usize, logging_channel: Sender<String>) {
        let mut rng = rand::thread_rng();
        let mut simulated_forest = Forest::new(size);
        // Three workers: one each for tree / lumberjack / bear planning.
        let forest_pool = ThreadPool::new(3);
        let logging_pool = ThreadPool::new(6);
        // (year, month) of the next report allowed to print.
        let sync_time = Arc::new(Mutex::new((1, 1)));
        /* The main simulation */
        for year in 1..400 {
            let mut annual_wood_chop = 0;
            let mut annual_mualing = 0;
            let mut annual_sapling_plant = 0;
            for month in 1..=12 {
                simulated_forest.age_features();
                // Five movement phases per month; each yields an Event tally.
                let mut monthly_changes = Vec::with_capacity(5);
                for move_phase in 0..5 {
                    monthly_changes.push(process_terrain(&mut simulated_forest, move_phase, &forest_pool));
                }
                simulated_forest.reset_feature_state();
                // Fold the five phase tallies into one monthly Event.
                let monthly_changes = monthly_changes.into_par_iter().reduce(
                    || Event {
                        saplings_planted: 0,
                        wood_cut: 0,
                        lumberjacks_mauled: 0,
                    },
                    |a, b| Event {
                        saplings_planted: a.saplings_planted + b.saplings_planted,
                        wood_cut: a.wood_cut + b.wood_cut,
                        lumberjacks_mauled: a.lumberjacks_mauled + b.lumberjacks_mauled,
                    },
                );
                let monthly_log = logging_channel.clone();
                let forest_string = simulated_forest.to_string();
                let current_sync = sync_time.clone();
                // Spin until it is this month's turn to report, then print
                // and advance the shared clock.
                logging_pool.execute(move || {
                    loop {
                        let mut sync = match current_sync.lock() {
                            Ok(a) => a,
                            // A poisoned mutex aborts the whole process.
                            e => {print!("{:?}",e); std::process::exit(0);} ,
                        };
                        let (sync_year,mut sync_month) = *sync;
                        if sync_year == year && sync_month == month {
                            monthly_log.send(forest_string).unwrap();
                            monthly_log.send(format!(
                                "month {} year {}, units of wood chopped this month: {}, lumberjacks_malued: {}, saplings planted: {},",
                                month,year,monthly_changes.wood_cut, monthly_changes.lumberjacks_mauled, monthly_changes.saplings_planted)
                            ).unwrap();
                            monthly_log.send(format!("{:-<1$}", "", size * 2)).unwrap();
                            // Month 12 wraps to 0, which is the signal for
                            // the yearly report worker to run.
                            sync_month = (sync_month + 1) %13;
                            *sync = (sync_year,sync_month);
                            return
                        }
                    }
                });
                annual_wood_chop += monthly_changes.wood_cut;
                annual_mualing += monthly_changes.lumberjacks_mauled;
                annual_sapling_plant += monthly_changes.saplings_planted;
            }
            // Year end: adjust populations and take a census.
            let censare = populate(
                &mut simulated_forest,
                &mut rng,
                annual_wood_chop,
                annual_mualing,
            );
            let current_sync = sync_time.clone();
            let yearly_log = logging_channel.clone();
            // Waits for month to wrap to 0, prints the census, then starts
            // the next year at month 1.
            logging_pool.execute(
                move|| loop {
                    let mut sync = current_sync.lock().unwrap();
                    let ( mut sync_year,mut sync_month) = *sync;
                    if sync_year == year && sync_month == 0 {
                        yearly_log.send(format!("yearly census {}", censare)).unwrap();
                        yearly_log.send(format!(
                            "year {}, wood chopped this year:{} ,lumberJacks mauled:{} ,saplings planted:{} ",
                            year, annual_wood_chop, annual_mualing, annual_sapling_plant
                        )).unwrap();
                        yearly_log.send(format!("{:_<1$}", "", size * 2)).unwrap();
                        sync_month = 1;
                        sync_year += 1;
                        *sync = (sync_year,sync_month);
                        return;
                    }
                }
            )
        }
    }
    /// Resolves one movement phase end-to-end: plans all moves on the
    /// worker pool, applies them in a single parallel pass over the layout,
    /// and folds every resulting ChangeEvent into one Event tally.
    fn process_terrain(simulated_forest: &mut Forest, move_phase: u32, pool: &ThreadPool) -> Event {
        let changes = collect_movements(&simulated_forest, move_phase, pool);
        // Vacates each origin square, leaving behind whatever the mover was
        // standing on (e.g. the tree underneath a bear).
        let moved_from_actions = |old_loc, i, movements: &Hashes| match old_loc {
            ForestFeature::LumberJack(l) => {
                if movements.before_lumbers.contains(&i) {
                    ForestFeature::Empty
                } else {
                    ForestFeature::LumberJack(l)
                }
            }
            ForestFeature::LumberSeedling(l, sap) => {
                if movements.before_lumbers.contains(&i) {
                    ForestFeature::Tree(FloraVariant::Sapling(sap))
                } else {
                    ForestFeature::LumberSeedling(l, sap)
                }
            }
            ForestFeature::Bear(b) => {
                if movements.before_bears.contains(&i) {
                    ForestFeature::Empty
                } else {
                    ForestFeature::Bear(b)
                }
            }
            ForestFeature::BearTree(b, tree) => {
                if movements.before_bears.contains(&i) {
                    // The bear leaves; the tree it shared the square with
                    // stays behind unchanged.
                    match tree {
                        FloraVariant::Elder(l) => ForestFeature::Tree(FloraVariant::Elder(l)),
                        FloraVariant::Tree(l) => ForestFeature::Tree(FloraVariant::Tree(l)),
                        FloraVariant::Sapling(sap) => {
                            ForestFeature::Tree(FloraVariant::Sapling(sap))
                        }
                    }
                } else {
                    ForestFeature::BearTree(b, tree)
                }
            }
            l => l,
        };
        // Populates each destination square, combining whatever arrives
        // (bear / lumberjack / sapling) with what is already there and
        // reporting any notable outcome as a ChangeEvent.
        let moved_to_actions = |new_loc, i, movements: &Hashes| {
            if let Some(bear) = movements.bears.get(&i) {
                if let Some(lum) = movements.lumbers.get(&i) {
                    if movements.trees.contains(&i) {
                        //all three interact
                        match new_loc {
                            ForestFeature::Empty => (
                                ForestFeature::BearTree(
                                    BearInfo::moved(),
                                    FloraVariant::Sapling(Seedling {
                                        // NOTE(review): every other planting
                                        // seeds with current_age: 0; using the
                                        // flat index here looks unintended —
                                        // confirm.
                                        current_age: i as u32,
                                    }),
                                ),
                                Some(ChangeEvent::SaplingPlantedLumberJackMauled(*lum)),
                            ),
                            _ => panic!(" bear tree sap moved into undesriable location"),
                        }
                    } else {
                        // bear and lumberjack interact
                        match new_loc {
                            ForestFeature::Empty => (
                                ForestFeature::Bear(BearInfo::moved()),
                                Some(ChangeEvent::LumberJackMauled(*lum)),
                            ),
                            ForestFeature::Tree(l) => match l {
                                FloraVariant::Sapling(a) => (
                                    ForestFeature::BearTree(
                                        BearInfo::moved(),
                                        FloraVariant::Sapling(a),
                                    ),
                                    Some(ChangeEvent::LumberJackMauled(*lum)),
                                ),
                                FloraVariant::Tree(tree) => (
                                    ForestFeature::BearTree(
                                        BearInfo::moved(),
                                        FloraVariant::Tree(tree),
                                    ),
                                    Some(ChangeEvent::LumberJackMauled(*lum)),
                                ),
                                FloraVariant::Elder(tree) => (
                                    ForestFeature::BearTree(
                                        BearInfo::moved(),
                                        FloraVariant::Elder(tree),
                                    ),
                                    Some(ChangeEvent::LumberJackMauled(*lum)),
                                ),
                            },
                            _ => panic!("moved bear to invalid position lumber"),
                        }
                    }
                } else if movements.trees.contains(&i) {
                    // bear and tree interact
                    match new_loc {
                        ForestFeature::Empty => (
                            ForestFeature::BearTree(
                                *bear,
                                FloraVariant::Sapling(Seedling { current_age: 0 }),
                            ),
                            None,
                        ),
                        ForestFeature::LumberJack(lum) => (
                            ForestFeature::BearTree(
                                BearInfo::moved(),
                                FloraVariant::Sapling(Seedling { current_age: 0 }),
                            ),
                            Some(ChangeEvent::LumberJackMauled(lum)),
                        ),
                        _ => panic!("bear and sapling went into invalid"),
                    }
                } else {
                    // just the bear moves
                    match new_loc {
                        ForestFeature::Empty => (ForestFeature::Bear(*bear), None),
                        ForestFeature::LumberJack(lum) => (
                            ForestFeature::Bear(BearInfo::moved()),
                            Some(ChangeEvent::LumberJackMauled(lum)),
                        ),
                        ForestFeature::LumberSeedling(lum, s) => (
                            ForestFeature::BearTree(BearInfo::moved(), FloraVariant::Sapling(s)),
                            Some(ChangeEvent::LumberJackMauled(lum)),
                        ),
                        ForestFeature::Tree(l) => match l {
                            FloraVariant::Sapling(a) => (
                                ForestFeature::BearTree(*bear, FloraVariant::Sapling(a)),
                                None,
                            ),
                            FloraVariant::Tree(tree) => (
                                ForestFeature::BearTree(*bear, FloraVariant::Tree(tree)),
                                None,
                            ),
                            FloraVariant::Elder(tree) => (
                                ForestFeature::BearTree(*bear, FloraVariant::Elder(tree)),
                                None,
                            ),
                        },
                        _ => panic!("bear moved to invalid position {:?}", new_loc),
                    }
                }
            } else if let Some(lum) = movements.lumbers.get(&i) {
                if movements.trees.contains(&i) {
                    // lumbers and trees
                    match new_loc {
                        ForestFeature::Empty => (
                            ForestFeature::LumberSeedling(*lum, Seedling { current_age: 0 }),
                            Some(ChangeEvent::SaplingPlanted),
                        ),
                        _ => panic!("lumber jack sap moved to invalid area"),
                    }
                } else {
                    //lumber jack alone
                    match new_loc {
                        ForestFeature::Empty => (ForestFeature::LumberJack(*lum), None),
                        ForestFeature::Tree(l) => match l {
                            FloraVariant::Sapling(a) => {
                                (ForestFeature::LumberSeedling(*lum, a), None)
                            }
                            // Mature trees are felled: 1 unit of wood for a
                            // tree, 2 for an elder; felling ends the move.
                            FloraVariant::Tree(_) => (
                                ForestFeature::LumberJack(Woodcutter {
                                    lumber_collected: lum.lumber_collected + 1,
                                    finished_moving: true,
                                }),
                                Some(ChangeEvent::TreeCutDown(1)),
                            ),
                            FloraVariant::Elder(_) => (
                                ForestFeature::LumberJack(Woodcutter {
                                    lumber_collected: lum.lumber_collected + 2,
                                    finished_moving: true,
                                }),
                                Some(ChangeEvent::TreeCutDown(2)),
                            ),
                        },
                        _ => panic!("invalid"),
                    }
                }
            } else if movements.trees.contains(&i) {
                //trees alone
                if let ForestFeature::Empty = new_loc {
                    (
                        ForestFeature::Tree(FloraVariant::Sapling(Seedling { current_age: 0 })),
                        Some(ChangeEvent::SaplingPlanted),
                    )
                } else {
                    panic!("planting in a non empty zone {:?}", new_loc);
                }
            } else {
                (new_loc, None)
            }
        };
        // Apply the move plan, then tally every emitted event in parallel.
        simulated_forest
            .batched_move_to(&changes, moved_from_actions, moved_to_actions)
            .par_iter()
            .filter_map(|x| {
                x.map(|current_evnet| match current_evnet {
                    ChangeEvent::SaplingPlanted => Event {
                        saplings_planted: 1,
                        wood_cut: 0,
                        lumberjacks_mauled: 0,
                    },
                    ChangeEvent::TreeCutDown(wood) => Event {
                        saplings_planted: 0,
                        wood_cut: wood,
                        lumberjacks_mauled: 0,
                    },
                    // A mauled lumberjack's collected wood still counts
                    // toward wood_cut.
                    ChangeEvent::LumberJackMauled(lum) => Event {
                        saplings_planted: 0,
                        wood_cut: lum.lumber_collected,
                        lumberjacks_mauled: 1,
                    },
                    ChangeEvent::SaplingPlantedLumberJackMauled(lum) => Event {
                        saplings_planted: 1,
                        wood_cut: lum.lumber_collected,
                        lumberjacks_mauled: 1,
                    },
                })
            })
            .reduce(
                || Event {
                    saplings_planted: 0,
                    wood_cut: 0,
                    lumberjacks_mauled: 0,
                },
                |a, b| Event {
                    saplings_planted: a.saplings_planted + b.saplings_planted,
                    wood_cut: a.wood_cut + b.wood_cut,
                    lumberjacks_mauled: a.lumberjacks_mauled + b.lumberjacks_mauled,
                },
            )
    }
    /// Fans the three planning passes (sapling spawning, lumberjack moves,
    /// bear moves) out onto the thread pool and gathers their results.
    ///
    /// Saplings spawn only in phase 0 and lumberjacks move only in the
    /// first three phases; bears move in every phase.
    fn collect_movements(simulated_forest: &Forest, move_phase: u32, pool: &ThreadPool) -> Hashes {
        let (tree_transmitter, tree_receiver) = channel();
        let (lumber_transmitter, lumber_receiver) = channel();
        let (bear_transmiter, bear_receiver) = channel();
        // Each worker gets its own Arc'd snapshot of the forest.
        let shared_forest = Arc::new(simulated_forest.to_owned());
        let tree_forest = Arc::clone(&shared_forest);
        pool.execute(move || {
            let tree_transmitter = tree_transmitter;
            tree_transmitter
                .send(if move_phase == 0 {
                    process_spawning(&tree_forest, &mut rand::thread_rng())
                } else {
                    HashSet::new()
                })
                .unwrap();
        });
        let lumber_forest = Arc::clone(&shared_forest);
        pool.execute(move || {
            let lumber_transmitter = lumber_transmitter;
            lumber_transmitter
                .send(if move_phase < 3 {
                    process_lumberjacks(&lumber_forest, &mut rand::thread_rng())
                } else {
                    (HashSet::new(), HashMap::new())
                })
                .unwrap();
        });
        let bear_forest = Arc::clone(&shared_forest);
        pool.execute(move || {
            let bear_transmiter = bear_transmiter;
            bear_transmiter
                .send(process_bear(&bear_forest, &mut rand::thread_rng()))
                .unwrap();
        });
        // Block until all three workers report back.
        let seeds = tree_receiver.recv().unwrap();
        let (before_lumbers, after_lumbers) = lumber_receiver.recv().unwrap();
        let (before_bears, after_bears) = bear_receiver.recv().unwrap();
        Hashes {
            trees: seeds,
            before_lumbers,
            lumbers: after_lumbers,
            before_bears,
            bears: after_bears,
        }
    }
fn process_spawning(simulated_forest: &Forest, rng: &mut ThreadRng) -> HashSet<usize> {
let mut changed_locations = HashSet::new();
let mature_tress_criteria = |x| match x {
ForestFeature::Tree(t) => match t {
FloraVariant::Tree(_) => Some(MatureTree::Tree),
FloraVariant::Elder(_) => Some(MatureTree::Elder),
_ => None,
},
_ => None,
};
let mature_tress = simulated_forest.get_locations_with_info(mature_tress_criteria);
/* main planting proceess happens here */
for m in mature_tress {
/* Random chance of planting */
let gen = rng.gen_range(0, 10);
match m.feature {
MatureTree::Tree if gen == 0 => {
if let Some(i) = add_sap_to_forest(simulated_forest, rng, m, &changed_locations)
{
changed_locations.insert(i);
}
//simulated_forest.transform_feature((*new_site_x, *new_site_y), spawn_seedling);
}
MatureTree::Elder if gen == 0 || gen == 1 => {
if let Some(i) = add_sap_to_forest(simulated_forest, rng, m, &changed_locations)
{
changed_locations.insert(i);
}
//simulated_forest.transform_feature((*new_site_x, *new_site_y), spawn_seedling);
}
_ => continue,
}
}
changed_locations
}
fn add_sap_to_forest(
simulated_forest: &Forest,
rng: &mut ThreadRng,
m: FeatureLocation<MatureTree>,
changed_locations: &HashSet<usize>,
) -> Option<usize> {
let plantable_criteria = |x| match x {
ForestFeature::Empty => true,
_ => false,
};
let potential_locations =
simulated_forest.get_interactable_neighbours_locations(m.x, m.y, plantable_criteria);
let potential_locations = potential_locations
.iter()
.filter(|(x, y)| {
changed_locations
.get(&simulated_forest.get_absolute_cordinate(*x, *y))
.is_none()
})
.collect::<Vec<&(usize, usize)>>();
if potential_locations.is_empty() {
return None;
}
let (new_site_x, new_site_y) = potential_locations
.get(rng.gen_range(0, potential_locations.len()))
.unwrap();
Some(simulated_forest.get_absolute_cordinate(*new_site_x, *new_site_y))
}
fn process_lumberjacks(
simulated_forest: &Forest,
rng: &mut ThreadRng,
) -> (HashSet<usize>, HashMap<usize, Woodcutter>) {
let mut moved_to_locations = HashMap::new();
let mut moved_from_locations = HashSet::new();
let movable_lumberjack_criteria = |x| match x {
ForestFeature::LumberJack(l) if !l.finished_moving => Some(l),
ForestFeature::LumberSeedling(l, _) if !l.finished_moving => Some(l),
_ => None,
};
simulated_forest.get_locations_with_info::<Woodcutter>(movable_lumberjack_criteria);
let lumber_jack_locations =
simulated_forest.get_locations_with_info::<Woodcutter>(movable_lumberjack_criteria);
let loacation_criteria = |x| match x {
ForestFeature::Empty | ForestFeature::Tree(_) => true,
_ => false,
};
for current_lumberjack in lumber_jack_locations {
let new_destinations = simulated_forest.get_interactable_neighbours_locations(
current_lumberjack.x,
current_lumberjack.y,
loacation_criteria,
);
let new_destinations = new_destinations
.iter()
.filter(|(x, y)| {
moved_to_locations
.get(&simulated_forest.get_absolute_cordinate(*x, *y))
.is_none()
})
.collect::<Vec<&(usize, usize)>>();
if new_destinations.is_empty() {
continue;
}
let (new_site_x, new_site_y) = new_destinations
.get(rng.gen_range(0, new_destinations.len()))
.unwrap();
let new_absolute_loc =
simulated_forest.get_absolute_cordinate(*new_site_x, *new_site_y);
moved_to_locations.insert(new_absolute_loc, current_lumberjack.feature);
let old_absolue_loc =
simulated_forest.get_absolute_cordinate(current_lumberjack.x, current_lumberjack.y);
moved_from_locations.insert(old_absolue_loc);
}
(moved_from_locations, moved_to_locations)
}
///Movement of bears
fn process_bear(
simulated_forest: &Forest,
rng: &mut ThreadRng,
) -> (HashSet<usize>, HashMap<usize, BearInfo>) {
let mut moved_from_locations = HashSet::new();
let mut moved_to_locations = HashMap::new();
let movable_criteria = |x| match x {
ForestFeature::Bear(b) if !b.finished_moving => Some(b),
ForestFeature::BearTree(b, _) if !b.finished_moving => Some(b),
_ => None,
};
let bear_locations = simulated_forest.get_locations_with_info(movable_criteria);
let criteria = |x| match x {
ForestFeature::Empty
| ForestFeature::Tree(_)
| ForestFeature::LumberJack(_)
| ForestFeature::LumberSeedling(_, _) => true,
_ => false,
};
for current_bear in bear_locations {
let new_destinations = simulated_forest.get_interactable_neighbours_locations(
current_bear.x,
current_bear.y,
criteria,
);
let new_destinations = new_destinations
.iter()
.filter(|(x, y)| {
moved_to_locations
.get(&simulated_forest.get_absolute_cordinate(*x, *y))
.is_none()
})
.collect::<Vec<&(usize, usize)>>();
if new_destinations.is_empty() {
continue;
}
let (new_site_x, new_site_y) = new_destinations
.get(rng.gen_range(0, new_destinations.len()))
.unwrap();
let absolute_loc = simulated_forest.get_absolute_cordinate(*new_site_x, *new_site_y);
moved_to_locations.insert(absolute_loc, current_bear.feature);
let old_absolue_loc =
simulated_forest.get_absolute_cordinate(current_bear.x, current_bear.y);
moved_from_locations.insert(old_absolue_loc);
}
(moved_from_locations, moved_to_locations)
}
    /// Applies the yearly population rules and returns the updated census.
    ///
    /// Lumberjack numbers scale with the wood collected this year; the
    /// bear population changes based on this year's maul incidents.
    fn populate(
        simulated_forest: &mut Forest,
        rng: &mut ThreadRng,
        wood_collected: u32,
        maul_incidents: u32,
    ) -> Census {
        let censare = simulated_forest.get_terrain_counts();
        let delta_lumber = populate_lumberjacks(
            simulated_forest,
            rng,
            wood_collected,
            censare.lumberjack_count,
        );
        let delta_bears = populate_bears(simulated_forest, rng, maul_incidents);
        // Report post-adjustment counts alongside the rest of the census.
        Census {
            bear_count: (censare.bear_count as i32 + delta_bears) as u32,
            lumberjack_count: (censare.lumberjack_count as i32 + delta_lumber) as u32,
            ..censare
        }
    }
    /// Adjusts the lumberjack workforce after a full simulated year and
    /// returns the change in head count.
    ///
    /// Rules: no lumberjacks -> hire one; fewer units of wood than
    /// lumberjacks -> fire one; otherwise hire wood_collected / count.
    fn populate_lumberjacks(
        simulated_forest: &mut Forest,
        rng: &mut ThreadRng,
        wood_collected: u32,
        lumberjack_count: u32,
    ) -> i32 {
        /*Calculate lumber jack changes*/
        let delta_num_lumberjacks = if lumberjack_count == 0 {
            1
        } else if lumberjack_count > wood_collected {
            -1
        } else {
            (wood_collected / lumberjack_count) as i32
        };
        match delta_num_lumberjacks.cmp(&0) {
            Ordering::Equal => {}
            Ordering::Greater => {
                // New hires can be dropped on empty ground or a sapling.
                let spawnable_criteria = |x| match x {
                    ForestFeature::Empty => true,
                    ForestFeature::Tree(t) => match t {
                        FloraVariant::Sapling(_) => true,
                        _ => false,
                    },
                    _ => false,
                };
                let mut empty_locations = simulated_forest.get_locations(spawnable_criteria);
                let mut lumber_spawn_sites = Vec::with_capacity(delta_num_lumberjacks as usize);
                /* Collect randomly chosen spawn sites */
                // NOTE(review): panics if there are fewer free squares than
                // new hires — confirm the forest can never fill up that far.
                for _ in 0..delta_num_lumberjacks {
                    let new_lumber_location =
                        empty_locations.remove(rng.gen_range(0, empty_locations.len()));
                    lumber_spawn_sites.push(new_lumber_location);
                }
                /*Instruct the forest on how to spawn the new lumberjacks*/
                let spawnable_transformation = |new_loc| match new_loc {
                    ForestFeature::Empty => ForestFeature::LumberJack(Woodcutter::new()),
                    ForestFeature::Tree(l) => match l {
                        FloraVariant::Sapling(a) => {
                            ForestFeature::LumberSeedling(Woodcutter::new(), a)
                        }
                        _ => panic!("spawning lumberJack invalid tree"),
                    },
                    _ => panic!("spawning lumberJack invalid location"),
                };
                simulated_forest.transform_features(&lumber_spawn_sites, spawnable_transformation);
            }
            Ordering::Less => {
                // Fire exactly one lumberjack chosen at random.
                let criteria = |x| match x {
                    ForestFeature::LumberJack(_) => true,
                    ForestFeature::LumberSeedling(_, _) => true,
                    _ => false,
                };
                let mut cullable_lumberjacks = simulated_forest.get_locations(criteria);
                let culled_lumber_jack =
                    cullable_lumberjacks.remove(rng.gen_range(0, cullable_lumberjacks.len()));
                let remove_lumber_transform = |new_loc| match new_loc {
                    ForestFeature::LumberJack(_) => ForestFeature::Empty,
                    ForestFeature::LumberSeedling(_, s) => {
                        ForestFeature::Tree(FloraVariant::Sapling(s))
                    }
                    _ => panic!("spawning lumberJack invalid location"),
                };
                simulated_forest.transform_features(&[culled_lumber_jack], remove_lumber_transform);
            }
        }
        delta_num_lumberjacks
    }
fn populate_bears(
simulated_forest: &mut Forest,
rng: &mut ThreadRng,
maul_incidents: u32,
) -> i32 {
match maul_incidents.cmp(&0) {
Ordering::Equal | Ordering::Less => {
/*where bears can spawn*/
let spawnable_criteria = |x| match x {
ForestFeature::Empty => true,
ForestFeature::Tree(_) => true,
_ => false,
};
let mut bear_spawn_spots = simulated_forest.get_locations(spawnable_criteria);
let new_bear_location =
bear_spawn_spots.remove(rng.gen_range(0, bear_spawn_spots.len()));
/*Instructions on how to spawn bears*/
let spawnable_transformation = |new_loc| match new_loc {
ForestFeature::Empty => ForestFeature::Bear(BearInfo::new()),
ForestFeature::Tree(l) => ForestFeature::BearTree(BearInfo::new(), l),
_ => panic!("spawning lumberJack invalid location"),
};
simulated_forest.transform_features(&[new_bear_location], spawnable_transformation);
1
}
Ordering::Greater => {
let criteria = |x| match x {
ForestFeature::Bear(_) => true,
ForestFeature::BearTree(_, _) => true,
_ => false,
};
let mut cullable_bears = simulated_forest.get_locations(criteria);
let culled_bear = cullable_bears.remove(rng.gen_range(0, cullable_bears.len()));
let remove_bear_transform = |new_loc| match new_loc {
ForestFeature::Bear(_) => ForestFeature::Empty,
ForestFeature::BearTree(_, t) => ForestFeature::Tree(t),
_ => panic!("spawning lumberJack invalid location"),
};
simulated_forest.transform_features(&[culled_bear], remove_bear_transform);
-1
}
}
}
}
|
//! This module implements a subset of the two phase commit specification presented in the paper
//! ["Consensus on Transaction Commit"](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-2003-96.pdf)
//! by Jim Gray and Leslie Lamport.
use stateright::report::WriteReporter;
use stateright::{Checker, Model, Property, Representative, Rewrite, RewritePlan};
use std::collections::BTreeSet;
use std::hash::Hash;
use std::ops::Range;
type R = usize; // represented by integers in 0..N-1
/// The model: a set of resource managers (RMs), indexed 0..N-1, plus an
/// implicit transaction manager (TM).
#[derive(Clone)]
struct TwoPhaseSys {
    pub rms: Range<R>,
}
/// One global state of the protocol.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct TwoPhaseState {
    rm_state: Vec<RmState>, // map from each RM
    tm_state: TmState,
    tm_prepared: Vec<bool>, // map from each RM
    msgs: BTreeSet<Message>, // message "network"; a set, so receipt never consumes
}
/// Messages exchanged between the TM and the RMs.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
enum Message {
    Prepared { rm: R }, // an RM tells the TM it is ready to commit
    Commit,             // TM decision broadcast: commit
    Abort,              // TM decision broadcast: abort
}
/// Per-resource-manager protocol state.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
enum RmState {
    Working,
    Prepared,
    Committed,
    Aborted,
}
/// Transaction-manager protocol state.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum TmState {
    Init,
    Committed,
    Aborted,
}
/// The protocol's atomic steps.
#[derive(Clone, Debug)]
enum Action {
    TmRcvPrepared(R),
    TmCommit,
    TmAbort,
    RmPrepare(R),
    RmChooseToAbort(R),
    RmRcvCommitMsg(R),
    RmRcvAbortMsg(R),
}
impl Model for TwoPhaseSys {
    type State = TwoPhaseState;
    type Action = Action;
    /// Single initial state: TM in Init, every RM Working, no messages.
    fn init_states(&self) -> Vec<Self::State> {
        vec![TwoPhaseState {
            rm_state: self.rms.clone().map(|_| RmState::Working).collect(),
            tm_state: TmState::Init,
            tm_prepared: self.rms.clone().map(|_| false).collect(),
            msgs: Default::default(),
        }]
    }
    /// Enumerates every protocol step enabled in `state`.
    fn actions(&self, state: &Self::State, actions: &mut Vec<Self::Action>) {
        // TM may commit only once it has recorded every RM as prepared.
        if state.tm_state == TmState::Init && state.tm_prepared.iter().all(|p| *p) {
            actions.push(Action::TmCommit);
        }
        // TM may unilaterally abort while still undecided.
        if state.tm_state == TmState::Init {
            actions.push(Action::TmAbort);
        }
        for rm in self.rms.clone() {
            if state.tm_state == TmState::Init
                && state.msgs.contains(&Message::Prepared { rm })
            {
                actions.push(Action::TmRcvPrepared(rm));
            }
            if state.rm_state.get(rm) == Some(&RmState::Working) {
                actions.push(Action::RmPrepare(rm));
            }
            // A working RM may also spontaneously abort.
            if state.rm_state.get(rm) == Some(&RmState::Working) {
                actions.push(Action::RmChooseToAbort(rm));
            }
            // Decision messages stay in the set, so each RM can react.
            if state.msgs.contains(&Message::Commit) {
                actions.push(Action::RmRcvCommitMsg(rm));
            }
            if state.msgs.contains(&Message::Abort) {
                actions.push(Action::RmRcvAbortMsg(rm));
            }
        }
    }
    /// Applies `action` to a copy of `last_state`; every action produced by
    /// `actions` is enabled, so this always returns `Some`.
    fn next_state(&self, last_state: &Self::State, action: Self::Action) -> Option<Self::State> {
        let mut state = last_state.clone();
        match action {
            Action::TmRcvPrepared(rm) => {
                state.tm_prepared[rm] = true;
            }
            Action::TmCommit => {
                state.tm_state = TmState::Committed;
                state.msgs.insert(Message::Commit);
            }
            Action::TmAbort => {
                state.tm_state = TmState::Aborted;
                state.msgs.insert(Message::Abort);
            }
            Action::RmPrepare(rm) => {
                state.rm_state[rm] = RmState::Prepared;
                state.msgs.insert(Message::Prepared { rm });
            }
            Action::RmChooseToAbort(rm) => {
                state.rm_state[rm] = RmState::Aborted;
            }
            Action::RmRcvCommitMsg(rm) => {
                state.rm_state[rm] = RmState::Committed;
            }
            Action::RmRcvAbortMsg(rm) => {
                state.rm_state[rm] = RmState::Aborted;
            }
        }
        Some(state)
    }
    /// Two reachability ("sometimes") properties plus the core safety
    /// property: no RM aborts while another RM commits.
    fn properties(&self) -> Vec<Property<Self>> {
        vec![
            Property::<Self>::sometimes("abort agreement", |_, state| {
                state.rm_state.iter().all(|s| s == &RmState::Aborted)
            }),
            Property::<Self>::sometimes("commit agreement", |_, state| {
                state.rm_state.iter().all(|s| s == &RmState::Committed)
            }),
            Property::<Self>::always("consistent", |_, state| {
                !state.rm_state.iter().any(|s1| {
                    state
                        .rm_state
                        .iter()
                        .any(|s2| s1 == &RmState::Aborted && s2 == &RmState::Committed)
                })
            }),
        ]
    }
}
#[cfg(test)]
#[test]
fn can_model_2pc() {
    // for very small state space (using BFS this time)
    let checker = TwoPhaseSys { rms: 0..3 }.checker().spawn_bfs().join();
    assert_eq!(checker.unique_state_count(), 288);
    checker.assert_properties();
    // for slightly larger state space (using DFS this time)
    let checker = TwoPhaseSys { rms: 0..5 }.checker().spawn_dfs().join();
    assert_eq!(checker.unique_state_count(), 8_832);
    checker.assert_properties();
    // reverify the larger state space with symmetry reduction
    // (fewer unique states since symmetric states are canonicalized)
    let checker = TwoPhaseSys { rms: 0..5 }
        .checker()
        .symmetry()
        .spawn_dfs()
        .join();
    assert_eq!(checker.unique_state_count(), 665);
    checker.assert_properties();
}
/// CLI entry point: `check`, `check-sym`, and `explore` subcommands, each
/// taking an optional resource-manager count (default 2).
fn main() -> Result<(), pico_args::Error> {
    env_logger::init_from_env(env_logger::Env::default().default_filter_or("info")); // `RUST_LOG=${LEVEL}` env variable to override
    let mut args = pico_args::Arguments::from_env();
    match args.subcommand()?.as_deref() {
        Some("check") => {
            let rm_count = args.opt_free_from_str()?.unwrap_or(2);
            println!(
                "Checking two phase commit with {} resource managers.",
                rm_count
            );
            TwoPhaseSys { rms: 0..rm_count }
                .checker()
                .threads(num_cpus::get())
                .spawn_dfs()
                .report(&mut WriteReporter::new(&mut std::io::stdout()));
        }
        Some("check-sym") => {
            let rm_count = args.opt_free_from_str()?.unwrap_or(2);
            println!(
                "Checking two phase commit with {} resource managers using symmetry reduction.",
                rm_count
            );
            TwoPhaseSys { rms: 0..rm_count }
                .checker()
                .threads(num_cpus::get())
                .symmetry()
                .spawn_dfs()
                .report(&mut WriteReporter::new(&mut std::io::stdout()));
            // Implementing this trait enables symmetry reduction to speed up model checking (optional).
            // Note: the impls below are declared inside this match arm but,
            // like all trait impls, apply to the types program-wide.
            impl Representative for TwoPhaseState {
                /// Canonicalizes a state by sorting RM states and renaming
                /// RM indices consistently across all fields.
                fn representative(&self) -> Self {
                    let plan = RewritePlan::from_values_to_sort(&self.rm_state);
                    Self {
                        rm_state: plan.reindex(&self.rm_state),
                        tm_state: self.tm_state.clone(),
                        tm_prepared: plan.reindex(&self.tm_prepared),
                        msgs: self
                            .msgs
                            .iter()
                            .map(|m| match m {
                                Message::Prepared { rm } => Message::Prepared {
                                    rm: plan.rewrite(rm),
                                },
                                Message::Commit => Message::Commit,
                                Message::Abort => Message::Abort,
                            })
                            .collect(),
                    }
                }
            }
            impl<T> Rewrite<T> for RmState {
                // RmState holds no RM indices, so rewriting is the identity.
                fn rewrite<S>(&self, _: &RewritePlan<T, S>) -> Self {
                    self.clone()
                }
            }
        }
        Some("explore") => {
            let rm_count = args.opt_free_from_str()?.unwrap_or(2);
            let address = args
                .opt_free_from_str()?
                .unwrap_or("localhost:3000".to_string());
            println!(
                "Exploring state space for two phase commit with {} resource managers on {}.",
                rm_count, address
            );
            TwoPhaseSys { rms: 0..rm_count }
                .checker()
                .threads(num_cpus::get())
                .serve(address);
        }
        _ => {
            println!("USAGE:");
            println!("  ./2pc check [RESOURCE_MANAGER_COUNT]");
            println!("  ./2pc check-sym [RESOURCE_MANAGER_COUNT]");
            println!("  ./2pc explore [RESOURCE_MANAGER_COUNT] [ADDRESS]");
        }
    }
    Ok(())
}
|
//use djangohashers::*;
use dotenv;
// Serde imports
use serde::{Deserialize, Serialize};
// Mongo Imports
use mongodb::{
bson::{doc, oid::ObjectId, Bson},
sync::Client,
};
/// Login credentials received from the client.
#[derive(Serialize, Deserialize, Debug)]
pub struct User {
    pub email: String,
    pub password: String,
}
/// A contact request (name, email, phone, message).
#[derive(Serialize, Deserialize, Debug)]
pub struct Contact {
    name: String,
    email: String,
    phone: String,
    message: String,
}
/// A property listing, mapped from the "houses" collection.
#[derive(Serialize, Deserialize, Debug)]
pub struct House {
    id: String, // stringified Mongo ObjectId
    street_name: String,
    image: String,
    description: String,
    long_description: String,
    resident_type: String,
    price: String,
}
/// A staff member, mapped from the "team" collection.
#[derive(Serialize, Deserialize, Debug)]
pub struct Team {
    id: String, // stringified Mongo ObjectId
    name: String,
    image: String,
    description: String,
    member_type: String,
}
/// A blog post entry.
#[derive(Serialize, Deserialize, Debug)]
pub struct Blog {
    id: String,
    title: String,
    author: String,
    text: String,
    date: String,
}
// Connect to the Mongo DB database and return a handle to "realtor".
// Panics with an actionable message when MONGO_URL is unset or the
// connection cannot be established (the server cannot run without a DB).
pub fn connect() -> mongodb::sync::Database {
    println!("Connecting to MongoDB");
    let uri = dotenv::var("MONGO_URL").expect("MONGO_URL must be set in the environment or .env");
    let client = Client::with_uri_str(uri.as_str()).expect("failed to connect to MongoDB");
    let db = client.database("realtor");
    println!("Connection Established");
    db
}
// Search for the user and see if user exists.
// NOTE(review): the filter key is "username" but the only identifying field
// on `User` is `email` — confirm the stored documents really use a
// "username" key holding the email address.
pub fn check_user_exists(db: &mongodb::sync::Database, user: &User) -> bool {
    let user_collection = db.collection("users");
    let filter = doc! { "username": user.email.as_str() };
    // Counting the whole cursor just to test existence; find_one would do.
    let cursor = user_collection.find(filter, None).unwrap();
    if cursor.count() > 0 {
        return true;
    }
    false
}
// Fetch every document in "houses" and map each into a `House`.
// NOTE(review): every field access unwraps — a document missing any
// expected key panics the calling thread; confirm the schema is enforced.
pub fn get_all_houses_from_mongo(db: &mongodb::sync::Database) -> Vec<House> {
    let house_collection = db.collection("houses");
    let cursor = house_collection.find(None, None).unwrap();
    let mut houses: Vec<House> = Vec::new();
    // Iterate over the results of the cursor.
    for result in cursor {
        let house_bson = result.unwrap();
        // Create a house struct
        let house = House {
            id: house_bson
                .get("_id")
                .and_then(Bson::as_object_id)
                .unwrap()
                .to_string(),
            street_name: house_bson
                .get("streetName")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            image: house_bson
                .get("image")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            description: house_bson
                .get("description")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            long_description: house_bson
                .get("longDescription")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            resident_type: house_bson
                .get("type")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            // price goes through Bson's Display rather than as_str —
            // NOTE(review): confirm the stored BSON type for price.
            price: house_bson.get("price").unwrap().to_string(),
        };
        houses.push(house);
    }
    houses
}
// Fetch every document in "team" and map each into a `Team`.
// NOTE(review): like the house query, every field access unwraps and will
// panic on a document missing an expected key.
pub fn get_team_from_mongo(db: &mongodb::sync::Database) -> Vec<Team> {
    let team_collection = db.collection("team");
    let cursor = team_collection.find(None, None).unwrap();
    let mut team: Vec<Team> = Vec::new();
    // Iterate over the results of the cursor.
    for result in cursor {
        let team_bson = result.unwrap();
        // Create a team struct
        let member = Team {
            id: team_bson
                .get("_id")
                .and_then(Bson::as_object_id)
                .unwrap()
                .to_string(),
            name: team_bson
                .get("name")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            image: team_bson
                .get("image")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            description: team_bson
                .get("description")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            member_type: team_bson
                .get("type")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
        };
        team.push(member);
    }
    team
}
// Fetch a single team member by stringified ObjectId.
// NOTE(review): when no document matches, an all-empty `Team` is returned;
// when several match, the last one wins. `with_string` also panics on a
// malformed id — consider validating upstream.
pub fn get_a_member_from_mongo(db: &mongodb::sync::Database, id: &str) -> Team {
    let team_collection = db.collection("team");
    let filter = doc! { "_id": ObjectId::with_string(&id).unwrap() };
    let cursor = team_collection.find(filter, None).unwrap();
    // Fallback value in case the cursor yields nothing.
    let mut member = Team {
        id: String::new(),
        name: String::new(),
        image: String::new(),
        description: String::new(),
        member_type: String::new(),
    };
    // Iterate over the results of the cursor.
    for result in cursor {
        let team_bson = result.unwrap();
        // Create a team struct from the matched document
        member = Team {
            id: team_bson
                .get("_id")
                .and_then(Bson::as_object_id)
                .unwrap()
                .to_string(),
            name: team_bson
                .get("name")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            image: team_bson
                .get("image")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            description: team_bson
                .get("description")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            member_type: team_bson
                .get("type")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
        };
    }
    member
}
// Insert a new team member unless one with the same name already exists.
// Returns true when the member was inserted, false on a duplicate name.
pub fn create_a_member_in_mongo(db: &mongodb::sync::Database, team: &Team) -> bool {
    let team_collection = db.collection("team");
    // Duplicate check is by name only.
    let filter = doc! { "name" : &team.name };
    let cursor = team_collection.find(filter, None).unwrap();
    // Shadowing: `team` is now the BSON document to insert; note the
    // member type is stored under the "type" key.
    let team = doc! {
        "name": &team.name,
        "image": &team.image,
        "description": &team.description,
        "type": &team.member_type,
    };
    if cursor.count() > 0 {
        println!("Team member already exists!");
        return false;
    } else {
        // NOTE(review): the insert result is only logged; a failed insert
        // still reports success to the caller.
        let result = team_collection.insert_one(team, None);
        println!("{:?}", result);
        return true;
    };
}
/// Update an existing team member (looked up by `_id`) with the values in
/// `team`.
///
/// Fixes over the previous version:
/// - the new values are wrapped in a `$set` operator document; MongoDB's
///   `update_one` rejects a bare field document with no update operators,
///   so the old call always failed server-side;
/// - the member type is written under the `"type"` key, matching the key
///   used by `create_a_member_in_mongo`/`get_team_from_mongo` (it was
///   previously written as `"member_type"`, which nothing ever reads).
///
/// Returns `true` when the driver reports success, `false` otherwise.
/// Panics if `team.id` is not a valid ObjectId string.
pub fn update_a_member_in_mongo(db: &mongodb::sync::Database, team: &Team) -> bool {
    let team_collection = db.collection("team");
    let filter = doc! { "_id": ObjectId::with_string(&team.id).unwrap() };
    let update = doc! {
        "$set": {
            "name": &team.name,
            "image": &team.image,
            "description": &team.description,
            "type": &team.member_type,
        }
    };
    let result = team_collection.update_one(filter, update, None);
    println!("{:?}", result);
    result.is_ok()
}
pub fn delete_a_member_in_mongo(db: &mongodb::sync::Database, team: &Team) -> bool {
let team_collection = db.collection("team");
let filter = doc! { "_id": ObjectId::with_string(&team.id).unwrap() };
let result = team_collection.delete_one(filter, None);
println!("{:?}", result);
true
}
/// Fetch a single house by its ObjectId string.
///
/// Returns a `House` with all fields empty when no document matches.
/// Panics if `id` is not a valid ObjectId, on driver errors, or when a
/// matching document is missing one of the expected fields.
pub fn get_a_house_from_mongo(db: &mongodb::sync::Database, id: &str) -> House {
    let house_collection = db.collection("houses");
    let filter = doc! { "_id": ObjectId::with_string(id).unwrap() };
    let cursor = house_collection.find(filter, None).unwrap();
    // Placeholder returned unchanged if the cursor yields no documents.
    let mut house = House {
        id: String::new(),
        street_name: String::new(),
        image: String::new(),
        description: String::new(),
        long_description: String::new(),
        resident_type: String::new(),
        price: String::new(),
    };
    // Iterate over the results of the cursor (an `_id` filter matches at
    // most one document; a later match would overwrite an earlier one).
    for result in cursor {
        let house_bson = result.unwrap();
        // Copy each BSON field into the struct, panicking on type mismatch.
        house.id = house_bson
            .get("_id")
            .and_then(Bson::as_object_id)
            .unwrap()
            .to_string();
        house.street_name = house_bson
            .get("streetName")
            .and_then(Bson::as_str)
            .unwrap()
            .to_string();
        house.image = house_bson
            .get("image")
            .and_then(Bson::as_str)
            .unwrap()
            .to_string();
        house.description = house_bson
            .get("description")
            .and_then(Bson::as_str)
            .unwrap()
            .to_string();
        house.long_description = house_bson
            .get("longDescription")
            .and_then(Bson::as_str)
            .unwrap()
            .to_string();
        house.resident_type = house_bson
            .get("type")
            .and_then(Bson::as_str)
            .unwrap()
            .to_string();
        // `price` deliberately uses `Bson::to_string` rather than `as_str`,
        // so it also stringifies numeric BSON values.
        house.price = house_bson.get("price").unwrap().to_string();
    }
    house
}
/// Insert a new house unless one with the same street name already exists.
///
/// Returns `false` (inserting nothing) on a duplicate street name;
/// otherwise logs the driver result and reports success.
pub fn create_a_house_in_mongo(db: &mongodb::sync::Database, house: &House) -> bool {
    let house_collection = db.collection("houses");
    // Duplicate check by exact street-name match.
    let existing = house_collection
        .find(doc! { "streetName" : &house.street_name }, None)
        .unwrap();
    let document = doc! {
        "streetName": &house.street_name,
        "image": &house.image,
        "description": &house.description,
        "longDescription": &house.long_description,
        "type": &house.resident_type,
        "price": &house.price,
    };
    if existing.count() > 0 {
        println!("House already exists!");
        return false;
    }
    let result = house_collection.insert_one(document, None);
    println!("{:?}", result);
    true
}
/// Update an existing house (looked up by `_id`) with the values in `house`.
///
/// Fix: the new values are wrapped in a `$set` operator document; MongoDB's
/// `update_one` rejects a bare field document with no update operators, so
/// the old call always failed server-side.
///
/// Returns `true` when the driver reports success, `false` otherwise.
/// Panics if `house.id` is not a valid ObjectId string.
pub fn update_a_house_in_mongo(db: &mongodb::sync::Database, house: &House) -> bool {
    let house_collection = db.collection("houses");
    let filter = doc! { "_id": ObjectId::with_string(&house.id).unwrap() };
    let update = doc! {
        "$set": {
            "streetName": &house.street_name,
            "image": &house.image,
            "description": &house.description,
            "longDescription": &house.long_description,
            "type": &house.resident_type,
            "price": &house.price,
        }
    };
    let result = house_collection.update_one(filter, update, None);
    println!("{:?}", result);
    result.is_ok()
}
pub fn delete_a_house_in_mongo(db: &mongodb::sync::Database, house: &House) -> bool {
let house_collection = db.collection("houses");
let filter = doc! { "_id": ObjectId::with_string(&house.id).unwrap() };
let result = house_collection.delete_one(filter, None);
println!("{:?}", result);
true
}
/// Persist a contact-form submission in the `contacts` collection.
///
/// Best effort: the insert result is intentionally ignored and the call
/// always reports success.
pub fn create_a_contact_in_mongo(db: &mongodb::sync::Database, contact: &Contact) -> bool {
    let document = doc! {
        "name": contact.name.as_str(),
        "email": contact.email.as_str(),
        "phone": contact.phone.as_str(),
        "message": contact.message.as_str()
    };
    let _result = db.collection("contacts").insert_one(document, None);
    true
}
/// Fetch every blog post from the `blog` collection.
///
/// Panics (via `unwrap`) on driver errors or when a document is missing
/// any of the expected string fields.
pub fn get_all_blog_from_mongo(db: &mongodb::sync::Database) -> Vec<Blog> {
    let cursor = db.collection("blog").find(None, None).unwrap();
    cursor
        .map(|result| {
            let doc = result.unwrap();
            // Helper: pull a required string field out of the BSON document.
            let field = |key: &str| doc.get(key).and_then(Bson::as_str).unwrap().to_string();
            Blog {
                id: doc
                    .get("_id")
                    .and_then(Bson::as_object_id)
                    .unwrap()
                    .to_string(),
                title: field("title"),
                author: field("author"),
                text: field("text"),
                date: field("date"),
            }
        })
        .collect()
}
/// Fetch a single blog post by its ObjectId string.
///
/// Bug fix: the `author`/`text`/`date` fields were previously read from the
/// `image`/`description`/`longDescription` BSON keys (copy-pasted from the
/// house loader). Those keys do not exist on blog documents — compare
/// `get_all_blog_from_mongo` and `create_a_blog_in_mongo`, which both use
/// `author`/`text`/`date` — so any matching document made this function
/// panic on the `unwrap`. It now reads the correct keys.
///
/// Returns a `Blog` with all fields empty when no document matches.
/// Panics if `id` is not a valid ObjectId, on driver errors, or when a
/// matching document is missing one of the expected fields.
pub fn get_a_blog_from_mongo(db: &mongodb::sync::Database, id: &str) -> Blog {
    let blog_collection = db.collection("blog");
    let filter = doc! { "_id": ObjectId::with_string(&id).unwrap() };
    let cursor = blog_collection.find(filter, None).unwrap();
    // Placeholder returned unchanged if the cursor yields no documents.
    let mut blog = Blog {
        id: String::new(),
        title: String::new(),
        author: String::new(),
        text: String::new(),
        date: String::new(),
    };
    for result in cursor {
        let blog_bson = result.unwrap();
        // Overwrite the placeholder with the fetched post (an `_id` filter
        // matches at most one document).
        blog = Blog {
            id: blog_bson
                .get("_id")
                .and_then(Bson::as_object_id)
                .unwrap()
                .to_string(),
            title: blog_bson
                .get("title")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            author: blog_bson
                .get("author")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            text: blog_bson
                .get("text")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
            date: blog_bson
                .get("date")
                .and_then(Bson::as_str)
                .unwrap()
                .to_string(),
        };
    }
    blog
}
/// Update an existing blog post (looked up by `_id`) with the values in
/// `blog`.
///
/// Fix: the new values are wrapped in a `$set` operator document; MongoDB's
/// `update_one` rejects a bare field document with no update operators, so
/// the old call always failed server-side.
///
/// Returns `true` when the driver reports success, `false` otherwise.
/// Panics if `blog.id` is not a valid ObjectId string.
pub fn update_a_blog_in_mongo(db: &mongodb::sync::Database, blog: &Blog) -> bool {
    let blog_collection = db.collection("blog");
    let filter = doc! { "_id": ObjectId::with_string(&blog.id).unwrap() };
    let update = doc! {
        "$set": {
            "title": &blog.title,
            "author": &blog.author,
            "text": &blog.text,
            "date": &blog.date,
        }
    };
    let result = blog_collection.update_one(filter, update, None);
    println!("{:?}", result);
    result.is_ok()
}
/// Delete the blog post whose `_id` matches `blog.id`.
///
/// Always returns `true`; the driver result is only logged.
/// Panics if `blog.id` is not a valid ObjectId string.
pub fn delete_a_blog_in_mongo(db: &mongodb::sync::Database, blog: &Blog) -> bool {
    let blog_collection = db.collection("blog");
    let filter = doc! { "_id": ObjectId::with_string(&blog.id).unwrap() };
    let result = blog_collection.delete_one(filter, None);
    println!("{:?}", result);
    true
}
/// Insert a new blog post unless one with the same title already exists.
///
/// Returns `false` (inserting nothing) on a duplicate title; otherwise logs
/// the driver result and reports success.
pub fn create_a_blog_in_mongo(db: &mongodb::sync::Database, blog: &Blog) -> bool {
    let blog_collection = db.collection("blog");
    // Duplicate check by exact title match.
    let filter = doc! { "title" : &blog.title };
    let cursor = blog_collection.find(filter, None).unwrap();
    // Shadow the parameter with the BSON document to insert.
    let blog = doc! {
        "title": &blog.title,
        "author": &blog.author,
        "text": &blog.text,
        "date": &blog.date,
    };
    if cursor.count() > 0 {
        println!("Blog already exists!");
        return false;
    } else {
        let result = blog_collection.insert_one(blog, None);
        println!("{:?}", result);
        return true;
    };
}
|
#![no_std]
#![no_main]
extern crate panic_semihosting;
use cortex_m::{asm::delay, peripheral::DWT};
use embedded_hal::digital::v2::OutputPin;
use rtfm::cyccnt::{Instant, U32Ext as _};
use stm32f1xx_hal::usb::{Peripheral, UsbBus, UsbBusType};
use stm32f1xx_hal::{gpio, prelude::*};
use usb_device::bus;
use usb_device::prelude::*;
#[allow(unused)]
pub mod hid {
    //! Minimal USB HID class implementing a 3-button relative mouse on top
    //! of `usb-device`. Only enumeration and interrupt IN reports are
    //! implemented; class-specific OUT requests are rejected.
    use usb_device::class_prelude::*;
    use usb_device::Result;
    pub const USB_CLASS_HID: u8 = 0x03;
    const USB_SUBCLASS_NONE: u8 = 0x00;
    const USB_SUBCLASS_BOOT: u8 = 0x01;
    const USB_INTERFACE_NONE: u8 = 0x00;
    const USB_INTERFACE_KEYBOARD: u8 = 0x01;
    const USB_INTERFACE_MOUSE: u8 = 0x02;
    // HID class-specific control requests (HID 1.11, section 7.2).
    const REQ_GET_REPORT: u8 = 0x01;
    const REQ_GET_IDLE: u8 = 0x02;
    const REQ_GET_PROTOCOL: u8 = 0x03;
    const REQ_SET_REPORT: u8 = 0x09;
    const REQ_SET_IDLE: u8 = 0x0a;
    const REQ_SET_PROTOCOL: u8 = 0x0b;
    // https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/mouse-collection-report-descriptor
    const REPORT_DESCR: &[u8] = &[
        0x05, 0x01, // USAGE_PAGE (Generic Desktop)
        0x09, 0x02, // USAGE (Mouse)
        0xa1, 0x01, // COLLECTION (Application)
        0x09, 0x01, // USAGE (Pointer)
        0xa1, 0x00, // COLLECTION (Physical)
        0x05, 0x09, // USAGE_PAGE (Button)
        0x19, 0x01, // USAGE_MINIMUM (Button 1)
        0x29, 0x03, // USAGE_MAXIMUM (Button 3)
        0x15, 0x00, // LOGICAL_MINIMUM (0)
        0x25, 0x01, // LOGICAL_MAXIMUM (1)
        0x95, 0x03, // REPORT_COUNT (3)
        0x75, 0x01, // REPORT_SIZE (1)
        0x81, 0x02, // INPUT (Data,Var,Abs)
        0x95, 0x01, // REPORT_COUNT (1)
        0x75, 0x05, // REPORT_SIZE (5)
        0x81, 0x03, // INPUT (Cnst,Var,Abs)
        0x05, 0x01, // USAGE_PAGE (Generic Desktop)
        0x09, 0x30, // USAGE (X)
        0x09, 0x31, // USAGE (Y)
        0x15, 0x81, // LOGICAL_MINIMUM (-127)
        0x25, 0x7f, // LOGICAL_MAXIMUM (127)
        0x75, 0x08, // REPORT_SIZE (8)
        0x95, 0x02, // REPORT_COUNT (2)
        0x81, 0x06, // INPUT (Data,Var,Rel)
        0xc0, // END_COLLECTION
        0xc0, // END_COLLECTION
    ];
    /// Build a 3-byte mouse input report matching `REPORT_DESCR`: no
    /// buttons pressed, relative X/Y movement as two's-complement bytes.
    pub fn report(x: i8, y: i8) -> [u8; 3] {
        [
            0x00, // button: none
            x as u8, // x-axis
            y as u8, // y-axis
        ]
    }
    /// A single-interface HID mouse class with one interrupt IN endpoint.
    pub struct HIDClass<'a, B: UsbBus> {
        report_if: InterfaceNumber,
        report_ep: EndpointIn<'a, B>,
    }
    impl<B: UsbBus> HIDClass<'_, B> {
        /// Creates a new HIDClass with the provided UsbBus and max_packet_size in bytes. For
        /// full-speed devices, max_packet_size has to be one of 8, 16, 32 or 64.
        pub fn new(alloc: &UsbBusAllocator<B>) -> HIDClass<'_, B> {
            HIDClass {
                report_if: alloc.interface(),
                // 8-byte interrupt IN endpoint, polled every 10 ms.
                report_ep: alloc.interrupt(8, 10),
            }
        }
        /// Queue an input report on the interrupt endpoint; errors (e.g.
        /// endpoint busy) are silently ignored.
        pub fn write(&mut self, data: &[u8]) {
            self.report_ep.write(data).ok();
        }
    }
    impl<B: UsbBus> UsbClass<B> for HIDClass<'_, B> {
        /// Emit the interface descriptor, the class-specific HID descriptor
        /// (type 0x21) and the interrupt endpoint descriptor.
        fn get_configuration_descriptors(&self, writer: &mut DescriptorWriter) -> Result<()> {
            writer.interface(
                self.report_if,
                USB_CLASS_HID,
                USB_SUBCLASS_NONE,
                USB_INTERFACE_MOUSE,
            )?;
            let descr_len: u16 = REPORT_DESCR.len() as u16;
            writer.write(
                0x21,
                &[
                    // NOTE(review): bcdHID is written as 0x0101 here; HID
                    // devices commonly report 0x0110 (1.10) — confirm intent.
                    0x01, // bcdHID
                    0x01, // bcdHID
                    0x00, // bContryCode
                    0x01, // bNumDescriptors
                    0x22, // bDescriptorType
                    descr_len as u8, // wDescriptorLength
                    (descr_len >> 8) as u8, // wDescriptorLength
                ],
            )?;
            writer.endpoint(&self.report_ep)?;
            Ok(())
        }
        /// Handle IN control transfers: standard GET_DESCRIPTOR for the HID
        /// and report descriptors, plus the class-specific GET_REPORT.
        fn control_in(&mut self, xfer: ControlIn<B>) {
            let req = xfer.request();
            if req.request_type == control::RequestType::Standard {
                match (req.recipient, req.request) {
                    (control::Recipient::Interface, control::Request::GET_DESCRIPTOR) => {
                        let (dtype, _index) = req.descriptor_type_index();
                        if dtype == 0x21 {
                            // HID descriptor.
                            // Fix: a leftover debug `cortex_m::asm::bkpt()`
                            // was removed here — BKPT hard-faults when no
                            // debugger is attached, crashing enumeration.
                            let descr_len: u16 = REPORT_DESCR.len() as u16;
                            let descr = &[
                                0x09, // length
                                0x21, // descriptor type
                                0x01, // bcdHID
                                0x01, // bcdHID
                                0x00, // bCountryCode
                                0x01, // bNumDescriptors
                                0x22, // bDescriptorType
                                descr_len as u8, // wDescriptorLength
                                (descr_len >> 8) as u8, // wDescriptorLength
                            ];
                            xfer.accept_with(descr).ok();
                            return;
                        } else if dtype == 0x22 {
                            // Report descriptor
                            xfer.accept_with(REPORT_DESCR).ok();
                            return;
                        }
                    }
                    _ => {
                        return;
                    }
                };
            }
            // Only handle class requests addressed to our interface.
            if !(req.request_type == control::RequestType::Class
                && req.recipient == control::Recipient::Interface
                && req.index == u8::from(self.report_if) as u16)
            {
                return;
            }
            match req.request {
                REQ_GET_REPORT => {
                    // USB host requests for report
                    // I'm not sure what should we do here, so just send empty report
                    xfer.accept_with(&report(0, 0)).ok();
                }
                _ => {
                    xfer.reject().ok();
                }
            }
        }
        /// Reject all class-specific OUT requests (SET_REPORT, SET_IDLE, …)
        /// addressed to our interface; ignore everything else.
        fn control_out(&mut self, xfer: ControlOut<B>) {
            let req = xfer.request();
            if !(req.request_type == control::RequestType::Class
                && req.recipient == control::Recipient::Interface
                && req.index == u8::from(self.report_if) as u16)
            {
                return;
            }
            xfer.reject().ok();
        }
    }
}
use hid::HIDClass;
// On-board LED of the BluePill board (PC13, push-pull output).
type LED = gpio::gpioc::PC13<gpio::Output<gpio::PushPull>>;
// Interval between `on_tick` runs, in CPU cycles (8_000_000 cycles ≈ 167 ms
// at the 48 MHz sysclk configured in `init`).
const PERIOD: u32 = 8_000_000;
#[rtfm::app(device = stm32f1xx_hal::stm32, peripherals = true, monotonic = rtfm::cyccnt::CYCCNT)]
const APP: () = {
    // Resources shared between the RTFM tasks below.
    struct Resources {
        counter: u8,
        led: LED,
        usb_dev: UsbDevice<'static, UsbBusType>,
        hid: HIDClass<'static, UsbBusType>,
    }
    // Hardware bring-up: clocks, USB bus-reset trick, device/class creation,
    // and scheduling of the first `on_tick`.
    #[init(schedule = [on_tick])]
    fn init(mut cx: init::Context) -> init::LateResources {
        // Lives for the whole program so the allocator can hand out the
        // `'static` borrows required by `UsbDevice`/`HIDClass`.
        static mut USB_BUS: Option<bus::UsbBusAllocator<UsbBusType>> = None;
        // The CYCCNT monotonic timer needs trace + the cycle counter enabled.
        cx.core.DCB.enable_trace();
        DWT::unlock();
        cx.core.DWT.enable_cycle_counter();
        let mut flash = cx.device.FLASH.constrain();
        let mut rcc = cx.device.RCC.constrain();
        let mut gpioc = cx.device.GPIOC.split(&mut rcc.apb2);
        let led = gpioc.pc13.into_push_pull_output(&mut gpioc.crh);
        // 8 MHz HSE -> 48 MHz sysclk; the USB peripheral requires a valid
        // 48 MHz USB clock (checked by the assert below).
        let clocks = rcc
            .cfgr
            .use_hse(8.mhz())
            .sysclk(48.mhz())
            .pclk1(24.mhz())
            .freeze(&mut flash.acr);
        assert!(clocks.usbclk_valid());
        let mut gpioa = cx.device.GPIOA.split(&mut rcc.apb2);
        // BluePill board has a pull-up resistor on the D+ line.
        // Pull the D+ pin down to send a RESET condition to the USB bus.
        let mut usb_dp = gpioa.pa12.into_push_pull_output(&mut gpioa.crh);
        usb_dp.set_low().ok();
        // Hold the reset condition for sysclk/100 cycles (~10 ms).
        delay(clocks.sysclk().0 / 100);
        let usb_dm = gpioa.pa11;
        let usb_dp = usb_dp.into_floating_input(&mut gpioa.crh);
        let usb = Peripheral {
            usb: cx.device.USB,
            pin_dm: usb_dm,
            pin_dp: usb_dp,
        };
        *USB_BUS = Some(UsbBus::new(usb));
        let hid = HIDClass::new(USB_BUS.as_ref().unwrap());
        let usb_dev = UsbDeviceBuilder::new(USB_BUS.as_ref().unwrap(), UsbVidPid(0xc410, 0x0000))
            .manufacturer("Fake company")
            .product("mouse")
            .serial_number("TEST")
            .device_class(0)
            .build();
        // Kick off the periodic task one PERIOD after startup.
        cx.schedule.on_tick(cx.start + PERIOD.cycles()).ok();
        init::LateResources {
            counter: 0,
            led,
            usb_dev,
            hid,
        }
    }
    // Periodic software task: reschedules itself every PERIOD cycles and
    // alternates between moving the cursor right (LED on) and left (LED off).
    #[task(schedule = [on_tick], resources = [counter, led, hid])]
    fn on_tick(mut cx: on_tick::Context) {
        cx.schedule.on_tick(Instant::now() + PERIOD.cycles()).ok();
        let counter: &mut u8 = &mut cx.resources.counter;
        let led = &mut cx.resources.led;
        let hid = &mut cx.resources.hid;
        const P: u8 = 2;
        *counter = (*counter + 1) % P;
        // move mouse cursor horizontally (x-axis) while blinking LED
        if *counter < P / 2 {
            led.set_high().ok();
            hid.write(&hid::report(10, 0));
        } else {
            led.set_low().ok();
            hid.write(&hid::report(-10, 0));
        }
    }
    // Both USB interrupts (high-priority TX and low-priority RX) delegate to
    // the shared poll routine.
    #[task(binds=USB_HP_CAN_TX, resources = [counter, led, usb_dev, hid])]
    fn usb_tx(mut cx: usb_tx::Context) {
        usb_poll(
            &mut cx.resources.counter,
            &mut cx.resources.led,
            &mut cx.resources.usb_dev,
            &mut cx.resources.hid,
        );
    }
    #[task(binds=USB_LP_CAN_RX0, resources = [counter, led, usb_dev, hid])]
    fn usb_rx(mut cx: usb_rx::Context) {
        usb_poll(
            &mut cx.resources.counter,
            &mut cx.resources.led,
            &mut cx.resources.usb_dev,
            &mut cx.resources.hid,
        );
    }
    // Unused interrupt handed to RTFM for dispatching the `on_tick` software
    // task.
    extern "C" {
        fn EXTI0();
    }
};
/// Drive the USB device state machine from interrupt context.
///
/// Class-specific traffic is dispatched into `hid` by `poll` itself, so no
/// further handling is needed here; `_counter` and `_led` are accepted only
/// to match the resources locked by the calling tasks.
fn usb_poll<B: bus::UsbBus>(
    _counter: &mut u8,
    _led: &mut LED,
    usb_dev: &mut UsbDevice<'static, B>,
    hid: &mut HIDClass<'static, B>,
) {
    // The return value signals whether any class had traffic; nothing more
    // is done with it either way.
    let _had_events = usb_dev.poll(&mut [hid]);
}
|
/// A `Framed` transport over `PacketCodec` that additionally trace-logs
/// every outgoing (`>>>`) and incoming (`<<<`) item.
#[derive(Debug)]
pub(crate) struct LoggingFramed<T>(tokio::codec::Framed<T, crate::proto::PacketCodec>)
where
    T: tokio::io::AsyncRead + tokio::io::AsyncWrite;
impl<T> LoggingFramed<T>
where
    T: tokio::io::AsyncRead + tokio::io::AsyncWrite,
{
    /// Wrap `io` in a framed transport using a default-constructed
    /// `PacketCodec`.
    pub(crate) fn new(io: T) -> Self {
        LoggingFramed(tokio::codec::Framed::new(io, Default::default()))
    }
}
// futures 0.1 `Sink`: delegate everything to the inner `Framed`, logging
// each item handed to `start_send`.
impl<T> futures::Sink for LoggingFramed<T>
where
    T: tokio::io::AsyncRead + tokio::io::AsyncWrite,
{
    type SinkItem = <tokio::codec::Framed<T, crate::proto::PacketCodec> as futures::Sink>::SinkItem;
    type SinkError =
        <tokio::codec::Framed<T, crate::proto::PacketCodec> as futures::Sink>::SinkError;
    // NOTE(review): the item is logged before delegation, so if the inner
    // sink returns `AsyncSink::NotReady` and the caller retries, the same
    // item appears twice in the trace log — confirm that is acceptable.
    fn start_send(
        &mut self,
        item: Self::SinkItem,
    ) -> futures::StartSend<Self::SinkItem, Self::SinkError> {
        log::trace!(">>> {:?}", item);
        self.0.start_send(item)
    }
    fn poll_complete(&mut self) -> futures::Poll<(), Self::SinkError> {
        self.0.poll_complete()
    }
}
// futures 0.1 `Stream`: poll the inner `Framed` and trace-log each decoded
// item; errors are propagated unchanged by `?`.
impl<T> futures::Stream for LoggingFramed<T>
where
    T: tokio::io::AsyncRead + tokio::io::AsyncWrite,
{
    type Item = <tokio::codec::Framed<T, crate::proto::PacketCodec> as futures::Stream>::Item;
    type Error = <tokio::codec::Framed<T, crate::proto::PacketCodec> as futures::Stream>::Error;
    fn poll(&mut self) -> futures::Poll<Option<Self::Item>, Self::Error> {
        let result = self.0.poll()?;
        // Only a ready, non-EOF item is logged; NotReady and end-of-stream
        // pass through silently.
        if let futures::Async::Ready(Some(item)) = &result {
            log::trace!("<<< {:?}", item);
        }
        Ok(result)
    }
}
|
use std::cmp::max;
use std::collections::HashMap;
use std::hash::{
Hash,
Hasher,
};
use std::sync::Arc;
use std::usize;
use ash::vk;
use smallvec::SmallVec;
use sourcerenderer_core::graphics::{
AttachmentRef,
DepthStencilAttachmentRef,
Format,
LoadOp,
OutputAttachmentRef,
RenderPassPipelineStage,
SampleCount,
StoreOp,
};
use crate::format::format_to_vk;
use crate::pipeline::samples_to_vk;
use crate::raw::RawVkDevice;
use crate::texture::VkTextureView;
/// Platform-independent description of a single render-pass attachment.
/// Derives `Hash`/`Eq` so the containing `VkRenderPassInfo` can be used as
/// a lookup key.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub(crate) struct VkAttachmentInfo {
    pub(crate) format: Format,
    pub(crate) samples: SampleCount,
    pub(crate) load_op: LoadOp,
    pub(crate) store_op: StoreOp,
    pub(crate) stencil_load_op: LoadOp,
    pub(crate) stencil_store_op: StoreOp,
}
/// Platform-independent description of one subpass: which attachments it
/// reads as inputs, writes as color outputs, and (optionally) uses for
/// depth/stencil. Indices refer into `VkRenderPassInfo::attachments`.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub(crate) struct VkSubpassInfo {
    pub(crate) input_attachments: SmallVec<[AttachmentRef; 16]>,
    pub(crate) output_color_attachments: SmallVec<[OutputAttachmentRef; 16]>,
    pub(crate) depth_stencil_attachment: Option<DepthStencilAttachmentRef>,
}
/// Full description of a render pass (attachments + subpasses); hashable
/// and comparable so it can serve as a cache key for `VkRenderPass::new`.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub(crate) struct VkRenderPassInfo {
    pub(crate) attachments: SmallVec<[VkAttachmentInfo; 16]>,
    pub(crate) subpasses: SmallVec<[VkSubpassInfo; 16]>,
}
/// Per-attachment book-keeping collected while walking the subpasses:
/// which subpass writes the attachment, the last subpass that uses it, and
/// the image layouts to declare at the start/end of the render pass.
#[derive(Default)]
struct VkRenderPassAttachmentMetadata {
    pub produced_in_subpass: u32,
    pub last_used_in_subpass: u32,
    initial_layout: Option<vk::ImageLayout>,
    final_layout: Option<vk::ImageLayout>,
}
/// Owned Vulkan render pass handle; the raw handle is destroyed in `Drop`,
/// and the `Arc<RawVkDevice>` keeps the device alive until then.
pub struct VkRenderPass {
    device: Arc<RawVkDevice>,
    render_pass: vk::RenderPass,
}
impl VkRenderPass {
    /// Translate a platform-independent `VkRenderPassInfo` into an actual
    /// Vulkan render pass.
    ///
    /// Construction happens in three passes over `info.subpasses`:
    /// 1. count the `vk::AttachmentReference` entries needed so the backing
    ///    `Vec` can be sized exactly up front — raw pointers into it are
    ///    taken *before* the entries are pushed, so it must never
    ///    reallocate (guarded by the `debug_assert_eq!` capacity checks);
    /// 2. collect per-attachment metadata (producing/last-using subpass,
    ///    initial/final layouts) and emit merged subpass dependencies;
    /// 3. build the `vk::SubpassDescription`s, including preserve lists for
    ///    attachments that skip a subpass.
    pub(crate) fn new(device: &Arc<RawVkDevice>, info: &VkRenderPassInfo) -> Self {
        let mut attachment_references = Vec::<vk::AttachmentReference>::new();
        let mut subpass_infos = Vec::<vk::SubpassDescription>::with_capacity(info.subpasses.len());
        let mut dependencies = Vec::<vk::SubpassDependency>::new();
        let mut preserve_attachments = Vec::<u32>::new();
        // Pass 1: count the references (color attachments count twice, for
        // the color slot plus the matching resolve slot).
        let mut attachment_count = 0;
        for subpass in &info.subpasses {
            if subpass.depth_stencil_attachment.is_some() {
                attachment_count += 1;
            }
            attachment_count += subpass.input_attachments.len();
            attachment_count += subpass.output_color_attachments.len() * 2;
        }
        attachment_references.reserve_exact(attachment_count); // We must not allocate after this so the pointers stay valid
        preserve_attachments.reserve_exact(info.attachments.len() * info.subpasses.len());
        let mut attachment_metadata = HashMap::<u32, VkRenderPassAttachmentMetadata>::new();
        // One bit per attachment index, per subpass: set when the subpass
        // references the attachment (used for the preserve lists below).
        let mut subpass_attachment_bitmasks = Vec::<u64>::new();
        subpass_attachment_bitmasks.resize(info.subpasses.len(), 0);
        let subpass_dependencies_start = dependencies.len();
        // Pass 2: metadata + dependencies.
        for (subpass_index, subpass) in info.subpasses.iter().enumerate() {
            let subpass_attachment_bitmask: &mut u64 =
                subpass_attachment_bitmasks.get_mut(subpass_index).unwrap();
            for input_attachment in &subpass.input_attachments {
                let metadata = attachment_metadata
                    .entry(input_attachment.index)
                    .or_default();
                let attachment_info = info
                    .attachments
                    .get(input_attachment.index as usize)
                    .unwrap();
                // Depend on the subpass that produced this attachment; merge
                // with an existing dependency for the same subpass pair.
                let mut dependency_opt = Some(build_dependency(
                    subpass_index as u32,
                    metadata.produced_in_subpass,
                    attachment_info.format,
                    input_attachment.pipeline_stage,
                ));
                dependency_opt = merge_dependencies(
                    &mut dependencies[subpass_dependencies_start..],
                    dependency_opt.unwrap(),
                );
                if let Some(dependency) = dependency_opt {
                    dependencies.push(dependency);
                }
                if metadata.initial_layout.is_none() {
                    metadata.initial_layout = Some(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL);
                }
                metadata.last_used_in_subpass =
                    max(metadata.last_used_in_subpass, subpass_index as u32);
                metadata.final_layout = Some(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL);
                *subpass_attachment_bitmask |= 1 << input_attachment.index;
            }
            for color_attachment in &subpass.output_color_attachments {
                let metadata = attachment_metadata
                    .entry(color_attachment.index)
                    .or_default();
                metadata.produced_in_subpass =
                    max(metadata.produced_in_subpass, subpass_index as u32);
                if metadata.initial_layout.is_none() {
                    metadata.initial_layout = Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL);
                }
                metadata.final_layout = Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL);
                *subpass_attachment_bitmask |= 1 << color_attachment.index;
                // MSAA resolve target, if any, is also produced here.
                if let Some(resolve_attachment_index) = color_attachment.resolve_attachment_index {
                    let resolve_metadata = attachment_metadata
                        .entry(resolve_attachment_index)
                        .or_default();
                    resolve_metadata.produced_in_subpass =
                        max(resolve_metadata.produced_in_subpass, subpass_index as u32);
                    if resolve_metadata.initial_layout.is_none() {
                        resolve_metadata.initial_layout =
                            Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL);
                    }
                    resolve_metadata.final_layout = Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL);
                    *subpass_attachment_bitmask |= 1 << resolve_attachment_index;
                }
            }
            if let Some(depth_stencil_attachment) = &subpass.depth_stencil_attachment {
                let depth_stencil_metadata = attachment_metadata
                    .entry(depth_stencil_attachment.index)
                    .or_default();
                if depth_stencil_metadata.initial_layout.is_none() {
                    // First use of this depth/stencil attachment.
                    depth_stencil_metadata.initial_layout =
                        Some(if depth_stencil_attachment.read_only {
                            vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL
                        } else {
                            vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
                        });
                } else {
                    // Re-use: order after the previous subpass that touched
                    // it; a full memory barrier only if it was written.
                    let mut dependency_opt = Some(build_depth_stencil_dependency(
                        subpass_index as u32,
                        depth_stencil_metadata.last_used_in_subpass,
                        depth_stencil_metadata.initial_layout
                            == Some(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL),
                    ));
                    dependency_opt = merge_dependencies(
                        &mut dependencies[subpass_dependencies_start..],
                        dependency_opt.unwrap(),
                    );
                    if let Some(dependency) = dependency_opt {
                        dependencies.push(dependency);
                    }
                }
                if depth_stencil_attachment.read_only {
                    depth_stencil_metadata.final_layout =
                        Some(vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL);
                    depth_stencil_metadata.last_used_in_subpass = max(
                        depth_stencil_metadata.last_used_in_subpass,
                        subpass_index as u32,
                    );
                } else {
                    depth_stencil_metadata.final_layout =
                        Some(vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
                    depth_stencil_metadata.produced_in_subpass = max(
                        depth_stencil_metadata.produced_in_subpass,
                        subpass_index as u32,
                    );
                };
                *subpass_attachment_bitmask |= 1 << depth_stencil_attachment.index;
            }
        }
        // NOTE(review): attachments with a DontCare store op are marked as
        // used until SUBPASS_EXTERNAL, which forces them into the preserve
        // list of every subpass that does not reference them — confirm this
        // inversion is intended (see the QCOM workaround below).
        for (index, attachment) in info.attachments.iter().enumerate() {
            if attachment.store_op == StoreOp::DontCare
                || attachment.stencil_store_op == StoreOp::DontCare
            {
                let metadata = attachment_metadata.entry(index as u32).or_default();
                metadata.last_used_in_subpass = vk::SUBPASS_EXTERNAL;
            }
        }
        // Pass 3: build the subpass descriptions. Each pointer into
        // `attachment_references` is captured *before* its entries are
        // pushed; `reserve_exact` above guarantees no reallocation.
        for (subpass_index, subpass) in info.subpasses.iter().enumerate() {
            let p_input_attachments = unsafe {
                attachment_references
                    .as_ptr()
                    .add(attachment_references.len())
            };
            for input_attachment in &subpass.input_attachments {
                attachment_references.push(vk::AttachmentReference {
                    attachment: input_attachment.index,
                    layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                });
                debug_assert_eq!(attachment_references.capacity(), attachment_count);
            }
            let p_color_attachments = unsafe {
                attachment_references
                    .as_ptr()
                    .add(attachment_references.len())
            };
            for color_attachment in &subpass.output_color_attachments {
                attachment_references.push(vk::AttachmentReference {
                    attachment: color_attachment.index,
                    layout: vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
                });
                debug_assert_eq!(attachment_references.capacity(), attachment_count);
            }
            // Resolve list must be either null or as long as the color list,
            // so unused slots are filled with ATTACHMENT_UNUSED.
            let p_resolve_attachments = unsafe {
                attachment_references
                    .as_ptr()
                    .add(attachment_references.len())
            };
            for color_attachment in &subpass.output_color_attachments {
                if let Some(resolve_attachment_index) = color_attachment.resolve_attachment_index {
                    // NOTE(review): the resolve reference layout here is
                    // SHADER_READ_ONLY_OPTIMAL while the final layout chosen
                    // above is TRANSFER_DST_OPTIMAL — verify intended.
                    attachment_references.push(vk::AttachmentReference {
                        attachment: resolve_attachment_index,
                        layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                    });
                    debug_assert_eq!(attachment_references.capacity(), attachment_count);
                } else {
                    attachment_references.push(vk::AttachmentReference {
                        attachment: vk::ATTACHMENT_UNUSED,
                        layout: vk::ImageLayout::UNDEFINED,
                    });
                    debug_assert_eq!(attachment_references.capacity(), attachment_count);
                }
            }
            let p_depth_stencil_attachment =
                if let Some(depth_stencil_attachment) = &subpass.depth_stencil_attachment {
                    let p_depth_stencil_attachment = unsafe {
                        attachment_references
                            .as_ptr()
                            .add(attachment_references.len())
                    };
                    attachment_references.push(vk::AttachmentReference {
                        attachment: depth_stencil_attachment.index,
                        layout: if depth_stencil_attachment.read_only {
                            vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL
                        } else {
                            vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
                        },
                    });
                    debug_assert_eq!(attachment_references.capacity(), attachment_count);
                    p_depth_stencil_attachment
                } else {
                    std::ptr::null()
                };
            // Preserve every attachment this subpass does not reference but
            // a later subpass still needs.
            let p_preserve_attachments = unsafe {
                preserve_attachments
                    .as_ptr()
                    .add(preserve_attachments.len())
            };
            let preserve_attachments_offset = preserve_attachments.len();
            for (index, _) in info.attachments.iter().enumerate() {
                let subpass_attachment_bitmask =
                    subpass_attachment_bitmasks.get(subpass_index).unwrap();
                let metadata = attachment_metadata.get(&(index as u32)).unwrap();
                if (subpass_attachment_bitmask & (1u64 << index as u64)) == 0
                    && metadata.last_used_in_subpass > (subpass_index as u32)
                {
                    preserve_attachments.push(index as u32);
                }
            }
            subpass_infos.push(vk::SubpassDescription {
                flags: vk::SubpassDescriptionFlags::empty(),
                pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS,
                input_attachment_count: subpass.input_attachments.len() as u32,
                p_input_attachments,
                color_attachment_count: subpass.output_color_attachments.len() as u32,
                p_color_attachments,
                p_resolve_attachments: if subpass.output_color_attachments.is_empty() {
                    std::ptr::null()
                } else {
                    p_resolve_attachments
                }, // QCOM bug workaround
                p_depth_stencil_attachment,
                preserve_attachment_count: (preserve_attachments.len()
                    - preserve_attachments_offset)
                    as u32,
                p_preserve_attachments,
            });
            // QCOM driver bug: https://developer.qualcomm.com/forum/qdn-forums/software/adreno-gpu-sdk/68949
        }
        // Translate the attachment descriptions, filling in the layouts the
        // metadata pass decided on. `unwrap` here relies on every attachment
        // being referenced by at least one subpass.
        let attachments: Vec<vk::AttachmentDescription> = info
            .attachments
            .iter()
            .enumerate()
            .map(|(index, a)| {
                let metadata = attachment_metadata.get(&(index as u32)).unwrap();
                vk::AttachmentDescription {
                    flags: vk::AttachmentDescriptionFlags::empty(),
                    format: format_to_vk(a.format, device.supports_d24),
                    samples: samples_to_vk(a.samples),
                    load_op: load_op_to_vk(a.load_op),
                    store_op: store_op_to_vk(a.store_op),
                    stencil_load_op: load_op_to_vk(a.stencil_load_op),
                    stencil_store_op: store_op_to_vk(a.stencil_store_op),
                    initial_layout: metadata.initial_layout.unwrap(),
                    final_layout: metadata.final_layout.unwrap(),
                }
            })
            .collect();
        let rp_info = vk::RenderPassCreateInfo {
            flags: vk::RenderPassCreateFlags::empty(),
            attachment_count: attachments.len() as u32,
            p_attachments: attachments.as_ptr(),
            subpass_count: subpass_infos.len() as u32,
            p_subpasses: subpass_infos.as_ptr(),
            dependency_count: dependencies.len() as u32,
            p_dependencies: dependencies.as_ptr(),
            ..Default::default()
        };
        Self {
            device: device.clone(),
            render_pass: unsafe { device.create_render_pass(&rp_info, None).unwrap() },
        }
    }
    /// Raw Vulkan handle; valid for the lifetime of `self`.
    pub fn handle(&self) -> &vk::RenderPass {
        &self.render_pass
    }
}
impl Drop for VkRenderPass {
    fn drop(&mut self) {
        // SAFETY: the handle was created from `self.device`, which the Arc
        // keeps alive, and it is never used again after drop.
        unsafe {
            self.device.destroy_render_pass(self.render_pass, None);
        }
    }
}
// Identity hashing: two passes hash equally iff they wrap the same handle,
// consistent with the `PartialEq` implementation below.
impl Hash for VkRenderPass {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.render_pass.hash(state);
    }
}
// Equality is identity of the underlying Vulkan handle.
impl PartialEq for VkRenderPass {
    fn eq(&self, other: &Self) -> bool {
        self.render_pass == other.render_pass
    }
}
impl Eq for VkRenderPass {}
/// Owned Vulkan framebuffer plus the texture views it references (held as
/// `Arc`s so they outlive the framebuffer) and the render pass it was
/// created against.
pub struct VkFrameBuffer {
    device: Arc<RawVkDevice>,
    frame_buffer: vk::Framebuffer,
    width: u32,
    height: u32,
    render_pass: Arc<VkRenderPass>,
    attachments: SmallVec<[Arc<VkTextureView>; 8]>,
}
impl VkFrameBuffer {
    /// Create a single-layer framebuffer of `width`×`height` for
    /// `render_pass`, attaching the given texture views in order.
    ///
    /// Clones of the view `Arc`s are retained so the underlying image views
    /// stay alive for the lifetime of the framebuffer.
    /// Panics if framebuffer creation fails.
    pub(crate) fn new(
        device: &Arc<RawVkDevice>,
        width: u32,
        height: u32,
        render_pass: &Arc<VkRenderPass>,
        attachments: &[&Arc<VkTextureView>],
    ) -> Self {
        let mut vk_attachments = SmallVec::<[vk::ImageView; 8]>::new();
        let mut attachment_refs = SmallVec::<[Arc<VkTextureView>; 8]>::new();
        // Collect the raw view handles and keep strong refs alongside.
        for attachment in attachments {
            vk_attachments.push(*attachment.view_handle());
            attachment_refs.push((*attachment).clone());
        }
        Self {
            device: device.clone(),
            frame_buffer: unsafe {
                device
                    .create_framebuffer(
                        &vk::FramebufferCreateInfo {
                            flags: vk::FramebufferCreateFlags::empty(),
                            render_pass: *render_pass.handle(),
                            attachment_count: vk_attachments.len() as u32,
                            p_attachments: vk_attachments.as_ptr(),
                            width,
                            height,
                            layers: 1,
                            ..Default::default()
                        },
                        None,
                    )
                    .unwrap()
            },
            width,
            height,
            attachments: attachment_refs,
            render_pass: render_pass.clone(),
        }
    }
    /// Raw Vulkan handle; valid for the lifetime of `self`.
    pub(crate) fn handle(&self) -> &vk::Framebuffer {
        &self.frame_buffer
    }
    pub(crate) fn width(&self) -> u32 {
        self.width
    }
    pub(crate) fn height(&self) -> u32 {
        self.height
    }
}
impl Drop for VkFrameBuffer {
    fn drop(&mut self) {
        // SAFETY: the handle was created from `self.device`, which the Arc
        // keeps alive, and it is never used again after drop.
        unsafe {
            self.device.destroy_framebuffer(self.frame_buffer, None);
        }
    }
}
/// Map the platform-independent attachment load op onto the Vulkan enum.
fn load_op_to_vk(load_op: LoadOp) -> vk::AttachmentLoadOp {
    match load_op {
        LoadOp::Load => vk::AttachmentLoadOp::LOAD,
        LoadOp::Clear => vk::AttachmentLoadOp::CLEAR,
        LoadOp::DontCare => vk::AttachmentLoadOp::DONT_CARE,
    }
}
/// Map the platform-independent attachment store op onto the Vulkan enum.
fn store_op_to_vk(store_op: StoreOp) -> vk::AttachmentStoreOp {
    match store_op {
        StoreOp::DontCare => vk::AttachmentStoreOp::DONT_CARE,
        StoreOp::Store => vk::AttachmentStoreOp::STORE,
    }
}
/// Build a subpass dependency for reading an attachment of `format` as an
/// input attachment in `subpass_index`, ordered after `src_subpass` (the
/// subpass that produced it).
///
/// The source stage/access are chosen from the format: depth/stencil
/// producers are waited on at LATE_FRAGMENT_TESTS with a depth-stencil
/// write hazard, color producers at COLOR_ATTACHMENT_OUTPUT. The
/// destination stages come from the shader stages that sample the input.
fn build_dependency(
    subpass_index: u32,
    src_subpass: u32,
    format: Format,
    stage: RenderPassPipelineStage,
) -> vk::SubpassDependency {
    let mut vk_pipeline_stages = vk::PipelineStageFlags::empty();
    if stage.contains(RenderPassPipelineStage::FRAGMENT) {
        vk_pipeline_stages |= vk::PipelineStageFlags::FRAGMENT_SHADER;
    }
    if stage.contains(RenderPassPipelineStage::VERTEX) {
        vk_pipeline_stages |= vk::PipelineStageFlags::VERTEX_SHADER;
    }
    vk::SubpassDependency {
        src_subpass,
        dst_subpass: subpass_index,
        src_stage_mask: if format.is_depth() || format.is_stencil() {
            vk::PipelineStageFlags::LATE_FRAGMENT_TESTS
        } else {
            vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT
        },
        dst_stage_mask: vk_pipeline_stages,
        src_access_mask: if format.is_depth() || format.is_stencil() {
            vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
        } else {
            vk::AccessFlags::COLOR_ATTACHMENT_WRITE
        },
        dst_access_mask: vk::AccessFlags::SHADER_READ,
        // Per-fragment ordering is sufficient for input attachments.
        dependency_flags: vk::DependencyFlags::BY_REGION,
    }
}
/// Build a subpass dependency ordering depth/stencil work in
/// `subpass_index` after `src_subpass`.
///
/// With `memory_barrier` set (the attachment was previously written) the
/// dependency also carries depth-stencil read/write access masks; otherwise
/// it is a pure execution dependency.
fn build_depth_stencil_dependency(
    subpass_index: u32,
    src_subpass: u32,
    memory_barrier: bool,
) -> vk::SubpassDependency {
    vk::SubpassDependency {
        src_subpass,
        dst_subpass: subpass_index,
        src_stage_mask: vk::PipelineStageFlags::LATE_FRAGMENT_TESTS,
        dst_stage_mask: vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS,
        src_access_mask: if memory_barrier {
            vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
        } else {
            vk::AccessFlags::empty()
        },
        dst_access_mask: if memory_barrier {
            vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
                | vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
        } else {
            vk::AccessFlags::empty()
        },
        dependency_flags: vk::DependencyFlags::BY_REGION,
    }
}
/// Merge `dependency` into an existing dependency for the same
/// (src_subpass, dst_subpass) pair, if one exists, by OR-ing the stage and
/// access masks into it.
///
/// Returns `None` when the dependency was folded into an existing entry,
/// or `Some(dependency)` when the caller still needs to append it.
fn merge_dependencies(
    dependencies: &mut [vk::SubpassDependency],
    dependency: vk::SubpassDependency,
) -> Option<vk::SubpassDependency> {
    let existing = dependencies.iter_mut().find(|d| {
        d.src_subpass == dependency.src_subpass && d.dst_subpass == dependency.dst_subpass
    });
    match existing {
        Some(found) => {
            found.src_access_mask |= dependency.src_access_mask;
            found.dst_access_mask |= dependency.dst_access_mask;
            found.src_stage_mask |= dependency.src_stage_mask;
            found.dst_stage_mask |= dependency.dst_stage_mask;
            None
        }
        None => Some(dependency),
    }
}
|
#![allow(dead_code)]
extern crate rand;
extern crate termion;
use std::io;
use termion::input::MouseTerminal;
use termion::raw::IntoRawMode;
use termion::screen::AlternateScreen;
mod grid;
mod game;
use game::Game;
fn main() {
    // Switch stdout into raw mode on the alternate screen, with mouse
    // reporting enabled, then hand both ends of the terminal to the game.
    let raw_stdout = io::stdout().into_raw_mode().unwrap();
    let terminal = MouseTerminal::from(AlternateScreen::from(raw_stdout));
    let mut game = Game::new(io::stdin(), terminal);
    game.run();
}
|
//! Infrastructure of lifting the data representation (DR) into structured
//! representation (SR).
mod storage;
use self::storage::LiftStorage;
use crate::{
dr,
sr::{instructions, module, ops, storage::Token, Constant, StructMember, Type},
};
use std::{borrow::Borrow, mem};
/// A structure that we associate an <id> with, containing
/// both the operation token and the result type.
struct OpInfo {
    // Token of the lifted operation itself.
    op: Token<ops::Op>,
    // Result type token; `None` for operations without a result type.
    ty: Option<Token<Type>>,
}
// Allow an `OpInfo` to be borrowed as its op token — presumably so storage
// keyed by `Token<ops::Op>` can look entries up directly
// (NOTE(review): confirm against `LiftStorage`'s key usage).
impl Borrow<Token<ops::Op>> for OpInfo {
    fn borrow(&self) -> &Token<ops::Op> {
        &self.op
    }
}
/// Accumulates the structured-representation storages while a data
/// representation module is being lifted.
pub struct LiftContext {
    //current_block: Option<Token<module::Block>>,
    // Lifted types, keyed by their original result <id>.
    types: LiftStorage<Type>,
    // Lifted constants, keyed by their original result <id>.
    constants: LiftStorage<Constant>,
    // Blocks of the function currently being lifted; drained per function.
    blocks: LiftStorage<module::Block>,
    // Lifted operations together with their `OpInfo` metadata.
    ops: LiftStorage<ops::Op, OpInfo>,
}
// Pulls in generated code for `LiftContext` — presumably the `lift_*`
// helpers used below (`lift_type`, `lift_op`, ...); verify in the build script.
include!("autogen_context.rs");
/// Error lifting a data representation of an operand into the structured
/// representation.
///
/// Converted into `InstructionError::Operand` via `From` when propagated.
#[derive(Clone, Debug)]
pub enum OperandError {
    /// Operand has a wrong type.
    WrongType,
    /// Operand is an integer value that corresponds to a specified enum,
    /// but the given integer is not known to have a mapping.
    WrongEnumValue,
    /// Operand is missing from the list.
    Missing,
}
/// Error lifting a data representation of an instruction.
///
/// `WrongOpcode` doubles as a "not this kind of instruction" signal that
/// lets callers try another lifter (see `LiftContext::convert`).
#[derive(Clone, Debug)]
pub enum InstructionError {
    /// Instruction has a wrong opcode.
    WrongOpcode,
    /// Instruction is missing a result <id> or type.
    MissingResult,
    /// One of the operands can not be lifted.
    Operand(OperandError),
}
/// Promote an operand-level error into an instruction-level one.
impl From<OperandError> for InstructionError {
    fn from(cause: OperandError) -> Self {
        InstructionError::Operand(cause)
    }
}
/// Error that may occur during the conversion from the data representation
/// of a module into a structured representation.
#[derive(Clone, Debug)]
pub enum ConversionError {
    /// The module has no header to read the version from.
    MissingHeader,
    /// A function lacks its definition instruction.
    MissingFunction,
    /// A function's type could not be found.
    MissingFunctionType,
    /// A basic block lacks its label.
    MissingLabel,
    /// A basic block has no instructions, hence no terminator.
    MissingTerminator,
    /// An instruction could not be lifted.
    Instruction(InstructionError),
}
/// Promote an instruction-level error into a module-conversion error.
impl From<InstructionError> for ConversionError {
    fn from(cause: InstructionError) -> Self {
        ConversionError::Instruction(cause)
    }
}
impl LiftContext {
    /// Convert a module from the data representation into structured representation.
    ///
    /// Lifts the global type/constant instructions first, then lifts every
    /// function block by block, and finally assembles the module metadata.
    /// Panics if a type or constant instruction fails to lift for any reason
    /// other than an unrecognized opcode.
    pub fn convert(module: &dr::Module) -> Result<module::Module, ConversionError> {
        let mut context = LiftContext {
            types: LiftStorage::new(),
            constants: LiftStorage::new(),
            blocks: LiftStorage::new(),
            ops: LiftStorage::new(),
        };
        let mut functions = Vec::new();
        let entry_points = Vec::new();
        // First pass: each global instruction is tried as a type, then as a
        // constant; `WrongOpcode` means "not this kind", anything else is fatal.
        for inst in module.types_global_values.iter() {
            match context.lift_type(inst) {
                Ok(value) => {
                    if let Some(id) = inst.result_id {
                        context.types.append_id(id, value);
                    }
                    continue;
                }
                Err(InstructionError::WrongOpcode) => {}
                Err(e) => panic!("Type lift error: {:?}", e),
            }
            match context.lift_constant(inst) {
                Ok(value) => {
                    if let Some(id) = inst.result_id {
                        context.constants.append_id(id, value);
                    }
                    continue;
                }
                Err(InstructionError::WrongOpcode) => {}
                Err(e) => panic!("Constant lift error: {:?}", e),
            }
        }
        for fun in module.functions.iter() {
            let def =
                context.lift_function(fun.def.as_ref().ok_or(ConversionError::MissingFunction)?)?;
            //TODO: lift function type instruction
            for block in fun.blocks.iter() {
                let mut arguments = Vec::new();
                for inst in &block.instructions {
                    match inst.class.opcode {
                        spirv::Op::Line => {} // skip line decorations
                        // Phi result types become the block's argument list in
                        // the structured form.
                        spirv::Op::Phi => {
                            let ty = context.types.lookup_token(
                                inst.result_type.ok_or(InstructionError::MissingResult)?,
                            );
                            arguments.push(ty);
                            // Sanity-check if all source variables are of the same type
                            // (operands come in (value, parent-block) pairs, hence step_by(2)).
                            for op in inst.operands.iter().step_by(2) {
                                match op {
                                    dr::Operand::IdRef(id) => {
                                        if let Some((_, info)) = context.ops.lookup_safe(*id) {
                                            assert_eq!(Some(ty), info.ty);
                                        } else {
                                            // let (v, info) =
                                            // context.constants.lookup_safe(*id).unwrap();
                                            // TODO: Can't convert Constant back to their lowered type yet!
                                            // assert_eq!(Some(ty), info.ty.as_ref());
                                        }
                                    }
                                    _ => {
                                        return Err(ConversionError::Instruction(
                                            InstructionError::Operand(OperandError::Missing),
                                        ))
                                    }
                                };
                            }
                        }
                        // Any other value-producing instruction is lifted into
                        // the ops storage, remembering its result type.
                        _ => {
                            if let Some(id) = inst.result_id {
                                let op = context.lift_op(inst)?;
                                let types = &context.types;
                                let (token, entry) = context.ops.append(id, op);
                                entry.insert(OpInfo {
                                    op: token,
                                    ty: inst.result_type.map(|ty| *types.lookup(ty).1),
                                });
                            }
                        }
                    }
                }
                // The last instruction of the block is treated as its terminator.
                let terminator = context.lift_terminator(
                    block
                        .instructions
                        .last()
                        .ok_or(ConversionError::MissingTerminator)?,
                )?;
                context.blocks.append_id(
                    block.label.as_ref().unwrap().result_id.unwrap(),
                    module::Block {
                        arguments,
                        ops: Vec::new(),
                        terminator,
                    },
                );
            }
            let start_label = fun.blocks[0].label.as_ref().unwrap().result_id.unwrap();
            let start_block = context.blocks.lookup_token(start_label);
            // Drain the per-function block storage so the next function starts clean.
            let blocks = mem::replace(&mut context.blocks, LiftStorage::new()).unwrap();
            functions.push(module::Function {
                control: def.function_control,
                result: context.types.append_id(1, Type::Void), //TODO: fty.return_type,
                parameters: Vec::new(),
                blocks,
                start_block,
            });
        }
        Ok(module::Module {
            version: match module.header {
                Some(ref header) => header.version,
                None => return Err(ConversionError::MissingHeader),
            },
            capabilities: module
                .capabilities
                .iter()
                .map(|cap| context.lift_capability(cap).map(|cap| cap.capability))
                .collect::<Result<_, InstructionError>>()?,
            extensions: Vec::new(),
            ext_inst_imports: Vec::new(),
            memory_model: match module.memory_model {
                Some(ref mm) => context.lift_memory_model(mm)?,
                None => return Err(ConversionError::MissingHeader),
            },
            entry_points,
            types: context.types.unwrap(),
            constants: context.constants.unwrap(),
            ops: context.ops.unwrap(),
            functions,
        })
    }
    /// Resolve a branch destination <id> into a structured `Jump`.
    /// Uses the non-`_safe` lookup, so the destination block is assumed to
    /// have been lifted already.
    fn lookup_jump(&self, destination: spirv::Word) -> module::Jump {
        let (_, block) = self.blocks.lookup(destination);
        module::Jump {
            block: *block,
            arguments: Vec::new(), //TODO
        }
    }
    /// Lift a single constant-defining instruction into a `Constant`.
    ///
    /// Returns `InstructionError::WrongOpcode` for instructions that do not
    /// define a constant so the caller can try other lifters.
    fn lift_constant(&self, inst: &dr::Instruction) -> Result<Constant, InstructionError> {
        match inst.class.opcode {
            spirv::Op::ConstantTrue => Ok(Constant::Bool(true)),
            spirv::Op::ConstantFalse => Ok(Constant::Bool(false)),
            spirv::Op::Constant => {
                match inst.result_type {
                    Some(id) => {
                        let oper = inst
                            .operands
                            .first()
                            .ok_or(InstructionError::Operand(OperandError::Missing))?;
                        // The result type decides how the raw 32-bit literal is
                        // interpreted (unsigned int, signed int, or f32 bits).
                        let (value, width) = match *self.types.lookup(id).0 {
                            Type::Int {
                                signedness: 0,
                                width,
                            } => match *oper {
                                dr::Operand::LiteralBit32(v) => (Constant::UInt(v), width),
                                _ => {
                                    return Err(InstructionError::Operand(OperandError::WrongType))
                                }
                            },
                            Type::Int { width, .. } => match *oper {
                                dr::Operand::LiteralBit32(v) => (Constant::Int(v as i32), width),
                                _ => {
                                    return Err(InstructionError::Operand(OperandError::WrongType))
                                }
                            },
                            Type::Float { width } => match *oper {
                                dr::Operand::LiteralBit32(v) => {
                                    (Constant::Float(f32::from_bits(v)), width)
                                }
                                _ => {
                                    return Err(InstructionError::Operand(OperandError::WrongType))
                                }
                            },
                            _ => return Err(InstructionError::MissingResult),
                        };
                        // Wider-than-32-bit constants are currently accepted
                        // silently; the warning below is disabled.
                        if width > 32 {
                            //log::warn!("Constant <id> {} doesn't fit in 32 bits", id);
                        }
                        Ok(value)
                    }
                    _ => Err(InstructionError::MissingResult),
                }
            }
            spirv::Op::ConstantComposite => {
                let mut vec = Vec::with_capacity(inst.operands.len());
                for oper in inst.operands.iter() {
                    let token = match *oper {
                        dr::Operand::IdRef(v) => self.constants.lookup_token(v),
                        _ => return Err(InstructionError::Operand(OperandError::WrongType)),
                    };
                    vec.push(token);
                }
                Ok(Constant::Composite(vec))
            }
            spirv::Op::ConstantSampler => {
                // Operands: addressing mode, normalized flag, filter mode.
                if inst.operands.len() < 3 {
                    return Err(InstructionError::Operand(OperandError::Missing));
                }
                Ok(Constant::Sampler {
                    addressing_mode: match inst.operands[0] {
                        dr::Operand::SamplerAddressingMode(v) => v,
                        _ => return Err(InstructionError::Operand(OperandError::WrongType)),
                    },
                    normalized: match inst.operands[1] {
                        dr::Operand::LiteralBit32(v) => v != 0,
                        _ => return Err(InstructionError::Operand(OperandError::WrongType)),
                    },
                    filter_mode: match inst.operands[2] {
                        dr::Operand::SamplerFilterMode(v) => v,
                        _ => return Err(InstructionError::Operand(OperandError::WrongType)),
                    },
                })
            }
            spirv::Op::ConstantNull => Ok(Constant::Null),
            spirv::Op::ConstantCompositeContinuedINTEL
            | spirv::Op::SpecConstantCompositeContinuedINTEL => todo!(),
            _ => Err(InstructionError::WrongOpcode),
        }
    }
}
|
use dlal_component_base::component;
// Marker meaning "no note / no velocity is currently remembered".
const SENTINEL: u8 = 0xff;
component!(
    {"in": ["midi"], "out": ["midi", "audio"]},
    ["run_size", "sample_rate", "multi"],
    {
        /*
        We have three states, with transitions as follows.
        fresh
         |
         +--> note, when a note is played
                - rhythm: start E5 with same velocity
                - store the number
                - match pitch to note
        note
         |
         +--> note, when a note is played
         |      - rhythm: forward the velocity
         |      - store the number
         |      - match pitch to note
         +--> grace, when the stored note ends
                - store the velocity
                - start silence timer
        grace
         |
         +--> note, when a note is played
         |      - rhythm: forward the velocity
         |      - store the number
         |      - match pitch to note
         |      - forget the velocity
         +--> fresh, when silence timer exceeds grace period
                - rhythm: end E5 with stored velocity
                - forget the note
                - forget the velocity
        The pitch is equal to that of the last MIDI message.
        This means:
        1) the pitch is equal to the stored note's, unless two note-off events have been received in a row, and
        2) the pitch can be controlled in isolation with note-off events.
        By remembering and forgetting carefully, we can keep track of state implicitly.
        if we remember a velocity { grace }
        else if we remember a note { note }
        else { fresh }
        */
        note: u8,
        pitch: f32,
        velocity: u8,
        silence: f32,
        grace: f32,
    },
    {},
);
impl ComponentTrait for Component {
    fn init(&mut self) {
        // Start in the "fresh" state: no note and no velocity remembered.
        self.note = SENTINEL;
        self.velocity = SENTINEL;
        // Grace period before a released note is considered truly over
        // (in seconds — silence below accumulates run_size / sample_rate).
        self.grace = 0.1;
    }
    fn run(&mut self) {
        // grace
        if self.velocity != SENTINEL {
            self.silence += self.run_size as f32 / self.sample_rate as f32;
            // grace -> fresh
            if self.silence > self.grace {
                // rhythm: end the held rhythm note (0x40) with the stored velocity.
                self.multi_midi(&[0x80, 0x40, self.velocity]);
                self.note = SENTINEL;
                self.velocity = SENTINEL;
            }
        }
        // send pitch as control voltage to outputs
        for output in &self.outputs {
            if let Some(audio) = output.audio(self.run_size) {
                for i in 0..self.run_size {
                    audio[i] += self.pitch;
                }
            }
        }
    }
    fn midi(&mut self, msg: &[u8]) {
        // Ignore anything shorter than a status byte plus two data bytes.
        if msg.len() < 3 {
            return;
        }
        let type_nibble = msg[0] & 0xf0;
        // Note off: an explicit note-off (0x80), or a note-on (0x90) with
        // velocity 0. `&&` binds tighter than `||`, so this parses as
        // 0x80 || (0x90 && vel == 0) — which is the intent.
        if type_nibble == 0x80 || type_nibble == 0x90 && msg[2] == 0 {
            // note -> grace
            if msg[1] == self.note {
                self.velocity = msg[2];
                self.silence = 0.0;
            }
            // pitch
            self.pitch = msg[1] as f32 / 128.0;
            if std::option_env!("DLAL_SNOOP_RHYMEL").is_some() {
                println!("pitch {}", self.pitch);
            }
        } else if type_nibble == 0x90 {
            // fresh -> note
            if self.note == SENTINEL && self.velocity == SENTINEL {
                self.multi_midi(&[0x90, 0x40, msg[2]]);
                self.note = msg[1];
            }
            // (note, grace) -> note
            else {
                // 0xa0 = polyphonic aftertouch: forward the velocity without
                // retriggering the rhythm note.
                self.multi_midi(&[0xa0, 0x40, msg[2]]);
                self.note = msg[1];
                self.velocity = SENTINEL;
            }
            // pitch
            self.pitch = self.note as f32 / 128.0;
            if std::option_env!("DLAL_SNOOP_RHYMEL").is_some() {
                println!("pitch {}", self.pitch);
            }
        }
    }
}
|
use crate::Result;
use image::GenericImageView;
use std::path::Path;
/// A diffuse texture loaded from disk, bundled with the GPU objects
/// (view, sampler, bind group and its layout) needed to sample it in a
/// fragment shader.
pub struct Texture {
    pub diffuse_texture: wgpu::Texture,
    pub diffuse_texture_view: wgpu::TextureView,
    pub diffuse_sampler: wgpu::Sampler,
    pub diffuse_bind_group: wgpu::BindGroup,
    pub diffuse_bind_group_layout: wgpu::BindGroupLayout,
}
impl Texture {
    /// Load the image at `file`, upload it to the GPU as an RGBA8 sRGB
    /// texture, and build the view, sampler, bind group layout and bind
    /// group needed to sample it from a fragment shader.
    ///
    /// Returns an error if the image cannot be opened or decoded.
    pub fn new(file: &Path, device: &wgpu::Device, queue: &mut wgpu::Queue) -> Result<Self> {
        let texture = image::open(file)?;
        println!("Dimensions {:?}", texture.dimensions());
        println!("Color Type: {:?}", texture.color());
        let texture_size = texture.dimensions();
        let size = wgpu::Extent3d {
            width: texture_size.0,
            height: texture_size.1,
            depth: 1,
        };
        // Normalize whatever the source format is into tightly packed RGBA8.
        let texture_rgba = texture.into_rgba();
        let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Diffuse"),
            size,
            array_layer_count: 1,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba8UnormSrgb,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
        });
        // Stage the pixels in a buffer, then record a buffer-to-texture copy.
        let diffuse_buffer = device
            .create_buffer_with_data(&texture_rgba, wgpu::BufferUsage::COPY_SRC);
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("texture_buffer_copy_encoder"), });
        encoder.copy_buffer_to_texture(
            wgpu::BufferCopyView {
                buffer: &diffuse_buffer,
                offset: 0,
                // NOTE(review): assumes 4 bytes/pixel with no row padding; some
                // wgpu versions require bytes_per_row to be 256-byte aligned —
                // verify for widths that are not a multiple of 64.
                bytes_per_row: 4 * texture_size.0,
                rows_per_image: texture_size.1,
            },
            wgpu::TextureCopyView {
                texture: &diffuse_texture,
                mip_level: 0,
                array_layer: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            size,
        );
        queue.submit(&[encoder.finish()]);
        let diffuse_texture_view = diffuse_texture.create_default_view();
        let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            lod_min_clamp: -100.0,
            lod_max_clamp: 100.0,
            compare: wgpu::CompareFunction::Always,
        });
        let texture_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                bindings: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        // NOTE(review): `Uint` component type paired with an
                        // `Rgba8UnormSrgb` texture looks inconsistent —
                        // normalized formats are usually sampled as `Float`;
                        // confirm against the shader's sampler declaration.
                        ty: wgpu::BindingType::SampledTexture {
                            multisampled: false,
                            dimension: wgpu::TextureViewDimension::D2,
                            component_type: wgpu::TextureComponentType::Uint
                        },
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Sampler{
                            comparison: false
                        }
                    },
                ],
                label: Some("texture_bind_group_layout")
            });
        let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            bindings: &[
                wgpu::Binding {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
                },
                wgpu::Binding {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
                },
            ],
            label: Some("diffuse_bind_group")
        });
        Ok(Texture {
            diffuse_texture,
            diffuse_texture_view,
            diffuse_sampler,
            diffuse_bind_group,
            diffuse_bind_group_layout: texture_bind_group_layout,
        })
    }
}
|
use crate::algorithm::contains::Contains;
use crate::{Line, LineString, Point, Polygon, Rect};
use num_traits::Float;
/// Checks if the geometry A intersects the geometry B.
///
/// Implementations below are symmetric: where only one direction is
/// implemented directly, the mirrored impl delegates to it.
pub trait Intersects<Rhs = Self> {
    /// Checks if the geometry A intersects the geometry B.
    ///
    /// # Examples
    ///
    /// ```
    /// use geo::algorithm::intersects::Intersects;
    /// use geo::{Coordinate, LineString, Point};
    ///
    /// let linestring = LineString::from(vec![(3., 2.), (7., 6.)]);
    ///
    /// assert!(linestring.intersects(&LineString::from(vec![(3., 4.), (8., 4.)])));
    /// assert!(!linestring.intersects(&LineString::from(vec![(9., 2.), (11., 5.)])));
    /// ```
    fn intersects(&self, rhs: &Rhs) -> bool;
}
impl<T> Intersects<Point<T>> for Line<T>
where
    T: Float,
{
    /// A point intersects a segment when it lies on it: its parameter along
    /// each non-degenerate axis must agree and fall within [0, 1].
    fn intersects(&self, p: &Point<T>) -> bool {
        // Parameter of `p` along x, unless the segment is vertical.
        let param_x = match self.dx() {
            dx if dx == T::zero() => None,
            dx => Some((p.x() - self.start.x) / dx),
        };
        // Parameter of `p` along y, unless the segment is horizontal.
        let param_y = match self.dy() {
            dy if dy == T::zero() => None,
            dy => Some((p.y() - self.start.y) / dy),
        };
        match (param_x, param_y) {
            // Degenerate segment: both endpoints coincide.
            (None, None) => p.0 == self.start,
            // Horizontal segment.
            (Some(t), None) => p.y() == self.start.y && T::zero() <= t && t <= T::one(),
            // Vertical segment.
            (None, Some(t)) => p.x() == self.start.x && T::zero() <= t && t <= T::one(),
            // General segment: both parameters must agree (within epsilon)
            // and lie inside the segment.
            (Some(tx), Some(ty)) => {
                (tx - ty).abs() <= T::epsilon() && T::zero() <= tx && tx <= T::one()
            }
        }
    }
}
impl<T> Intersects<Line<T>> for Point<T>
where
T: Float,
{
fn intersects(&self, line: &Line<T>) -> bool {
line.intersects(self)
}
}
impl<T> Intersects<Line<T>> for Line<T>
where
    T: Float,
{
    /// Segment/segment intersection via Cramer's rule:
    /// https://en.wikipedia.org/wiki/Intersection_%28Euclidean_geometry%29#Two_line_segments
    fn intersects(&self, line: &Line<T>) -> bool {
        // Coefficients of the 2x2 linear system for the two parameters.
        let a1 = self.dx();
        let a2 = self.dy();
        let b1 = -line.dx();
        let b2 = -line.dy();
        let c1 = line.start.x - self.start.x;
        let c2 = line.start.y - self.start.y;
        let denominator = a1 * b2 - a2 * b1;
        if denominator == T::zero() {
            // Parallel (possibly collinear) segments intersect iff some
            // endpoint of one lies on the other.
            let (self_start, self_end) = self.points();
            let (other_start, other_end) = line.points();
            self_start.intersects(line)
                || self_end.intersects(line)
                || other_start.intersects(self)
                || other_end.intersects(self)
        } else {
            // Solve for both parameters; the segments cross iff both lie in [0, 1].
            let s = (c1 * b2 - c2 * b1) / denominator;
            let t = (a1 * c2 - a2 * c1) / denominator;
            (T::zero() <= s) && (s <= T::one()) && (T::zero() <= t) && (t <= T::one())
        }
    }
}
impl<T> Intersects<LineString<T>> for Line<T>
where
    T: Float,
{
    /// A segment intersects a linestring iff it intersects one of its segments.
    fn intersects(&self, linestring: &LineString<T>) -> bool {
        for segment in linestring.lines() {
            if self.intersects(&segment) {
                return true;
            }
        }
        false
    }
}
impl<T> Intersects<Line<T>> for LineString<T>
where
T: Float,
{
fn intersects(&self, line: &Line<T>) -> bool {
line.intersects(self)
}
}
impl<T> Intersects<Polygon<T>> for Line<T>
where
    T: Float,
{
    /// A segment intersects a polygon if it crosses any ring, or if either
    /// endpoint lies inside the polygon (the fully-contained case).
    fn intersects(&self, p: &Polygon<T>) -> bool {
        if p.exterior().intersects(self) {
            return true;
        }
        if p.interiors().iter().any(|inner| inner.intersects(self)) {
            return true;
        }
        p.contains(&self.start_point()) || p.contains(&self.end_point())
    }
}
impl<T> Intersects<Line<T>> for Polygon<T>
where
T: Float,
{
fn intersects(&self, line: &Line<T>) -> bool {
line.intersects(self)
}
}
impl<T> Intersects<LineString<T>> for LineString<T>
where
    T: Float,
{
    // See: https://github.com/brandonxiang/geojson-python-utils/blob/33b4c00c6cf27921fb296052d0c0341bd6ca1af2/geojson_utils.py
    fn intersects(&self, linestring: &LineString<T>) -> bool {
        if self.0.is_empty() || linestring.0.is_empty() {
            return false;
        }
        // Test every segment of `self` against every segment of `linestring`.
        self.lines().any(|a| {
            linestring.lines().any(|b| {
                // Zero denominator means parallel segments, which this
                // algorithm treats as non-intersecting.
                let denom = b.dy() * a.dx() - b.dx() * a.dy();
                if denom == T::zero() {
                    return false;
                }
                let ua_t = b.dx() * (a.start.y - b.start.y) - b.dy() * (a.start.x - b.start.x);
                let ub_t = a.dx() * (a.start.y - b.start.y) - a.dy() * (a.start.x - b.start.x);
                let u_a = ua_t / denom;
                let u_b = ub_t / denom;
                // Both parameters must fall within their segment.
                (T::zero() <= u_a)
                    && (u_a <= T::one())
                    && (T::zero() <= u_b)
                    && (u_b <= T::one())
            })
        })
    }
}
impl<T> Intersects<LineString<T>> for Polygon<T>
where
    T: Float,
{
    /// A linestring intersects a polygon if it crosses any ring, or if any
    /// of its points lies inside the polygon (the fully-contained case).
    fn intersects(&self, linestring: &LineString<T>) -> bool {
        let crosses_ring = self.exterior().intersects(linestring)
            || self
                .interiors()
                .iter()
                .any(|inner| inner.intersects(linestring));
        crosses_ring || linestring.points_iter().any(|point| self.contains(&point))
    }
}
impl<T> Intersects<Polygon<T>> for LineString<T>
where
T: Float,
{
fn intersects(&self, polygon: &Polygon<T>) -> bool {
polygon.intersects(self)
}
}
// helper function for intersection check
/// `true` iff `value` lies in the closed interval [`min`, `max`].
fn value_in_range<T>(value: T, min: T, max: T) -> bool
where
    T: Float,
{
    min <= value && value <= max
}
impl<T> Intersects<Rect<T>> for Rect<T>
where
    T: Float,
{
    /// Axis-aligned rectangles intersect iff their projections overlap on
    /// both the x and the y axis.
    fn intersects(&self, bounding_rect: &Rect<T>) -> bool {
        // Two 1-D intervals [a_min, a_max] and [b_min, b_max] overlap iff
        // either one's minimum falls inside the other.
        let overlaps = |a_min, a_max, b_min, b_max| {
            value_in_range(a_min, b_min, b_max) || value_in_range(b_min, a_min, a_max)
        };
        overlaps(
            self.min().x,
            self.max().x,
            bounding_rect.min().x,
            bounding_rect.max().x,
        ) && overlaps(
            self.min().y,
            self.max().y,
            bounding_rect.min().y,
            bounding_rect.max().y,
        )
    }
}
impl<T> Intersects<Polygon<T>> for Rect<T>
where
T: Float,
{
fn intersects(&self, polygon: &Polygon<T>) -> bool {
polygon.intersects(self)
}
}
impl<T> Intersects<Rect<T>> for Polygon<T>
where
    T: Float,
{
    /// Convert the rectangle into an equivalent closed polygon and reuse
    /// the polygon/polygon intersection logic.
    fn intersects(&self, bounding_rect: &Rect<T>) -> bool {
        let (min, max) = (bounding_rect.min(), bounding_rect.max());
        let exterior = LineString::from(vec![
            (min.x, min.y),
            (min.x, max.y),
            (max.x, max.y),
            (max.x, min.y),
            (min.x, min.y),
        ]);
        self.intersects(&Polygon::new(exterior, vec![]))
    }
}
impl<T> Intersects<Polygon<T>> for Polygon<T>
where
    T: Float,
{
    /// Two polygons intersect when any of their rings cross, or when one is
    /// entirely contained in the other (checked via the exterior rings).
    fn intersects(&self, polygon: &Polygon<T>) -> bool {
        // self intersects (or contains) the other polygon's exterior ring
        if self.intersects(polygon.exterior()) {
            return true;
        }
        // ... or one of its interior rings
        if polygon
            .interiors()
            .iter()
            .any(|inner_line_string| self.intersects(inner_line_string))
        {
            return true;
        }
        // self is contained inside polygon
        polygon.intersects(self.exterior())
    }
}
/// Unit tests for the `Intersects` implementations above, grouped by the
/// pair of geometry types under test.
#[cfg(test)]
mod test {
    use crate::algorithm::intersects::Intersects;
    use crate::{line_string, polygon, Coordinate, Line, LineString, Point, Polygon, Rect};
    /// Tests: intersection LineString and LineString
    #[test]
    fn empty_linestring1_test() {
        let linestring = line_string![(x: 3., y: 2.), (x: 7., y: 6.)];
        assert!(!line_string![].intersects(&linestring));
    }
    #[test]
    fn empty_linestring2_test() {
        let linestring = line_string![(x: 3., y: 2.), (x: 7., y: 6.)];
        assert!(!linestring.intersects(&LineString(Vec::new())));
    }
    #[test]
    fn empty_all_linestring_test() {
        let empty: LineString<f64> = line_string![];
        assert!(!empty.intersects(&empty));
    }
    #[test]
    fn intersect_linestring_test() {
        let ls1 = line_string![(x: 3., y: 2.), (x: 7., y: 6.)];
        let ls2 = line_string![(x: 3., y: 4.), (x: 8., y: 4.)];
        assert!(ls1.intersects(&ls2));
    }
    #[test]
    fn parallel_linestrings_test() {
        // Parallel non-collinear segments never intersect.
        let ls1 = line_string![(x: 3., y: 2.), (x: 7., y: 6.)];
        let ls2 = line_string![(x: 3., y: 1.), (x: 7., y: 5.)];
        assert!(!ls1.intersects(&ls2));
    }
    /// Tests: intersection LineString and Polygon
    #[test]
    fn linestring_in_polygon_test() {
        let poly = polygon![
            (x: 0., y: 0.),
            (x: 5., y: 0.),
            (x: 5., y: 6.),
            (x: 0., y: 6.),
            (x: 0., y: 0.),
        ];
        let ls = line_string![(x: 2., y: 2.), (x: 3., y: 3.)];
        assert!(poly.intersects(&ls));
    }
    #[test]
    fn linestring_on_boundary_polygon_test() {
        let poly = Polygon::new(
            LineString::from(vec![(0., 0.), (5., 0.), (5., 6.), (0., 6.), (0., 0.)]),
            Vec::new(),
        );
        assert!(poly.intersects(&LineString::from(vec![(0., 0.), (5., 0.)])));
        assert!(poly.intersects(&LineString::from(vec![(5., 0.), (5., 6.)])));
        assert!(poly.intersects(&LineString::from(vec![(5., 6.), (0., 6.)])));
        assert!(poly.intersects(&LineString::from(vec![(0., 6.), (0., 0.)])));
    }
    #[test]
    fn intersect_linestring_polygon_test() {
        let poly = Polygon::new(
            LineString::from(vec![(0., 0.), (5., 0.), (5., 6.), (0., 6.), (0., 0.)]),
            Vec::new(),
        );
        assert!(poly.intersects(&LineString::from(vec![(2., 2.), (6., 6.)])));
    }
    #[test]
    fn linestring_outside_polygon_test() {
        let poly = Polygon::new(
            LineString::from(vec![(0., 0.), (5., 0.), (5., 6.), (0., 6.), (0., 0.)]),
            Vec::new(),
        );
        assert!(!poly.intersects(&LineString::from(vec![(7., 2.), (9., 4.)])));
    }
    #[test]
    fn linestring_in_inner_polygon_test() {
        // Polygon with one interior ring (hole) from (1,1) to (4,4).
        let e = LineString::from(vec![(0., 0.), (5., 0.), (5., 6.), (0., 6.), (0., 0.)]);
        let v = vec![LineString::from(vec![
            (1., 1.),
            (4., 1.),
            (4., 4.),
            (1., 4.),
            (1., 1.),
        ])];
        let poly = Polygon::new(e, v);
        assert!(!poly.intersects(&LineString::from(vec![(2., 2.), (3., 3.)])));
        assert!(poly.intersects(&LineString::from(vec![(2., 2.), (4., 4.)])));
    }
    #[test]
    fn linestring_traverse_polygon_test() {
        let e = LineString::from(vec![(0., 0.), (5., 0.), (5., 6.), (0., 6.), (0., 0.)]);
        let v = vec![LineString::from(vec![
            (1., 1.),
            (4., 1.),
            (4., 4.),
            (1., 4.),
            (1., 1.),
        ])];
        let poly = Polygon::new(e, v);
        assert!(poly.intersects(&LineString::from(vec![(2., 0.5), (2., 5.)])));
    }
    #[test]
    fn linestring_in_inner_with_2_inner_polygon_test() {
        //                                        (8,9)
        //     (2,8)                                |                                      (14,8)
        //      ------------------------------------|------------------------------------------
        //      |                                   |                                         |
        //      |     (4,7)            (6,7)        |                                         |
        //      |       ------------------          |                 (11,7)                  |
        //      |                                   |                    |                    |
        //      |     (4,6)                (7,6)    |     (9,6)          |      (12,6)        |
        //      |       ----------------------      |       ----------------|---------        |
        //      |       |                    |      |       |               |        |        |
        //      |       |  (6,5)             |      |       |               |        |        |
        //      |       |    /               |      |       |               |        |        |
        //      |       |   /                |      |       |               |        |        |
        //      |       | (5,4)              |      |       |               |        |        |
        //      |       |                    |      |       |               |        |        |
        //      |       ----------------------      |       ----------------|---------        |
        //      |     (4,3)                (7,3)    |     (9,3)             |      (12,3)     |
        //      |                                   |                       | (11,2.5)        |
        //      |                                   |                                         |
        //      ------------------------------------|------------------------------------------
        //    (2,2)                                 |                                      (14,2)
        //                                        (8,1)
        //
        let e = LineString::from(vec![(2., 2.), (14., 2.), (14., 8.), (2., 8.), (2., 2.)]);
        let v = vec![
            LineString::from(vec![(4., 3.), (7., 3.), (7., 6.), (4., 6.), (4., 3.)]),
            LineString::from(vec![(9., 3.), (12., 3.), (12., 6.), (9., 6.), (9., 3.)]),
        ];
        let poly = Polygon::new(e, v);
        assert!(!poly.intersects(&LineString::from(vec![(5., 4.), (6., 5.)])));
        assert!(poly.intersects(&LineString::from(vec![(11., 2.5), (11., 7.)])));
        assert!(poly.intersects(&LineString::from(vec![(4., 7.), (6., 7.)])));
        assert!(poly.intersects(&LineString::from(vec![(8., 1.), (8., 9.)])));
    }
    #[test]
    fn polygons_do_not_intersect() {
        let p1 = Polygon::new(
            LineString::from(vec![(1., 3.), (3., 3.), (3., 5.), (1., 5.), (1., 3.)]),
            Vec::new(),
        );
        let p2 = Polygon::new(
            LineString::from(vec![
                (10., 30.),
                (30., 30.),
                (30., 50.),
                (10., 50.),
                (10., 30.),
            ]),
            Vec::new(),
        );
        assert!(!p1.intersects(&p2));
        assert!(!p2.intersects(&p1));
    }
    #[test]
    fn polygons_overlap() {
        let p1 = Polygon::new(
            LineString::from(vec![(1., 3.), (3., 3.), (3., 5.), (1., 5.), (1., 3.)]),
            Vec::new(),
        );
        let p2 = Polygon::new(
            LineString::from(vec![(2., 3.), (4., 3.), (4., 7.), (2., 7.), (2., 3.)]),
            Vec::new(),
        );
        assert!(p1.intersects(&p2));
        assert!(p2.intersects(&p1));
    }
    #[test]
    fn polygon_contained() {
        let p1 = Polygon::new(
            LineString::from(vec![(1., 3.), (4., 3.), (4., 6.), (1., 6.), (1., 3.)]),
            Vec::new(),
        );
        let p2 = Polygon::new(
            LineString::from(vec![(2., 4.), (3., 4.), (3., 5.), (2., 5.), (2., 4.)]),
            Vec::new(),
        );
        assert!(p1.intersects(&p2));
        assert!(p2.intersects(&p1));
    }
    #[test]
    fn polygons_conincident() {
        // Identical polygons intersect.
        let p1 = Polygon::new(
            LineString::from(vec![(1., 3.), (4., 3.), (4., 6.), (1., 6.), (1., 3.)]),
            Vec::new(),
        );
        let p2 = Polygon::new(
            LineString::from(vec![(1., 3.), (4., 3.), (4., 6.), (1., 6.), (1., 3.)]),
            Vec::new(),
        );
        assert!(p1.intersects(&p2));
        assert!(p2.intersects(&p1));
    }
    #[test]
    fn polygon_intersects_bounding_rect_test() {
        // Polygon poly =
        //
        // (0,8)               (12,8)
        // ┌──────────────────────┐
        // │   (7,7)  (11,7)      │
        // │      ┌──────┐        │
        // │      │      │        │
        // │      │(hole)│        │
        // │      │      │        │
        // │      │      │        │
        // │      └──────┘        │
        // │   (7,4)  (11,4)      │
        // │                      │
        // │                      │
        // │                      │
        // │                      │
        // │                      │
        // └──────────────────────┘
        // (0,0)               (12,0)
        let poly = Polygon::new(
            LineString::from(vec![(0., 0.), (12., 0.), (12., 8.), (0., 8.), (0., 0.)]),
            vec![LineString::from(vec![
                (7., 4.),
                (11., 4.),
                (11., 7.),
                (7., 7.),
                (7., 4.),
            ])],
        );
        let b1 = Rect::new(Coordinate { x: 11., y: 1. }, Coordinate { x: 13., y: 2. });
        let b2 = Rect::new(Coordinate { x: 2., y: 2. }, Coordinate { x: 8., y: 5. });
        let b3 = Rect::new(Coordinate { x: 8., y: 5. }, Coordinate { x: 10., y: 6. });
        let b4 = Rect::new(Coordinate { x: 1., y: 1. }, Coordinate { x: 3., y: 3. });
        // overlaps
        assert!(poly.intersects(&b1));
        // contained in exterior, overlaps with hole
        assert!(poly.intersects(&b2));
        // completely contained in the hole
        assert!(!poly.intersects(&b3));
        // completely contained in the polygon
        assert!(poly.intersects(&b4));
        // conversely,
        assert!(b1.intersects(&poly));
        assert!(b2.intersects(&poly));
        assert!(!b3.intersects(&poly));
        assert!(b4.intersects(&poly));
    }
    #[test]
    fn bounding_rect_test() {
        let bounding_rect_xl = Rect::new(
            Coordinate { x: -100., y: -200. },
            Coordinate { x: 100., y: 200. },
        );
        let bounding_rect_sm = Rect::new(
            Coordinate { x: -10., y: -20. },
            Coordinate { x: 10., y: 20. },
        );
        let bounding_rect_s2 =
            Rect::new(Coordinate { x: 0., y: 0. }, Coordinate { x: 20., y: 30. });
        // confirmed using GEOS
        assert_eq!(true, bounding_rect_xl.intersects(&bounding_rect_sm));
        assert_eq!(true, bounding_rect_sm.intersects(&bounding_rect_xl));
        assert_eq!(true, bounding_rect_sm.intersects(&bounding_rect_s2));
        assert_eq!(true, bounding_rect_s2.intersects(&bounding_rect_sm));
    }
    #[test]
    fn rect_interesection_consistent_with_poly_intersection_test() {
        // Rect/Rect, Rect/Polygon and Polygon/Polygon must agree.
        let bounding_rect_xl = Rect::new(
            Coordinate { x: -100., y: -200. },
            Coordinate { x: 100., y: 200. },
        );
        let bounding_rect_sm = Rect::new(
            Coordinate { x: -10., y: -20. },
            Coordinate { x: 10., y: 20. },
        );
        let bounding_rect_s2 =
            Rect::new(Coordinate { x: 0., y: 0. }, Coordinate { x: 20., y: 30. });
        assert_eq!(true, bounding_rect_xl.to_polygon().intersects(&bounding_rect_sm));
        assert_eq!(true, bounding_rect_xl.intersects(&bounding_rect_sm.to_polygon()));
        assert_eq!(true, bounding_rect_xl.to_polygon().intersects(&bounding_rect_sm.to_polygon()));
        assert_eq!(true, bounding_rect_sm.to_polygon().intersects(&bounding_rect_xl));
        assert_eq!(true, bounding_rect_sm.intersects(&bounding_rect_xl.to_polygon()));
        assert_eq!(true, bounding_rect_sm.to_polygon().intersects(&bounding_rect_xl.to_polygon()));
        assert_eq!(true, bounding_rect_sm.to_polygon().intersects(&bounding_rect_s2));
        assert_eq!(true, bounding_rect_sm.intersects(&bounding_rect_s2.to_polygon()));
        assert_eq!(true, bounding_rect_sm.to_polygon().intersects(&bounding_rect_s2.to_polygon()));
        assert_eq!(true, bounding_rect_s2.to_polygon().intersects(&bounding_rect_sm));
        assert_eq!(true, bounding_rect_s2.intersects(&bounding_rect_sm.to_polygon()));
        assert_eq!(true, bounding_rect_s2.to_polygon().intersects(&bounding_rect_sm.to_polygon()));
    }
    #[test]
    fn point_intersects_line_test() {
        let p0 = Point::new(2., 4.);
        // vertical line
        let line1 = Line::from([(2., 0.), (2., 5.)]);
        // point on line, but outside line segment
        let line2 = Line::from([(0., 6.), (1.5, 4.5)]);
        // point on line
        let line3 = Line::from([(0., 6.), (3., 3.)]);
        // point above line with positive slope
        let line4 = Line::from([(1., 2.), (5., 3.)]);
        // point below line with positive slope
        let line5 = Line::from([(1., 5.), (5., 6.)]);
        // point above line with negative slope
        let line6 = Line::from([(1., 2.), (5., -3.)]);
        // point below line with negative slope
        let line7 = Line::from([(1., 6.), (5., 5.)]);
        assert!(line1.intersects(&p0));
        assert!(p0.intersects(&line1));
        assert!(!line2.intersects(&p0));
        assert!(!p0.intersects(&line2));
        assert!(line3.intersects(&p0));
        assert!(p0.intersects(&line3));
        assert!(!line4.intersects(&p0));
        assert!(!p0.intersects(&line4));
        assert!(!line5.intersects(&p0));
        assert!(!p0.intersects(&line5));
        assert!(!line6.intersects(&p0));
        assert!(!p0.intersects(&line6));
        assert!(!line7.intersects(&p0));
        assert!(!p0.intersects(&line7));
    }
    #[test]
    fn line_intersects_line_test() {
        let line0 = Line::from([(0., 0.), (3., 4.)]);
        let line1 = Line::from([(2., 0.), (2., 5.)]);
        let line2 = Line::from([(0., 7.), (5., 4.)]);
        let line3 = Line::from([(0., 0.), (-3., -4.)]);
        assert!(line0.intersects(&line0));
        assert!(line0.intersects(&line1));
        assert!(!line0.intersects(&line2));
        assert!(line0.intersects(&line3));
        assert!(line1.intersects(&line0));
        assert!(line1.intersects(&line1));
        assert!(!line1.intersects(&line2));
        assert!(!line1.intersects(&line3));
        assert!(!line2.intersects(&line0));
        assert!(!line2.intersects(&line1));
        assert!(line2.intersects(&line2));
        assert!(!line1.intersects(&line3));
    }
    #[test]
    fn line_intersects_linestring_test() {
        let line0 = Line::from([(0., 0.), (3., 4.)]);
        let linestring0 = LineString::from(vec![(0., 1.), (1., 0.), (2., 0.)]);
        let linestring1 = LineString::from(vec![(0.5, 0.2), (1., 0.), (2., 0.)]);
        assert!(line0.intersects(&linestring0));
        assert!(!line0.intersects(&linestring1));
        assert!(linestring0.intersects(&line0));
        assert!(!linestring1.intersects(&line0));
    }
    #[test]
    fn line_intersects_polygon_test() {
        let line0 = Line::from([(0.5, 0.5), (2., 1.)]);
        let poly0 = Polygon::new(
            LineString::from(vec![(0., 0.), (1., 2.), (1., 0.), (0., 0.)]),
            vec![],
        );
        let poly1 = Polygon::new(
            LineString::from(vec![(1., -1.), (2., -1.), (2., -2.), (1., -1.)]),
            vec![],
        );
        // line contained in the hole
        let poly2 = Polygon::new(
            LineString::from(vec![(-1., -1.), (-1., 10.), (10., -1.), (-1., -1.)]),
            vec![LineString::from(vec![
                (0., 0.),
                (3., 4.),
                (3., 0.),
                (0., 0.),
            ])],
        );
        assert!(line0.intersects(&poly0));
        assert!(poly0.intersects(&line0));
        assert!(!line0.intersects(&poly1));
        assert!(!poly1.intersects(&line0));
        assert!(!line0.intersects(&poly2));
        assert!(!poly2.intersects(&line0));
    }
    #[test]
    // See https://github.com/georust/geo/issues/419
    fn rect_test_419() {
        let a = Rect::new(
            Coordinate {
                x: 9.228515625,
                y: 46.83013364044739,
            },
            Coordinate {
                x: 9.2724609375,
                y: 46.86019101567026,
            },
        );
        let b = Rect::new(
            Coordinate {
                x: 9.17953,
                y: 46.82018,
            },
            Coordinate {
                x: 9.26309,
                y: 46.88099,
            },
        );
        assert!(a.intersects(&b));
        assert!(b.intersects(&a));
    }
}
|
use digits_iterator::*;
use itertools::Itertools;
/// `true` when the digits never decrease from left to right
/// (trivially true for empty and single-digit inputs).
fn is_not_decreasing(pwd: &Vec<u8>) -> bool {
    !pwd.windows(2).any(|pair| pair[0] > pair[1])
}
/// `true` when `pwd` contains a qualifying run of adjacent equal digits.
///
/// With `limit_repeating_digits` set, only a run of *exactly* two equal
/// digits counts (puzzle part 2); otherwise any run of two or more
/// qualifies (part 1).
fn has_adjacent_digits(pwd: &[u8], limit_repeating_digits: bool) -> bool {
    // Single-pass run-length scan. The previous version materialized every
    // group as a Vec<Vec<&u8>> via itertools::group_by just to compare
    // lengths; this avoids all of those allocations and short-circuits.
    let mut start = 0;
    while start < pwd.len() {
        // Advance `end` past the run of digits equal to pwd[start].
        let mut end = start + 1;
        while end < pwd.len() && pwd[end] == pwd[start] {
            end += 1;
        }
        let run_len = end - start;
        if run_len >= 2 && (!limit_repeating_digits || run_len == 2) {
            return true;
        }
        start = end;
    }
    false
}
/// A password is valid when its digits never decrease and it contains the
/// required kind of adjacent repeated digits (see `has_adjacent_digits`).
fn is_valid_password(pwd: u32, limit_repeating_digits: bool) -> bool {
    // `digits` is a Vec<u8>; the type is inferred from the callees' parameters.
    let digits = pwd.digits().collect();
    is_not_decreasing(&digits) && has_adjacent_digits(&digits, limit_repeating_digits)
}
fn main() {
println!(
"Part 1: {:?}",
(245318..=765747)
.filter(|p| is_valid_password(*p, false))
.count()
);
println!(
"Part 2: {:?}",
(245318..=765747)
.filter(|p| is_valid_password(*p, true))
.count()
);
}
#[test]
fn part_1() {
    // Worked examples from the puzzle statement (part 1 adjacency rule).
    assert!(is_valid_password(111111, false));
    assert!(!is_valid_password(223450, false));
    assert!(!is_valid_password(123789, false));
}
#[test]
fn part_2() {
    // Worked examples from the puzzle statement (part 2: exactly one pair).
    assert!(is_valid_password(112233, true));
    assert!(!is_valid_password(123444, true));
    assert!(is_valid_password(111122, true));
}
|
extern crate kafka;
extern crate getopts;
extern crate env_logger;
use std::{env, io, fmt, process};
use kafka::consumer::{Consumer, FetchOffset};
/// This is a very simple command line application reading from a
/// specific kafka topic and dumping the messages to standard output.
/// Entry point: parse the CLI into a `Config`, then run the consume loop.
/// Any error is reported and the process exits with a non-zero status.
fn main() {
    env_logger::init().unwrap();
    let cfg = match Config::from_cmdline() {
        Ok(cfg) => cfg,
        Err(e) => {
            // Diagnostics go to stderr so stdout carries only message dumps
            // (previously these were printed to stdout).
            eprintln!("{}", e);
            process::exit(1);
        }
    };
    if let Err(e) = process(cfg) {
        eprintln!("{}", e);
        process::exit(1);
    }
}
fn process(cfg: Config) -> Result<(), Error> {
let mut c =
try!(Consumer::from_hosts(cfg.brokers, cfg.group, cfg.topic)
.with_fetch_max_wait_time(100)
.with_fetch_min_bytes(1_000)
.with_fetch_max_bytes_per_partition(100_000)
.with_fallback_offset(FetchOffset::Earliest)
.with_retry_max_bytes_limit(1_000_000)
.create());
let do_commit = !cfg.no_commit;
loop {
for ms in try!(c.poll()).iter() {
for m in ms.messages() {
let s = String::from_utf8_lossy(m.value);
println!("{}:{}@{}: {}", ms.topic(), ms.partition(), m.offset, s.trim());
}
c.consume_messageset(ms);
}
if do_commit {
try!(c.commit_consumed());
}
}
}
// --------------------------------------------------------------------
/// Top-level error type for this example: a kafka client error, an I/O
/// error, or a plain message (used for CLI parse failures and --help text).
enum Error {
    Kafka(kafka::error::Error),
    Io(io::Error),
    Literal(String),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::Kafka(ref e) => write!(f, "kafka-error: {}", e),
&Error::Io(ref e) => write!(f, "io-error: {}", e),
&Error::Literal(ref s) => write!(f, "{}", s),
}
}
}
/// Lets `?` convert kafka client errors into this example's `Error`.
impl From<kafka::error::Error> for Error {
    fn from(e: kafka::error::Error) -> Self { Error::Kafka(e) }
}
/// Lets `?` convert std I/O errors into this example's `Error`.
impl From<io::Error> for Error {
    fn from(e: io::Error) -> Self { Error::Io(e) }
}
// --------------------------------------------------------------------
/// Runtime configuration gathered from the command line.
struct Config {
    brokers: Vec<String>, // kafka bootstrap hosts, "host:port" per entry
    group: String,        // consumer group id
    topic: String,        // topic to read from
    no_commit: bool,      // when true, never commit consumed offsets
}
impl Config {
    /// Build a `Config` from `std::env::args`.
    ///
    /// Returns `Err(Error::Literal(..))` both for genuine parse failures and
    /// for `--help` (carrying the usage text), so the caller prints the
    /// message and exits in either case.
    fn from_cmdline() -> Result<Config, Error> {
        let args: Vec<_> = env::args().collect();
        let mut opts = getopts::Options::new();
        opts.optflag("h", "help", "Print this help screen");
        opts.optopt("", "brokers", "Specify kafka brokers (comma separated)", "HOSTS");
        opts.optopt("", "topic", "Specify target topic", "NAME");
        // Help text fixed: this option is the consumer group id, not a file
        // (the old text "Specify the group_id file" was a copy/paste slip).
        opts.optopt("", "group", "Specify the consumer group id", "NAME");
        opts.optflag("", "no-commit", "Do not commit consumed messages");
        let m = match opts.parse(&args[1..]) {
            Ok(m) => m,
            Err(e) => return Err(Error::Literal(e.to_string())),
        };
        if m.opt_present("help") {
            let brief = format!("{} [options]", args[0]);
            return Err(Error::Literal(opts.usage(&brief)));
        }
        Ok(Config {
            // Default to a local single-broker setup when unspecified.
            brokers: m.opt_str("brokers")
                .unwrap_or_else(|| "localhost:9092".to_owned())
                .split(',')
                .map(|s| s.trim().to_owned())
                .collect(),
            group: m.opt_str("group")
                .unwrap_or_else(|| "my-group".to_owned()),
            topic: m.opt_str("topic")
                .unwrap_or_else(|| "my-topic".to_owned()),
            no_commit: m.opt_present("no-commit"),
        })
    }
}
|
// 6502.rs for nes-emu
// looking to do a lot of high level stuff regarding the 6502 in here
// will put instructions in different .rs files.
use super::adc;
use super::and;
use super::asl;
use super::branch;
use super::compare;
use super::flags;
use super::increment_decrement;
use super::jumps;
use super::load_store;
use super::lsr;
use super::misc_instructions;
use super::or;
use super::rol;
use super::ror;
use super::sub;
use crate::memory::RAM;
// CPU register file and timing bookkeeping for the emulated 6502 core.
#[derive(Debug)]
pub struct Nes6502 {
    accumulator: u8,       // A register
    x: u8,                 // X index register
    y: u8,                 // Y index register
    status_flags: u8, // processor status (P); only need 7 bits of this
    stack_pointer: u8,     // S register
    pc_counter: u16,       // program counter (PC)
    cycles_until_next: u8, // cycles remaining before the next opcode fetch
    total_cycles: u128,    // cumulative cycles elapsed since power-on
    nmi_vector: u16,       // handler address read from $FFFA in init()
    irq_vector: u16        // handler address read from $FFFE in init()
}
impl Nes6502 {
pub fn new() -> Self {
Nes6502 {
accumulator: 0,
x: 0,
y: 0,
status_flags: 0x24, // only need 7 bits of this
stack_pointer: 0xFD,
pc_counter: 0,
cycles_until_next: 0,
total_cycles: 7,
nmi_vector: 0,
irq_vector: 0,
}
}
/// Load the IRQ/BRK and NMI handler addresses from their vectors and point
/// the program counter at the reset vector.
/// (The original comments here were swapped relative to the code.)
pub fn init(&mut self, ram: &mut RAM) {
    // IRQ/BRK vector lives at $FFFE/$FFFF
    self.irq_vector = ram.read_mem_address(0xFFFE);
    // NMI vector lives at $FFFA/$FFFB
    self.nmi_vector = ram.read_mem_address(0xFFFA);
    // reset vector for pc counter
    self.pc_counter = ram.read_mem_address(0xFFFC);
}
/// Advance the CPU by one tick: fetch and decode a new opcode when the
/// previous instruction has finished, otherwise burn one cycle of the
/// current instruction's remaining cost.
pub fn run(&mut self, ram: &mut RAM) {
    if self.cycles_until_next == 0 {
        // get next opcode
        let opcode = ram.read_mem_value(self.pc_counter);
        // decode sets pc_counter and cycles_until_next for the instruction.
        // NOTE(review): total_cycles is not advanced on this fetch/decode
        // tick itself — confirm that is the intended cycle accounting.
        self.decode_instruction(opcode, ram);
    } else {
        self.cycles_until_next -= 1;
        self.total_cycles += 1;
    }
}
fn decode_instruction(&mut self, opcode: u8, ram: &mut RAM) {
//println!("{:#x} {:#04x} A:{:#04x} X:{:#04x} Y:{:#04x} P:{:#04x} SP:{:#04x} cycles:{}", self.pc_counter, opcode, self.accumulator, self.x, self.y, self.status_flags, self.stack_pointer, self.total_cycles);
match opcode {
// -------------------------------------------------------------------
// add with carry start ----------------------------------------------
0x69 => {
// immediate
let imm_value = ram.read_mem_value(self.pc_counter + 1);
adc::adc_immediate(
imm_value,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x65 => {
// zero page
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
adc::adc_zero_page(
zero_page_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x75 => {
// zero page x
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
adc::adc_zero_page_x(
zero_page_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x6D => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
adc::adc_absolute(
absolute_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x7D => {
// absolute x
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
adc::adc_absolute_reg(
absolute_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x79 => {
// absolute y
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
adc::adc_absolute_reg(
absolute_addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x61 => {
// indirect x
let addr = ram.read_mem_value(self.pc_counter + 1);
adc::adc_indexed_indirect(
addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x71 => {
// indirect y
let addr = ram.read_mem_value(self.pc_counter + 1);
adc::adc_indirect_indexed(
addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
// add with carry end -----------------------------------------
// ------------------------------------------------------------
// logical and start-------------------------------------------
0x29 => {
// immediate
let imm_value = ram.read_mem_value(self.pc_counter + 1);
and::and_immediate(
imm_value,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
)
}
0x25 => {
// zero page
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
and::and_zero_page(
zero_page_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x35 => {
// zero page x
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
and::and_zero_page_x(
zero_page_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x2D => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
and::and_absolute(
absolute_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x3D => {
// absolute x
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
and::and_absolute_reg(
absolute_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x39 => {
// absolute y
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
and::and_absolute_reg(
absolute_addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x21 => {
// indirect x
let addr = ram.read_mem_value(self.pc_counter + 1);
and::and_indexed_indirect(
addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x31 => {
// indirect y
let addr = ram.read_mem_value(self.pc_counter + 1);
and::and_indirect_indexed(
addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
// logical and end---------------------------------------------
// ------------------------------------------------------------
// arithmetic shift left start
0x0A => {
// absolute
asl::asl_accumulator(
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x06 => {
// zero page
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
asl::asl_zero_page(
&mut self.pc_counter,
zero_page_addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x16 => {
// zero page x
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
asl::asl_zero_page_x(
&mut self.pc_counter,
self.x,
zero_page_addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x0E => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
asl::asl_absolute(
&mut self.pc_counter,
absolute_addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x1E => {
// absolute x
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
asl::asl_absolute_x(
&mut self.pc_counter,
self.x,
absolute_addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
// arithmetic shift left end
// ------------------------------------------------------------
// branch instructions start
0x90 => {
// branch if carry clear
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_carry_clear(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0xB0 => {
//branch if carry set
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_carry_set(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0xF0 => {
//branch if equal
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_equal(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0x30 => {
// branch if minus
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_minus(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0xD0 => {
//branch if not equal
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_not_equal(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0x10 => {
//branch if positive
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_positive(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0x50 => {
//branch if overflow clear
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_overflow_clear(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
0x70 => {
//branch if overflow set
let branch_addr = ram.read_mem_value(self.pc_counter + 1);
branch::branch_if_overflow_set(
self.status_flags,
&mut self.pc_counter,
branch_addr as i8,
&mut self.cycles_until_next,
);
}
// branch instruction end
// -----------------------------------------------------------
// bit test
0x24 => {
// bit test zero page
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
misc_instructions::bittest_zero_page(
&mut self.pc_counter,
self.accumulator,
zero_page_addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x2C => {
// bit test absolute
let addr = ram.read_mem_address(self.pc_counter + 1);
misc_instructions::bittest_absolute(
&mut self.pc_counter,
self.accumulator,
addr,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x00 => {
misc_instructions::break_force_interrupt(
&mut self.pc_counter,
&mut self.status_flags,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
//-----------------------------
// clear flag instructions start
0x18 => {
flags::clear_carry(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
0x58 => {
flags::clear_interrupt_disable(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
0xB8 => {
flags::clear_overflow(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
// clear flag instructions end
//-----------------------------
// set flag instructions start
0x38 => {
flags::set_carry(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
0x78 => {
flags::set_interrupt_disable(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
0xf8 => {
flags::set_decimal(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
0xd8 => {
flags::clear_decimal(&mut self.status_flags);
self.pc_counter += 1;
self.cycles_until_next = 2;
}
// set flag instructions end
//----------------------------
// compare instructions start
0xC9 => {
// immediate
let imm_value = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_immediate(
&mut self.pc_counter,
self.accumulator,
imm_value,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xC5 => {
// zero page
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_zero_page(
&mut self.pc_counter,
self.accumulator,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xD5 => {
// zero page x
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_zero_page_x(
&mut self.pc_counter,
self.accumulator,
operand,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xCD => {
// absolute
let operand = ram.read_mem_address(self.pc_counter + 1);
compare::comp_value_absolute(
&mut self.pc_counter,
self.accumulator,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xDD => {
// absolute x
let operand = ram.read_mem_address(self.pc_counter + 1);
compare::comp_value_absolute_reg(
&mut self.pc_counter,
self.accumulator,
operand,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xD9 => {
// absolute y
let operand = ram.read_mem_address(self.pc_counter + 1);
compare::comp_value_absolute_reg(
&mut self.pc_counter,
self.accumulator,
operand,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xC1 => {
// indirect x
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_indexed_indirect(
&mut self.pc_counter,
self.accumulator,
operand,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xD1 => {
// indirect y
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_indirect_indexed(
&mut self.pc_counter,
self.accumulator,
operand,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xE0 => {
// compare x reg immediate
let imm_value = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_immediate(
&mut self.pc_counter,
self.x,
imm_value,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xE4 => {
// compare x reg zero page
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_zero_page(
&mut self.pc_counter,
self.x,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xEC => {
// compare x reg absolute
let operand = ram.read_mem_address(self.pc_counter + 1);
compare::comp_value_absolute(
&mut self.pc_counter,
self.x,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xC0 => {
// compare y reg immediate
let imm_value = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_immediate(
&mut self.pc_counter,
self.y,
imm_value,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xC4 => {
// compare y reg zero page
let operand = ram.read_mem_value(self.pc_counter + 1);
compare::comp_value_zero_page(
&mut self.pc_counter,
self.y,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xCC => {
// compare y reg absolute
let operand = ram.read_mem_address(self.pc_counter + 1);
compare::comp_value_absolute(
&mut self.pc_counter,
self.y,
operand,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
// compare instructions end
// --------------------------
// decrement instructions start
0xC6 => {
// zero page
let page = ram.read_mem_value(self.pc_counter + 1);
increment_decrement::incdec_memory_zero_page(
&mut self.pc_counter,
page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
0xD6 => {
//zero page x
let page = ram.read_mem_value(self.pc_counter + 1);
increment_decrement::incdec_memory_zero_page_x(
&mut self.pc_counter,
page,
self.x,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
0xCE => {
//absolute
let addr = ram.read_mem_address(self.pc_counter + 1);
increment_decrement::incdec_memory_absolute(
&mut self.pc_counter,
addr,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
0xDE => {
// absolute x
let addr = ram.read_mem_address(self.pc_counter + 1);
increment_decrement::incdec_memory_absolute_x(
&mut self.pc_counter,
addr,
self.x,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
0xCA => {
// dec x
increment_decrement::incdec_reg(
&mut self.pc_counter,
&mut self.x,
&mut self.status_flags,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
0x88 => {
//dec y
increment_decrement::incdec_reg(
&mut self.pc_counter,
&mut self.y,
&mut self.status_flags,
&mut self.cycles_until_next,
increment_decrement::Operation::Dec,
);
}
// decrement instructions end
// -------------------------
// increment instructions start
0xE6 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
increment_decrement::incdec_memory_zero_page(
&mut self.pc_counter,
zero_page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
)
}
0xF6 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
increment_decrement::incdec_memory_zero_page_x(
&mut self.pc_counter,
zero_page,
self.x,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
)
}
0xEE => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
increment_decrement::incdec_memory_absolute(
&mut self.pc_counter,
absolute_addr,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
);
}
0xFE => {
let addr = ram.read_mem_address(self.pc_counter + 1);
increment_decrement::incdec_memory_absolute_x(
&mut self.pc_counter,
addr,
self.x,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
);
}
0xE8 => {
increment_decrement::incdec_reg(
&mut self.pc_counter,
&mut self.x,
&mut self.status_flags,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
);
}
0xC8 => {
increment_decrement::incdec_reg(
&mut self.pc_counter,
&mut self.y,
&mut self.status_flags,
&mut self.cycles_until_next,
increment_decrement::Operation::Inc,
);
}
// increment instructions end
// -------------------------
// jump and return instructions start
0x4C => {
// jump absolute
let mem_addr = ram.read_mem_address(self.pc_counter + 1);
jumps::jump_absolute(&mut self.pc_counter, mem_addr, &mut self.cycles_until_next);
}
0x6C => {
// jump indirect
let mem_addr = ram.read_mem_address(self.pc_counter + 1);
jumps::jump_indirect(
&mut self.pc_counter,
mem_addr,
ram,
&mut self.cycles_until_next,
);
}
0x20 => {
// jump subroutine absolute
let mem_addr = ram.read_mem_address(self.pc_counter + 1);
jumps::jump_subroutine(
&mut self.pc_counter,
mem_addr,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
0x40 => {
jumps::return_from_interrupt(
&mut self.pc_counter,
&mut self.stack_pointer,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x60 => {
//return from subroutine
jumps::return_from_subroutine(
&mut self.pc_counter,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
// jump and return instructions end
// -------------------------
// load instructions start
0xA9 => {
let immediate = ram.read_mem_value(self.pc_counter + 1);
self.accumulator = immediate;
load_store::set_flags(immediate, &mut self.status_flags);
self.pc_counter += 2;
self.cycles_until_next = 2;
}
0xA5 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.accumulator = load_store::load_zero_page(
&mut self.pc_counter,
zero_page,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xB5 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.accumulator = load_store::load_zero_page_reg(
&mut self.pc_counter,
zero_page,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xAD => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.accumulator = load_store::absolute_load(
&mut self.pc_counter,
absolute,
0,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xBD => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.accumulator = load_store::absolute_load(
&mut self.pc_counter,
absolute,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xB9 => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.accumulator = load_store::absolute_load(
&mut self.pc_counter,
absolute,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xA1 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.accumulator = load_store::indirect_x_load(
&mut self.pc_counter,
zero_page,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
)
}
0xB1 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.accumulator = load_store::indirect_y_load(
&mut self.pc_counter,
zero_page,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
)
}
0xA2 => {
let immediate = ram.read_mem_value(self.pc_counter + 1);
self.x = immediate;
load_store::set_flags(immediate, &mut self.status_flags);
self.pc_counter += 2;
self.cycles_until_next = 2;
}
0xA6 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.x = load_store::load_zero_page(
&mut self.pc_counter,
zero_page,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xB6 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.x = load_store::load_zero_page_reg(
&mut self.pc_counter,
zero_page,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xAE => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.x = load_store::absolute_load(
&mut self.pc_counter,
absolute,
0,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xBE => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.x = load_store::absolute_load(
&mut self.pc_counter,
absolute,
self.y,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xA0 => {
let immediate = ram.read_mem_value(self.pc_counter + 1);
self.y = immediate;
load_store::set_flags(immediate, &mut self.status_flags);
self.pc_counter += 2;
self.cycles_until_next = 2;
}
0xA4 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.y = load_store::load_zero_page(
&mut self.pc_counter,
zero_page,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xB4 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
self.y = load_store::load_zero_page_reg(
&mut self.pc_counter,
zero_page,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xAC => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.y = load_store::absolute_load(
&mut self.pc_counter,
absolute,
0,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xBC => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
self.y = load_store::absolute_load(
&mut self.pc_counter,
absolute,
self.x,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
// load instructions end
// -------------------------
// store instructions start
0x85 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.accumulator,
zero_page,
0,
ram,
&mut self.cycles_until_next,
);
}
0x95 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.accumulator,
zero_page,
self.x,
ram,
&mut self.cycles_until_next,
);
}
0x8D => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
load_store::store_absolute(
&mut self.pc_counter,
self.accumulator,
absolute,
0,
ram,
&mut self.cycles_until_next,
);
}
0x9D => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
load_store::store_absolute(
&mut self.pc_counter,
self.accumulator,
absolute,
self.x,
ram,
&mut self.cycles_until_next,
);
}
0x99 => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
load_store::store_absolute(
&mut self.pc_counter,
self.accumulator,
absolute,
self.y,
ram,
&mut self.cycles_until_next,
);
}
0x81 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_indirect_x(
&mut self.pc_counter,
self.accumulator,
zero_page,
self.x,
ram,
&mut self.cycles_until_next,
);
}
0x91 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_indirect_y(
&mut self.pc_counter,
self.accumulator,
zero_page,
self.y,
ram,
&mut self.cycles_until_next,
);
}
0x86 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.x,
zero_page,
0,
ram,
&mut self.cycles_until_next,
);
}
0x96 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.x,
zero_page,
self.y,
ram,
&mut self.cycles_until_next,
);
}
0x8E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
load_store::store_absolute(
&mut self.pc_counter,
self.x,
absolute,
0,
ram,
&mut self.cycles_until_next,
);
}
0x84 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.y,
zero_page,
0,
ram,
&mut self.cycles_until_next,
);
}
0x94 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
load_store::store_zero_page(
&mut self.pc_counter,
self.y,
zero_page,
self.x,
ram,
&mut self.cycles_until_next,
);
}
0x8C => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
load_store::store_absolute(
&mut self.pc_counter,
self.y,
absolute,
0,
ram,
&mut self.cycles_until_next,
);
}
// store instructions end
// -------------------------
// logical shift right start
0x4A => lsr::lsr_accumulator(
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
),
0x46 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
lsr::lsr_zero_page(
&mut self.pc_counter,
zero_page,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x56 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
lsr::lsr_zero_page_x(
&mut self.pc_counter,
self.x,
zero_page,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x4E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
lsr::lsr_absolute(
&mut self.pc_counter,
absolute,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x5E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
lsr::lsr_absolute_x(
&mut self.pc_counter,
self.x,
absolute,
ram,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
// logical shift right end
// -------------------------
// exclusive or instructions start
0x49 => {
let immediate = ram.read_mem_value(self.pc_counter + 1);
or::xor_immediate(
immediate,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x45 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::xor_zero_page(
zero_page,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x55 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::xor_zero_page_x(
zero_page,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x4D => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::xor_absolute(
absolute_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x5D => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::xor_absolute_reg(
absolute_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x59 => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::xor_absolute_reg(
absolute_addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x41 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::xor_indexed_indirect(
zero_page,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x51 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::xor_indirect_indexed(
zero_page,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
// exclusive or instructions end
// -------------------------
// inclusive or instruction start
0x09 => {
let immediate = ram.read_mem_value(self.pc_counter + 1);
or::ior_immediate(
immediate,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x05 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::ior_zero_page(
zero_page,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x15 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::ior_zero_page_x(
zero_page,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x0D => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::ior_absolute(
absolute_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x1D => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::ior_absolute_reg(
absolute_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x19 => {
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
or::ior_absolute_reg(
absolute_addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x01 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::ior_indexed_indirect(
zero_page,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x11 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
or::ior_indirect_indexed(
zero_page,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
// inclusive or instruction end
// -------------------------
// push and pull instructions start
0x48 => {
misc_instructions::push_acc_on_stack(
&mut self.pc_counter,
self.accumulator,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
0x08 => {
misc_instructions::push_status_on_stack(
&mut self.pc_counter,
self.status_flags,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
0x68 => {
misc_instructions::pull_acc_from_stack(
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
//println!("{:#b} {:#b}", self.accumulator, self.status_flags);
}
0x28 => {
misc_instructions::pull_status_from_stack(
&mut self.pc_counter,
&mut self.status_flags,
&mut self.stack_pointer,
ram,
&mut self.cycles_until_next,
);
}
// push and pull instructions end
// -------------------------
// rotate instructions start
0x2A => {
rol::rol_accumulator(
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x26 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
rol::rol_zero_page(
&mut self.pc_counter,
zero_page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x36 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
rol::rol_zero_page_x(
&mut self.pc_counter,
self.x,
zero_page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x2E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
rol::rol_absolute(
&mut self.pc_counter,
absolute,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x3E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
rol::rol_absolute_x(
&mut self.pc_counter,
self.x,
absolute,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x6A => {
ror::ror_accumulator(
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x66 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
ror::ror_zero_page(
&mut self.pc_counter,
zero_page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x76 => {
let zero_page = ram.read_mem_value(self.pc_counter + 1);
ror::ror_zero_page_x(
&mut self.pc_counter,
self.x,
zero_page,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x6E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
ror::ror_absolute(
&mut self.pc_counter,
absolute,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0x7E => {
let absolute = ram.read_mem_address(self.pc_counter + 1);
ror::ror_absolute_x(
&mut self.pc_counter,
self.x,
absolute,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
// rotate instructions end
// -------------------------
// transfer instructions start
0xAA => {
// transfer acc to x
misc_instructions::transfer_source_to_dest(
&mut self.pc_counter,
self.accumulator,
&mut self.x,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xA8 => {
// transfer acc to y
misc_instructions::transfer_source_to_dest(
&mut self.pc_counter,
self.accumulator,
&mut self.y,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x8A => {
// transfer x to acc
misc_instructions::transfer_source_to_dest(
&mut self.pc_counter,
self.x,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xBA => {
// transfer stack pointer to x
misc_instructions::transfer_source_to_dest(
&mut self.pc_counter,
self.stack_pointer,
&mut self.x,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0x9A => {
// transfer x to stack pointer
misc_instructions::transfer_x_to_stack_pointer(
&mut self.pc_counter,
self.x,
&mut self.stack_pointer,
&mut self.cycles_until_next,
);
}
0x98 => {
// transfer y to acc
misc_instructions::transfer_source_to_dest(
&mut self.pc_counter,
self.y,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
// transfer instructions end
//-------------------------
//subc instructions start
0xE9 => {
let imm_value = ram.read_mem_value(self.pc_counter + 1);
sub::sbc_immediate(
imm_value,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
&mut self.cycles_until_next,
);
}
0xE5 => {
// zero page
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
sub::sbc_zero_page(
zero_page_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xF5 => {
// zero page x
let zero_page_addr = ram.read_mem_value(self.pc_counter + 1);
sub::sbc_zero_page_x(
zero_page_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xED => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
sub::sbc_absolute(
absolute_addr,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xFD => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
sub::sbc_absolute_reg(
absolute_addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xF9 => {
// absolute
let absolute_addr = ram.read_mem_address(self.pc_counter + 1);
sub::sbc_absolute_reg(
absolute_addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xE1 => {
// indirect x
let addr = ram.read_mem_value(self.pc_counter + 1);
sub::sbc_indexed_indirect(
addr,
self.x,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
0xF1 => {
let addr = ram.read_mem_value(self.pc_counter + 1);
sub::sbc_indirect_indexed(
addr,
self.y,
&mut self.pc_counter,
&mut self.accumulator,
&mut self.status_flags,
ram,
&mut self.cycles_until_next,
);
}
//subc instructions end
//------------------------
//nop
0xEA | 0xFA | 0x1A | 0x5A | 0x7A | 0xDA | 0x3A | 0xFA => {
misc_instructions::NOP(&mut self.pc_counter, &mut self.cycles_until_next);
}
// unofficial IGN nop 3 byte
0x0C | 0x1C | 0x3C | 0x5C | 0x7C | 0xDC | 0xFC => {
self.pc_counter += 3;
self.cycles_until_next = 5;
}
// unofficial IGN nop 2 bytes
0x04 | 0x44 | 0x64 | 0x14 | 0x34 | 0x54 | 0x74 | 0xD4 | 0xF4 | 0x80 => {
self.pc_counter += 2;
self.cycles_until_next = 4;
}
// misc
0xD8 | 0xB8 | 0xF8 => {
self.pc_counter += 1;
self.cycles_until_next = 2;
}
_ => panic!("{:#x}", opcode),
}
}
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtCore/qfinalstate.h
// dst-file: /src/core/qfinalstate.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use super::qabstractstate::*; // 773
use std::ops::Deref;
use super::qstate::*; // 773
use super::qobjectdefs::*; // 773
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
extern {
    // Returns sizeof(QFinalState) as reported by the C++ shim.
    fn QFinalState_Class_Size() -> c_int;
    // proto: void QFinalState::QFinalState(QState * parent);
    // Constructs the C++ object and returns an opaque handle to it.
    fn C_ZN11QFinalStateC2EP6QState(arg0: *mut c_void) -> u64;
    // proto: void QFinalState::~QFinalState();
    fn C_ZN11QFinalStateD2Ev(qthis: u64 /* *mut c_void*/);
    // proto: const QMetaObject * QFinalState::metaObject();
    fn C_ZNK11QFinalState10metaObjectEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
} // <= ext block end
// body block begin =>
// class sizeof(QFinalState)=1
/// Generated Rust-side wrapper for the C++ `QFinalState` class.
#[derive(Default)]
pub struct QFinalState {
    // Embedded base-class wrapper; exposed via the `Deref`/`AsRef` impls below.
    qbase: QAbstractState,
    // Opaque handle to the underlying C++ object (a pointer stored as u64).
    pub qclsinst: u64 /* *mut c_void*/,
}
impl /*struct*/ QFinalState {
    /// Wraps an existing C++ object handle in a `QFinalState` without
    /// constructing a new C++ object.
    pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QFinalState {
        QFinalState {
            qbase: QAbstractState::inheritFrom(qthis),
            qclsinst: qthis,
            ..Default::default()
        }
    }
}
impl Deref for QFinalState {
    type Target = QAbstractState;

    /// Lets a `QFinalState` be used wherever a `&QAbstractState` is expected,
    /// mirroring the C++ inheritance relationship.
    fn deref(&self) -> &QAbstractState {
        &self.qbase
    }
}
impl AsRef<QAbstractState> for QFinalState {
    /// Cheap borrow of the embedded base-class wrapper.
    fn as_ref(&self) -> &QAbstractState {
        &self.qbase
    }
}
// proto: void QFinalState::QFinalState(QState * parent);
impl /*struct*/ QFinalState {
    /// Overloaded-constructor entry point; the actual construction is done by
    /// whichever `QFinalState_new` impl matches `value`'s type.
    pub fn new<T: QFinalState_new>(value: T) -> QFinalState {
        value.new()
    }
}
/// Overload trait backing `QFinalState::new`; one impl per accepted
/// constructor-argument pack.
pub trait QFinalState_new {
    // Consumes the argument pack and produces the wrapped object.
    fn new(self) -> QFinalState;
}
// proto: void QFinalState::QFinalState(QState * parent);
impl<'a> /*trait*/ QFinalState_new for (Option<&'a QState>) {
    /// Constructs a `QFinalState` via the C++ constructor
    /// `QFinalState::QFinalState(QState *parent)`.
    fn new(self) -> QFinalState {
        // A missing parent is represented by a 0 (null) handle on the C++ side.
        let arg0 = (if self.is_none() {0} else {self.unwrap().qclsinst}) as *mut c_void;
        // SAFETY: the FFI shim allocates and constructs the C++ object and
        // returns an opaque handle that the C++ side owns.
        let qthis: u64 = unsafe {C_ZN11QFinalStateC2EP6QState(arg0)};
        // FIX: the generated code previously `calloc`ed a
        // `QFinalState_Class_Size()`-byte placeholder (`qthis_ph`) that was
        // never used or freed — a memory leak on every construction. The dead
        // allocation has been removed; behavior is otherwise unchanged.
        QFinalState{qbase: QAbstractState::inheritFrom(qthis), qclsinst: qthis, ..Default::default()}
    }
}
// proto: void QFinalState::~QFinalState();
impl /*struct*/ QFinalState {
    /// Overloaded-destructor entry point; dispatches on the (empty) argument
    /// pack via the `QFinalState_free` trait.
    pub fn free<RetType, T: QFinalState_free<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.free(self)
    }
}
/// Overload trait backing `QFinalState::free`; `RetType` is the return type
/// of the selected destructor overload.
pub trait QFinalState_free<RetType> {
    // Destroys the C++ object wrapped by `rsthis`.
    fn free(self , rsthis: & QFinalState) -> RetType;
}
// proto: void QFinalState::~QFinalState();
impl<'a> /*trait*/ QFinalState_free<()> for () {
    /// Invokes the C++ destructor `QFinalState::~QFinalState()` on the
    /// wrapped object handle.
    fn free(self, rsthis: &QFinalState) -> () {
        unsafe { C_ZN11QFinalStateD2Ev(rsthis.qclsinst) };
    }
}
// proto: const QMetaObject * QFinalState::metaObject();
impl /*struct*/ QFinalState {
    /// Overload entry point for `QFinalState::metaObject()`; dispatches via
    /// the `QFinalState_metaObject` trait.
    pub fn metaObject<RetType, T: QFinalState_metaObject<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.metaObject(self)
    }
}
/// Overload trait backing `QFinalState::metaObject`.
pub trait QFinalState_metaObject<RetType> {
    // Calls the C++ `metaObject()` on the object wrapped by `rsthis`.
    fn metaObject(self , rsthis: & QFinalState) -> RetType;
}
// proto: const QMetaObject * QFinalState::metaObject();
impl<'a> /*trait*/ QFinalState_metaObject<QMetaObject> for () {
    /// Calls `QFinalState::metaObject()` and wraps the returned
    /// `const QMetaObject*` handle in a Rust-side `QMetaObject`.
    fn metaObject(self, rsthis: &QFinalState) -> QMetaObject {
        let raw = unsafe { C_ZNK11QFinalState10metaObjectEv(rsthis.qclsinst) };
        QMetaObject::inheritFrom(raw as u64)
    }
}
// <= body block end
|
use rstris::figure::Figure;
use rstris::find_path::*;
use rstris::find_placement::*;
use rstris::movement::Movement;
use rstris::playfield::Playfield;
use rstris::position::Position;
use crate::game::Game;
/// Strategy interface for the computer player: scores candidate placings.
pub trait ComputerType {
    // Called once per new figure before scoring starts; `avail_placings` is
    // the number of candidate positions that will be evaluated.
    fn init_eval(&mut self, pf: &Playfield, avail_placings: usize);
    // Returns a score for placing `fig` at `pos`. Higher scores are tried
    // first (see the descending sort in `new_figure_event`).
    fn eval_placing(&mut self, pf: &Playfield, fig: &Figure, pos: Position) -> f32;
}
/// A candidate placing position paired with its evaluation score.
struct EvalPosition {
    pos: Position,
    eval: f32,
}
/// A computer-controlled player that evaluates placings with a pluggable
/// `ComputerType` strategy and feeds moves into the `Game`.
pub struct ComputerPlayer<T>
where
    T: ComputerType,
{
    // The scoring strategy used to rank candidate placings.
    com_type: T,
    // How many sideways/rotation moves the player may make per down-step.
    moves_per_down_step: f32,
    // Figure and position seen on the previous `act_on_game` call; used to
    // detect figure changes and downward movement.
    last_figure: Option<(Figure, Position)>,
    // Some cache variables
    avail_placings: Vec<Position>,
    find_path: FindPath,
    eval_placing: Vec<EvalPosition>,
    path: Vec<Movement>,
    // Planned (level, movement) pairs, consumed as the figure descends.
    moves_per_level: Vec<(i32, Movement)>,
}
impl<T> ComputerPlayer<T>
where
    T: ComputerType,
{
    /// Create a player using `com_type` for evaluation; `moves_per_down_step`
    /// controls how many moves are scheduled per downward step of the figure.
    pub fn new(moves_per_down_step: f32, com_type: T) -> Self {
        ComputerPlayer {
            moves_per_down_step,
            com_type,
            moves_per_level: Vec::new(),
            last_figure: None,
            eval_placing: Vec::new(),
            path: Vec::new(),
            find_path: FindPath::default(),
            avail_placings: Vec::new(),
        }
    }
    // Schedules the planned moves for the level the figure has just reached.
    // Called whenever the current figure is observed again; only acts when
    // the figure has moved further down than last time (y grows downward —
    // TODO confirm against Playfield's coordinate convention).
    fn figure_move_event(&mut self, game: &mut Game, ticks: u64, _fig: &Figure, pos: Position) {
        // -1 when no figure was seen before, so any real y triggers the body.
        let last_y = match self.last_figure {
            Some((_, ref last_fig_pos)) => last_fig_pos.y(),
            None => -1,
        };
        let y = pos.y();
        if y > last_y {
            let mut move_time = 0;
            // Emit all moves planned for this level, spacing them evenly
            // across the time of one down-step.
            while !self.moves_per_level.is_empty() && y == self.moves_per_level[0].0 {
                let movement = self.moves_per_level.remove(0);
                game.add_move(movement.1, ticks + move_time);
                move_time += (game.down_step_time() as f32 / self.moves_per_down_step) as u64;
            }
        }
    }
    // Runs once per newly spawned figure: evaluates every reachable placing,
    // picks the best reachable one, and converts the path to per-level moves.
    fn new_figure_event(&mut self, _ticks: u64, pf: &Playfield, fig: &Figure, pos: Position) {
        // Find all possible positions where figure can be placed
        self.avail_placings.clear();
        find_placement(&mut self.avail_placings, &pf, fig);
        // Evaluate all placings to find the best one
        self.com_type.init_eval(&pf, self.avail_placings.len());
        self.eval_placing.clear();
        for avail_pos in &self.avail_placings {
            let eval = self.com_type.eval_placing(&pf, &fig, *avail_pos);
            let eval_pos = EvalPosition {
                pos: *avail_pos,
                eval,
            };
            self.eval_placing.push(eval_pos);
        }
        // Descending by score, i.e. best placing first. NOTE(review):
        // `partial_cmp(..).unwrap()` panics if a strategy returns NaN.
        self.eval_placing
            .sort_by(|a, b| b.eval.partial_cmp(&a.eval).unwrap());
        // Find a path to first (and best) available placing
        self.path.clear();
        for eval_pos in &self.eval_placing {
            self.find_path.search(
                &mut self.path,
                &pf,
                fig,
                pos,
                eval_pos.pos,
                self.moves_per_down_step,
            );
            // A non-empty path means this placing is reachable; stop here.
            if !self.path.is_empty() {
                break;
            }
        }
        self.moves_per_level.clear();
        if !self.path.is_empty() {
            // Path is produced goal-first; reverse into execution order —
            // TODO confirm against FindPath::search's output order.
            self.path.reverse();
            // Convert the path from being in exact Movements to
            // describe the sideways/rotational movements per height level
            path_to_moves_per_level(&mut self.moves_per_level, &self.path);
        }
    }
    /// Drive the player: plans for a newly spawned figure and schedules the
    /// pending moves as the current figure descends. Call once per game tick.
    pub fn act_on_game(&mut self, game: &mut Game, ticks: u64) {
        if self.last_figure != *game.current_figure() {
            // Figure has changed since last call
            let current_figure = game.current_figure().clone();
            if let Some((ref fig, pos)) = current_figure {
                if self.last_figure == None {
                    // Test if new figure
                    self.new_figure_event(ticks, game.playfield(), fig, pos);
                    self.figure_move_event(game, ticks, fig, pos);
                } else {
                    self.figure_move_event(game, ticks, fig, pos);
                }
            }
            self.last_figure = current_figure;
        }
    }
}
/// Collapse an ordered movement path into (level, movement) pairs: each
/// `MoveDown` advances the level counter, every other movement is recorded
/// at the level it occurs on. `moves` is cleared before being filled.
fn path_to_moves_per_level(moves: &mut Vec<(i32, Movement)>, path: &[Movement]) {
    moves.clear();
    let mut level: i32 = 0;
    for &movement in path {
        match movement {
            Movement::MoveDown => level += 1,
            other => moves.push((level, other)),
        }
    }
}
|
//! Common and useful utility traits.
use core::hint::unreachable_unchecked;
use core::ptr::{self, NonNull};
use crate::pointer::{
Marked::{self, Null, Value},
MarkedNonNullable,
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// UnwrapPtr (trait)
////////////////////////////////////////////////////////////////////////////////////////////////////
/// A trait that adds a method to ergonomically extract a `*const T` from an
/// [`Option`] of a non-nullable pointer or reference type.
pub trait UnwrapPtr {
    /// The type to which the [`Option`] contains a pointer or reference.
    type Item: Sized;
    /// Unwraps the [`Option`] and returns the contained value converted to a
    /// `const` pointer or `null`.
    fn unwrap_ptr(self) -> *const Self::Item;
}
/********** blanket impls *************************************************************************/
impl<'a, T> UnwrapPtr for Option<&'a T> {
    type Item = T;

    /// A shared reference converts directly to `*const T`; `None` becomes the
    /// null pointer.
    #[inline]
    fn unwrap_ptr(self) -> *const Self::Item {
        self.map_or(ptr::null(), |reference| reference as *const T)
    }
}
impl<'a, T> UnwrapPtr for Option<&'a mut T> {
    type Item = T;

    /// A mutable reference converts to `*const T`; `None` becomes null.
    #[inline]
    fn unwrap_ptr(self) -> *const Self::Item {
        match self {
            // Cast straight to `*const _` for consistency with the sibling
            // impls; the previous `as *mut _` only compiled through an
            // implicit `*mut T -> *const T` coercion of the match arm.
            Some(value) => value as *const _,
            None => ptr::null(),
        }
    }
}
impl<T> UnwrapPtr for Option<NonNull<T>> {
    type Item = T;

    /// `NonNull` guarantees a non-null pointer, so `Some` is simply re-cast
    /// to `*const T`; `None` maps to null.
    #[inline]
    fn unwrap_ptr(self) -> *const Self::Item {
        self.map_or(ptr::null(), |non_null| non_null.as_ptr() as *const T)
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// UnwrapMutPtr (trait)
////////////////////////////////////////////////////////////////////////////////////////////////////
/// A trait that adds a method to ergonomically extract a `*mut T` from an
/// [`Option`] of a non-nullable pointer or reference type.
pub trait UnwrapMutPtr: UnwrapPtr {
    /// Unwraps the [`Option`] and returns the contained value converted to a
    /// `mut` pointer or `null`.
    fn unwrap_mut_ptr(self) -> *mut <Self as UnwrapPtr>::Item;
}
/********** blanket impls *************************************************************************/
impl<'a, T> UnwrapMutPtr for Option<&'a mut T> {
#[inline]
fn unwrap_mut_ptr(self) -> *mut Self::Item {
self.unwrap_ptr() as *mut _
}
}
impl<T> UnwrapMutPtr for Option<NonNull<T>> {
#[inline]
fn unwrap_mut_ptr(self) -> *mut Self::Item {
self.unwrap_ptr() as *mut _
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// UnwrapUnchecked (trait)
////////////////////////////////////////////////////////////////////////////////////////////////////
/// A trait for adding an `unsafe` unwrapping method to [`Option`] like types.
pub trait UnwrapUnchecked {
    /// The contained type that will be unwrapped.
    type Item: Sized;
    /// Unwraps the contained item in an [`Option`] like type **without**
    /// checking if the value actually exists.
    ///
    /// # Safety
    ///
    /// The caller has to ensure `self` actually contains an item, otherwise,
    /// there will be undefined behaviour.
    ///
    /// # Panics
    ///
    /// This method may panic in debug builds, if it is called on a value that
    /// does not contain an item.
    unsafe fn unwrap_unchecked(self) -> Self::Item;
}
/********** blanket impls *************************************************************************/
impl<T> UnwrapUnchecked for Option<T> {
    type Item = T;

    /// Debug builds still assert; release builds compile the `None` branch
    /// down to `unreachable_unchecked`.
    #[inline]
    unsafe fn unwrap_unchecked(self) -> Self::Item {
        debug_assert!(self.is_some(), "`unwrap_unchecked` called on a `None`");
        if let Some(value) = self {
            value
        } else {
            unreachable_unchecked()
        }
    }
}
impl<T, E> UnwrapUnchecked for Result<T, E> {
    type Item = T;

    /// Debug builds still assert; release builds compile the `Err` branch
    /// down to `unreachable_unchecked`.
    #[inline]
    unsafe fn unwrap_unchecked(self) -> Self::Item {
        debug_assert!(self.is_ok(), "`unwrap_unchecked` called on an `Err`");
        if let Ok(value) = self {
            value
        } else {
            unreachable_unchecked()
        }
    }
}
impl<T: MarkedNonNullable> UnwrapUnchecked for Marked<T> {
    type Item = T;

    /// Debug builds still assert; release builds compile the `Null` branch
    /// down to `unreachable_unchecked`.
    #[inline]
    unsafe fn unwrap_unchecked(self) -> Self::Item {
        debug_assert!(self.is_value(), "`unwrap_unchecked` called on a `Null`");
        if let Value(value) = self {
            value
        } else {
            unreachable_unchecked()
        }
    }
}
|
use std::io;
use std::os::unix::net;
use std::os::unix::prelude::*;
use std::path::Path;
use libc;
use mio::event::Evented;
use mio::unix::EventedFd;
use mio::{Poll, PollOpt, Ready, Token};
use UnixStream;
use cvt;
use socket::{sockaddr_un, Socket};
/// A structure representing a Unix domain socket server.
///
/// This listener can be used to accept new streams connected to a remote
/// endpoint, through which the `read` and `write` methods can be used to
/// communicate.
#[derive(Debug)]
pub struct UnixListener {
    // The std listener doing the actual work; kept in nonblocking mode so it
    // can be driven by mio.
    inner: net::UnixListener,
}
impl UnixListener {
/// Creates a new `UnixListener` bound to the specified socket.
pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
UnixListener::_bind(path.as_ref())
}
fn _bind(path: &Path) -> io::Result<UnixListener> {
unsafe {
let (addr, len) = try!(sockaddr_un(path));
let fd = try!(Socket::new(libc::SOCK_STREAM));
let addr = &addr as *const _ as *const _;
try!(cvt(libc::bind(fd.fd(), addr, len)));
try!(cvt(libc::listen(fd.fd(), 128)));
Ok(UnixListener::from_raw_fd(fd.into_fd()))
}
}
/// Consumes a standard library `UnixListener` and returns a wrapped
/// `UnixListener` compatible with mio.
///
/// The returned stream is moved into nonblocking mode and is otherwise
/// ready to get associated with an event loop.
pub fn from_listener(stream: net::UnixListener) -> io::Result<UnixListener> {
try!(stream.set_nonblocking(true));
Ok(UnixListener { inner: stream })
}
/// Accepts a new incoming connection to this listener.
///
/// When established, the corresponding `UnixStream` and the remote peer's
/// address will be returned as `Ok(Some(...))`. If there is no connection
/// waiting to be accepted, then `Ok(None)` is returned.
///
/// If an error happens while accepting, `Err` is returned.
pub fn accept(&self) -> io::Result<Option<(UnixStream, net::SocketAddr)>> {
match try!(self.accept_std()) {
Some((stream, addr)) => Ok(Some((UnixStream::from_stream(stream)?, addr))),
None => Ok(None),
}
}
/// Accepts a new incoming connection to this listener.
///
/// This method is the same as `accept`, except that it returns a UDP socket *in blocking mode*
/// which isn't bound to a `mio` type. This can later be converted to a `mio` type, if
/// necessary.
///
/// If an error happens while accepting, `Err` is returned.
pub fn accept_std(&self) -> io::Result<Option<(net::UnixStream, net::SocketAddr)>> {
match self.inner.accept() {
Ok((socket, addr)) => Ok(Some(unsafe {
(net::UnixStream::from_raw_fd(socket.into_raw_fd()), addr)
})),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(None),
Err(e) => Err(e),
}
}
/// Creates a new independently owned handle to the underlying socket.
///
/// The returned `UnixListener` is a reference to the same socket that this
/// object references. Both handles can be used to accept incoming
/// connections and options set on one listener will affect the other.
pub fn try_clone(&self) -> io::Result<UnixListener> {
self.inner.try_clone().map(|l| UnixListener { inner: l })
}
/// Returns the local socket address of this listener.
pub fn local_addr(&self) -> io::Result<net::SocketAddr> {
self.inner.local_addr()
}
/// Returns the value of the `SO_ERROR` option.
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.inner.take_error()
}
}
impl Evented for UnixListener {
    /// All three operations delegate to an `EventedFd` built from the
    /// listener's raw file descriptor.
    fn register(&self, poll: &Poll, token: Token, events: Ready, opts: PollOpt) -> io::Result<()> {
        let fd = self.as_raw_fd();
        EventedFd(&fd).register(poll, token, events, opts)
    }
    fn reregister(
        &self,
        poll: &Poll,
        token: Token,
        events: Ready,
        opts: PollOpt,
    ) -> io::Result<()> {
        let fd = self.as_raw_fd();
        EventedFd(&fd).reregister(poll, token, events, opts)
    }
    fn deregister(&self, poll: &Poll) -> io::Result<()> {
        let fd = self.as_raw_fd();
        EventedFd(&fd).deregister(poll)
    }
}
impl AsRawFd for UnixListener {
    // Borrow the underlying file descriptor; ownership stays with `inner`.
    fn as_raw_fd(&self) -> i32 {
        self.inner.as_raw_fd()
    }
}
impl IntoRawFd for UnixListener {
    // Transfer ownership of the file descriptor to the caller; the listener
    // is consumed and will not close it.
    fn into_raw_fd(self) -> i32 {
        self.inner.into_raw_fd()
    }
}
impl FromRawFd for UnixListener {
    // SAFETY contract (inherited from `FromRawFd`): `fd` must be a valid,
    // owned Unix-socket descriptor; the new listener takes ownership of it.
    unsafe fn from_raw_fd(fd: i32) -> UnixListener {
        UnixListener {
            inner: net::UnixListener::from_raw_fd(fd),
        }
    }
}
|
//! A segmented vector type which pins the element `T`.
#![allow(clippy::len_without_is_empty)]
use std::mem::{self, MaybeUninit};
use std::ops::{Index, IndexMut};
use std::pin::Pin;
use std::ptr::drop_in_place;
use std::slice;
/// A segmented vector type which pins the element `T`.
pub struct PinVec<T> {
    // Slots of memory. Once one has been allocated it is never moved. This
    // allows us to store entries in there and fetch them as `Pin<&mut T>`.
    // Slot sizes follow `calculate_key`: 16 elements for slot 0, doubling
    // afterwards.
    slots: Vec<Box<[MaybeUninit<T>]>>,
    // Number of initialized elements in the segmented vector. Allows us to calculate
    // how many elements in each slot are initialized.
    len: usize,
}
impl<T> PinVec<T> {
    /// Construct a new empty vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use unicycle::pin_vec::PinVec;
    ///
    /// const VECTOR: PinVec<u32> = PinVec::new();
    /// ```
    pub const fn new() -> Self {
        Self {
            slots: Vec::new(),
            len: 0,
        }
    }
    /// Get the length of the segmented vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::<u32>::new();
    ///
    /// assert_eq!(vector.len(), 0);
    /// vector.push(42);
    /// assert_eq!(vector.len(), 1);
    /// vector.clear();
    /// assert_eq!(vector.len(), 0);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
    /// Clear the segmented vector, dropping each element in it as appropriate.
    ///
    /// # Examples
    ///
    /// ```
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::<u32>::new();
    /// vector.push(42);
    /// assert_eq!(vector.get(0), Some(&42));
    /// vector.clear();
    /// assert_eq!(vector.get(0), None);
    /// ```
    pub fn clear(&mut self) {
        if mem::needs_drop::<T>() {
            // Slots before `last_slot` are fully initialized; the slot at
            // `last_slot` has exactly `offset` initialized elements.
            let (last_slot, offset, _) = calculate_key(self.len());
            for (i, mut slot) in self.slots.drain(..).enumerate() {
                // SAFETY:
                // * We initialized slice to only point to the
                //   already-initialized elements.
                // * It's safe to `drop_in_place` because we are draining the
                //   Vec and have ownership of the elements in the slot.
                unsafe {
                    let len = if i < last_slot { slot.len() } else { offset };
                    let base = slot.as_mut_ptr().cast::<T>();
                    drop_in_place(slice::from_raw_parts_mut(base, len));
                }
            }
        } else {
            // `T` needs no drop glue, so just free the slot allocations.
            self.slots.clear();
        }
        debug_assert_eq!(self.slots.len(), 0);
        self.len = 0;
    }
    /// Get an immutable element from the segmented vector through the given `index`.
    ///
    /// # Examples
    ///
    /// ```
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::<u32>::new();
    /// vector.push(42);
    /// assert_eq!(vector.get(0), Some(&42));
    /// ```
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len() {
            let (slot, offset, _) = calculate_key(index);
            // Safety: We guarantee that all indices <= self.len are initialized
            unsafe { Some(self.slots[slot][offset].assume_init_ref()) }
        } else {
            None
        }
    }
    /// Get a mutable element from the segmented vector through the given `index` if `T`
    /// is [Unpin].
    ///
    /// # Examples
    ///
    /// ```
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::<u32>::new();
    /// vector.push(42);
    /// *vector.get_mut(0).unwrap() = 42;
    ///
    /// assert_eq!(vector.get_mut(0), Some(&mut 42));
    /// ```
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T>
    where
        T: Unpin,
    {
        // `Unpin` makes unwrapping the pin sound.
        Some(self.get_pin_mut(index)?.get_mut())
    }
    /// Get a pinned element from the segmented vector through the given `index`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::pin::Pin;
    ///
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::new();
    /// vector.push(async {});
    /// let future: Pin<_> = vector.get_pin_mut(0).unwrap();
    ///
    /// assert!(vector.get_pin_mut(42).is_none());
    /// ```
    pub fn get_pin_mut(&mut self, index: usize) -> Option<Pin<&mut T>> {
        if index < self.len() {
            let (slot, offset, _) = calculate_key(index);
            // SAFETY:
            // * pin: We are fetching an element from a Box<[T]> which this type
            //   guarantees will never move.
            // * uninit: We've checked the length above and know that we are
            //   withing the range of initialized elements.
            unsafe {
                Some(Pin::new_unchecked(
                    self.slots[slot][offset].assume_init_mut(),
                ))
            }
        } else {
            None
        }
    }
    /// Push an element into the segmented vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::pin::Pin;
    ///
    /// use unicycle::pin_vec::PinVec;
    ///
    /// let mut vector = PinVec::new();
    /// vector.push(async {});
    /// let future: Pin<_> = vector.get_pin_mut(0).unwrap();
    ///
    /// assert!(vector.get_pin_mut(42).is_none());
    /// ```
    pub fn push(&mut self, item: T) {
        let (slot, offset, slot_len) = calculate_key(self.len);
        // `len` always lands either inside the last slot or at the start of
        // the next one, so at most one new slot is ever needed.
        if slot == self.slots.len() {
            let slot = (0..slot_len)
                .map(|_| MaybeUninit::uninit())
                .collect::<Box<[_]>>();
            self.slots.push(slot);
        }
        self.slots[slot][offset].write(item);
        self.len += 1;
    }
}
impl<T> Extend<T> for PinVec<T> {
    /// Append every item produced by `iter`, in order.
    #[inline]
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
    {
        iter.into_iter().for_each(|item| self.push(item));
    }
}
impl<T> Drop for PinVec<T> {
    fn drop(&mut self) {
        // `clear` drops exactly the initialized prefix of every slot.
        self.clear();
    }
}
impl<T> Index<usize> for PinVec<T> {
    type Output = T;
    // Panics if `index >= len`, matching `Vec` indexing semantics.
    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}
impl<T: Unpin> IndexMut<usize> for PinVec<T> {
    // Mutable indexing requires `T: Unpin` (see `get_mut`); panics if
    // `index >= len`.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).unwrap()
    }
}
// Size of the first slot.
const FIRST_SLOT_SIZE: usize = 16;
// The initial number of bits to ignore for the first slot
// (log2(FIRST_SLOT_SIZE) = 4 on a 64-bit target).
const FIRST_SLOT_MASK: usize =
    std::mem::size_of::<usize>() * 8 - FIRST_SLOT_SIZE.leading_zeros() as usize - 1;
/// Calculate the key as a (slot, offset, len) tuple.
///
/// Keys 0..16 live in slot 0; each subsequent slot doubles in size, so slot
/// `s > 0` covers keys `16 << (s - 1) .. 16 << s`.
const fn calculate_key(key: usize) -> (usize, usize, usize) {
    // The top bit must be clear so the shifts below cannot overflow.
    assert!(key < (1usize << (mem::size_of::<usize>() * 8 - 1)));
    let bits = mem::size_of::<usize>() * 8;
    let slot = (bits - key.leading_zeros() as usize).saturating_sub(FIRST_SLOT_MASK);
    if key < FIRST_SLOT_SIZE {
        (slot, key, FIRST_SLOT_SIZE)
    } else {
        let start = FIRST_SLOT_SIZE << (slot - 1);
        (slot, key - start, (FIRST_SLOT_SIZE << slot) - start)
    }
}
#[cfg(test)]
mod tests {
    use crate::pin_vec::calculate_key;
    use super::PinVec;
    // Exhaustively checks slot/offset/len boundaries for every power-of-two
    // slot up to the top representable key.
    #[test]
    fn key_test() {
        // NB: range of the first slot.
        assert_eq!((0, 0, 16), calculate_key(0));
        assert_eq!((0, 15, 16), calculate_key(15));
        for i in 4..=62 {
            let end_range = 1usize << i;
            // First and last key of each doubling slot.
            assert_eq!((i - 3, 0, end_range), calculate_key(end_range));
            assert_eq!(
                (i - 3, end_range - 1, end_range),
                calculate_key((1usize << (i + 1)) - 1)
            );
        }
    }
    #[test]
    fn run_destructors() {
        let mut destructor_ran = false;
        // Sets the referenced flag when dropped.
        struct RunDestructor<'a>(&'a mut bool);
        impl<'a> Drop for RunDestructor<'a> {
            fn drop(&mut self) {
                *self.0 = true;
            }
        }
        {
            // Make sure PinVec runs the destructors
            let mut v = PinVec::new();
            v.push(RunDestructor(&mut destructor_ran));
        }
        assert!(destructor_ran);
    }
}
|
// # The Rust Programming Language
//
// You made it! That was a sizable chapter: you learned about variables, scalar and compound data
// types, functions, comments, if expressions, and loops! If you want to practice with the concepts
// discussed in this chapter, try building programs to do the following:
//
// - Generate the nth Fibonacci number.
//
// [Source](https://doc.rust-lang.org/book/ch03-05-control-flow.html)
//
use std::io::{self, Write};
fn main() {
    // Read the desired index from the user, then print that Fibonacci number.
    println!("{}", fibonacci(prompt_for_user_input()));
}
/// Prompt on stdout and read lines from stdin until one parses as a `usize`.
fn prompt_for_user_input() -> usize {
    loop {
        print!("nth fibonacci number > ");
        // Flush so the prompt appears before we block on stdin.
        let _ = io::stdout().flush();
        let mut entry = String::new();
        io::stdin()
            .read_line(&mut entry)
            .expect("Failed to read line");
        if let Ok(num) = entry.trim().parse() {
            return num;
        }
        println!("Please enter a valid number");
    }
}
/// Returns the nth Fibonacci number, with fib(0) = 0 and fib(1) = fib(2) = 1.
fn fibonacci(nth: usize) -> i64 {
    if nth == 0 {
        return 0;
    }
    // Walk the pair (previous, current) forward from (fib(0), fib(1)),
    // performing the same sequence of additions as the original recursion.
    let (mut previous, mut current) = (0i64, 1i64);
    for _ in 1..nth {
        let next = previous + current;
        previous = current;
        current = next;
    }
    current
}
|
//! This is just an example.
use kvstore::{KVStore, Operations};
fn main() {
    // Open (or create) a store rooted in the current directory.
    let mut kvs = KVStore::new(".").unwrap();
    // Exercise the three basic operations: insert, lookup, remove.
    kvs.insert(String::from("key"), 2 as i32).unwrap();
    kvs.lookup::<String, i32>(String::from("key")).unwrap();
    kvs.remove::<String, i32>(String::from("key")).unwrap();
}
|
// Copyright (c) 2017 Martijn Rijkeboer <mrr@sru-systems.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Library for hashing passwords using
//! [Argon2](https://github.com/P-H-C/phc-winner-argon2), the password-hashing
//! function that won the
//! [Password Hashing Competition (PHC)](https://password-hashing.net).
//!
//! # Usage
//!
//! To use this crate, add the following to your Cargo.toml:
//!
//! ```toml
//! [dependencies]
//! rust-argon2 = "2.0"
//! ```
//!
//! And the following to your crate root:
//!
//! ```rust
//! extern crate argon2;
//! ```
//!
//! # Examples
//!
//! Create a password hash using the defaults and verify it:
//!
//! ```rust
//! use argon2::{self, Config};
//!
//! let password = b"password";
//! let salt = b"randomsalt";
//! let config = Config::default();
//! let hash = argon2::hash_encoded(password, salt, &config).unwrap();
//! let matches = argon2::verify_encoded(&hash, password).unwrap();
//! assert!(matches);
//! ```
//!
//! Create a password hash with custom settings and verify it:
//!
//! ```rust
//! use argon2::{self, Config, Variant, Version};
//!
//! let password = b"password";
//! let salt = b"othersalt";
//! let config = Config {
//! variant: Variant::Argon2i,
//! version: Version::Version13,
//! mem_cost: 65536,
//! time_cost: 10,
//! lanes: 4,
//! secret: &[],
//! ad: &[],
//! hash_length: 32
//! };
//! let hash = argon2::hash_encoded(password, salt, &config).unwrap();
//! let matches = argon2::verify_encoded(&hash, password).unwrap();
//! assert!(matches);
//! ```
//!
//! # Limitations
//!
//! This crate has the same limitation as the `blake2-rfc` crate that it uses.
//! It does not attempt to clear potentially sensitive data from its work
//! memory. To do so correctly without a heavy performance penalty would
//! require help from the compiler. It's better to not attempt to do so than to
//! present a false assurance.
//!
//! This version uses the standard implementation and does not yet implement
//! optimizations. Therefore, it is not the fastest implementation available.
mod argon2;
mod block;
mod common;
mod config;
mod context;
mod core;
mod decoded;
mod encoding;
mod error;
mod memory;
mod result;
mod variant;
mod version;
pub use crate::argon2::*;
pub use crate::config::Config;
pub use crate::error::Error;
pub use crate::result::Result;
pub use crate::variant::Variant;
pub use crate::version::Version;
|
use nabi;
use abi;
use handle::Handle;
use channel::WriteChannel;
/// A kernel interrupt object, wrapping the handle returned by the ABI call.
pub struct Interrupt(Handle);
impl Interrupt {
    /// Registers an interrupt for `vector` whose events are delivered over
    /// `channel`, returning a handle-backed `Interrupt` on success.
    pub fn create(channel: WriteChannel, vector: u8) -> nabi::Result<Interrupt> {
        // (channel.0).0 extracts the raw u32 of the channel's underlying
        // handle — assumes WriteChannel is a newtype over Handle (TODO confirm).
        let res: nabi::Result<u32> = unsafe {
            abi::interrupt_create((channel.0).0, vector)
        }.into();
        res.map(|handle| Interrupt(Handle(handle)))
    }
    /// Acknowledges a received interrupt so further ones can be delivered.
    pub fn ack(&self) -> nabi::Result<()> {
        let res: nabi::Result<u32> = unsafe {
            abi::interrupt_ack((self.0).0)
        }.into();
        res.map(|_| ())
    }
}
/// User-Agent header value sent with API requests.
// `'static` is implied on the type of a `static` item, so spelling it out
// is redundant (clippy: redundant_static_lifetimes).
pub static USER_AGENT: &str = "cfdnsupdater/0.1.0";
|
use projecteuler::binomial;
use projecteuler::helper;
fn main() {
    // Benchmark both solutions and check them against the known answer
    // (presumably Project Euler 113, non-bouncy numbers below 10^100 —
    // confirm against the repository's problem index).
    helper::check_bench(|| solve(100));
    assert_eq!(solve(100), 51161058134250);
    helper::check_bench(|| solve_2(100));
    assert_eq!(solve_2(100), 51161058134250);
    dbg!(solve(100));
}
// Closed-form alternative: also works, but is much slower than `solve`
// (about factor 70 if one is to believe the benchmarks).
//
// For each length `i`, the number of non-decreasing numbers is C(8+i, i)
// and of non-increasing numbers C(9+i, i); the 10 corrects for the
// double-counted constant-digit numbers and the all-zero string.
fn solve_2(digits: usize) -> usize {
    (1..=digits)
        .map(|i| {
            binomial::binomial_coefficient(8 + i, i)
                + binomial::binomial_coefficient(9 + i, i)
                - 10
        })
        .sum()
}
// Counts non-bouncy numbers with up to `digits` digits.
//
// Idea: recursion on number of digits. Increasing and decreasing counts
// both include the constant-digit numbers (11, 222, ...), of which there
// are 9 per length, so subtract 9 * digits once.
fn solve(digits: usize) -> usize {
    count_increasing(digits) + count_decreasing(digits) - 9 * digits
}
// Counts numbers with non-decreasing digits having at most `digits` digits.
//
// `counts[d]` holds how many non-decreasing strings of the current length
// start with digit d+1 (leading digits 1..=9). Each pass turns the array
// into its prefix sums, extending every string by one digit.
fn count_increasing(digits: usize) -> usize {
    let mut counts = [1usize; 9];
    let mut shorter_total = 0;
    for _ in 1..digits {
        let mut running = 0;
        for slot in counts.iter_mut() {
            running += *slot;
            *slot = running;
        }
        // `running` now equals the total for the length just completed.
        shorter_total += running;
    }
    shorter_total + counts.iter().sum::<usize>()
}
// Counts numbers with non-increasing digits having at most `digits` digits.
//
// Mirror image of `count_increasing`: ten digit slots (0..=9), with the
// prefix sums taken from the high end. `counts[0]` starts at 0 because a
// bare leading zero is not a number.
fn count_decreasing(digits: usize) -> usize {
    let mut counts = [1usize; 10];
    counts[0] = 0;
    let mut shorter_total = 0;
    for _ in 1..digits {
        let mut running = 0;
        for slot in counts.iter_mut().rev() {
            running += *slot;
            *slot = running;
        }
        shorter_total += running;
    }
    shorter_total + counts.iter().sum::<usize>()
}
|
use core::hash::{Hash, Hasher};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use crate::{handle::Handle, registry::Registry};
use indexmap::IndexMap;
use metrics::{Key, Recorder, Unit};
/// Metric kinds.
///
/// Used to differentiate otherwise-identical `Key`s that belong to
/// different metric types (see `DifferentiatedKey`).
#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy, Ord, PartialOrd)]
pub enum MetricKind {
    /// Counter.
    Counter,
    /// Gauge.
    Gauge,
    /// Histogram.
    Histogram,
}
// A metric key tagged with its kind so that, e.g., a counter and a gauge
// registered under the same name remain distinct map keys.
#[derive(Eq, PartialEq, Hash, Clone)]
struct DifferentiatedKey(MetricKind, Key);
impl DifferentiatedKey {
    // Splits the key back into its kind and the underlying `Key`.
    pub fn into_parts(self) -> (MetricKind, Key) {
        (self.0, self.1)
    }
}
/// A point-in-time value for a metric exposing raw values.
#[derive(Debug, PartialEq)]
pub enum DebugValue {
    /// Counter.
    Counter(u64),
    /// Gauge.
    Gauge(f64),
    /// Histogram.
    // Raw values as returned by `Handle::read_histogram`.
    Histogram(Vec<u64>),
}
// We don't care that much about total equality nuances here.
impl Eq for DebugValue {}
impl Hash for DebugValue {
    fn hash<H: Hasher>(&self, state: &mut H) {
        match self {
            Self::Counter(val) => val.hash(state),
            Self::Gauge(val) => {
                // Whatever works, we don't really care in here...
                // f64 is not Hash; hash its raw bytes, collapsing every
                // non-normal value (zero, subnormal, NaN, infinity) to the
                // byte pattern of 0.0.
                if val.is_normal() {
                    val.to_ne_bytes().hash(state)
                } else {
                    0f64.to_ne_bytes().hash(state)
                }
            }
            Self::Histogram(val) => val.hash(state),
        }
    }
}
/// Captures point-in-time snapshots of `DebuggingRecorder`.
pub struct Snapshotter {
    // Shared with the recorder; holds the live metric handles.
    registry: Arc<Registry<DifferentiatedKey, Handle>>,
    // Registration-ordered set of known metrics (IndexMap keeps order).
    metrics: Arc<Mutex<IndexMap<DifferentiatedKey, ()>>>,
    // Optional unit recorded per metric.
    units: Arc<Mutex<HashMap<DifferentiatedKey, Unit>>>,
    // Optional description recorded per metric.
    descriptions: Arc<Mutex<HashMap<DifferentiatedKey, &'static str>>>,
}
impl Snapshotter {
    /// Takes a snapshot of the recorder.
    ///
    /// Returns, in metric registration order, the kind, key, optional
    /// unit, optional description, and current raw value of every
    /// registered metric that has a live handle in the registry.
    pub fn snapshot(
        &self,
    ) -> Vec<(
        MetricKind,
        Key,
        Option<Unit>,
        Option<&'static str>,
        DebugValue,
    )> {
        let mut snapshot = Vec::new();
        let handles = self.registry.get_handles();
        let metrics = {
            let metrics = self.metrics.lock().expect("metrics lock poisoned");
            metrics.clone()
        };
        // Lock the metadata maps once for the whole loop instead of
        // re-locking them for every metric.
        let units = self.units.lock().expect("units lock poisoned");
        let descriptions = self
            .descriptions
            .lock()
            .expect("descriptions lock poisoned");
        for (dkey, _) in metrics.into_iter() {
            if let Some(handle) = handles.get(&dkey) {
                let unit = units.get(&dkey).cloned();
                let description = descriptions.get(&dkey).cloned();
                let (kind, key) = dkey.into_parts();
                let value = match kind {
                    MetricKind::Counter => DebugValue::Counter(handle.read_counter()),
                    MetricKind::Gauge => DebugValue::Gauge(handle.read_gauge()),
                    MetricKind::Histogram => DebugValue::Histogram(handle.read_histogram()),
                };
                snapshot.push((kind, key, unit, description, value));
            }
        }
        snapshot
    }
}
/// A simplistic recorder that can be installed and used for debugging or testing.
///
/// Callers can easily take snapshots of the metrics at any given time and get access
/// to the raw values.
pub struct DebuggingRecorder {
    // Live metric handles, keyed by (kind, key).
    registry: Arc<Registry<DifferentiatedKey, Handle>>,
    // Registration-ordered set of known metrics.
    metrics: Arc<Mutex<IndexMap<DifferentiatedKey, ()>>>,
    // Optional unit recorded per metric.
    units: Arc<Mutex<HashMap<DifferentiatedKey, Unit>>>,
    // Optional description recorded per metric.
    descriptions: Arc<Mutex<HashMap<DifferentiatedKey, &'static str>>>,
}
impl DebuggingRecorder {
/// Creates a new `DebuggingRecorder`.
pub fn new() -> DebuggingRecorder {
DebuggingRecorder {
registry: Arc::new(Registry::new()),
metrics: Arc::new(Mutex::new(IndexMap::new())),
units: Arc::new(Mutex::new(HashMap::new())),
descriptions: Arc::new(Mutex::new(HashMap::new())),
}
}
/// Gets a `Snapshotter` attached to this recorder.
pub fn snapshotter(&self) -> Snapshotter {
Snapshotter {
registry: self.registry.clone(),
metrics: self.metrics.clone(),
units: self.units.clone(),
descriptions: self.descriptions.clone(),
}
}
fn register_metric(&self, rkey: DifferentiatedKey) {
let mut metrics = self.metrics.lock().expect("metrics lock poisoned");
let _ = metrics.insert(rkey.clone(), ());
}
fn insert_unit_description(
&self,
rkey: DifferentiatedKey,
unit: Option<Unit>,
description: Option<&'static str>,
) {
if let Some(unit) = unit {
let mut units = self.units.lock().expect("units lock poisoned");
let uentry = units.entry(rkey.clone()).or_insert_with(|| unit.clone());
*uentry = unit;
}
if let Some(description) = description {
let mut descriptions = self.descriptions.lock().expect("description lock poisoned");
let dentry = descriptions.entry(rkey).or_insert_with(|| description);
*dentry = description;
}
}
/// Installs this recorder as the global recorder.
pub fn install(self) -> Result<(), metrics::SetRecorderError> {
metrics::set_boxed_recorder(Box::new(self))
}
}
impl Recorder for DebuggingRecorder {
    /// Registers a counter, recording its metadata and creating a handle.
    fn register_counter(&self, key: Key, unit: Option<Unit>, description: Option<&'static str>) {
        let rkey = DifferentiatedKey(MetricKind::Counter, key);
        self.register_metric(rkey.clone());
        self.insert_unit_description(rkey.clone(), unit, description);
        // Pass the constructor directly; the closure wrapper was redundant
        // (clippy: redundant_closure).
        self.registry.op(rkey, |_| {}, Handle::counter)
    }
    /// Registers a gauge, recording its metadata and creating a handle.
    fn register_gauge(&self, key: Key, unit: Option<Unit>, description: Option<&'static str>) {
        let rkey = DifferentiatedKey(MetricKind::Gauge, key);
        self.register_metric(rkey.clone());
        self.insert_unit_description(rkey.clone(), unit, description);
        self.registry.op(rkey, |_| {}, Handle::gauge)
    }
    /// Registers a histogram, recording its metadata and creating a handle.
    fn register_histogram(&self, key: Key, unit: Option<Unit>, description: Option<&'static str>) {
        let rkey = DifferentiatedKey(MetricKind::Histogram, key);
        self.register_metric(rkey.clone());
        self.insert_unit_description(rkey.clone(), unit, description);
        self.registry.op(rkey, |_| {}, Handle::histogram)
    }
    /// Increments a counter, implicitly registering it if needed.
    fn increment_counter(&self, key: Key, value: u64) {
        let rkey = DifferentiatedKey(MetricKind::Counter, key);
        self.register_metric(rkey.clone());
        self.registry.op(
            rkey,
            |handle| handle.increment_counter(value),
            Handle::counter,
        )
    }
    /// Updates a gauge, implicitly registering it if needed.
    fn update_gauge(&self, key: Key, value: f64) {
        let rkey = DifferentiatedKey(MetricKind::Gauge, key);
        self.register_metric(rkey.clone());
        self.registry.op(
            rkey,
            |handle| handle.update_gauge(value),
            Handle::gauge,
        )
    }
    /// Records a histogram sample, implicitly registering it if needed.
    fn record_histogram(&self, key: Key, value: u64) {
        let rkey = DifferentiatedKey(MetricKind::Histogram, key);
        self.register_metric(rkey.clone());
        self.registry.op(
            rkey,
            |handle| handle.record_histogram(value),
            Handle::histogram,
        )
    }
}
|
use std::error;
use fat;
pub fn list_files(args: &[String])
-> Result<(), Box<error::Error>>
{
expect_args!(args, 1);
let image_fn = args[0].clone();
let image = fat::Image::from_file(image_fn)?;
println!(" Volume {}", image.volume_label()?);
println!(" Volume has {} bytes per sector\n", image.sector_size());
let mut file_count = 0;
let mut size_total = 0;
for entry in image.root_entries() {
if entry.rest_are_free() {
break;
} else if entry.is_free() || entry.is_volume_label() {
continue;
}
file_count += 1;
size_total += entry.file_size;
println!(
"{}\t{}\t{}\t\t{}",
entry.last_write_date,
entry.last_write_time,
entry.file_size,
entry.filename().unwrap_or("????????.???".to_string()),
);
}
println!("\t{} File(s)\t\t{} bytes", file_count, size_total);
Ok(())
}
|
use crate::resources::Resources;
use crate::paths::get_save_path;
use serde_derive::{Deserialize, Serialize};
/// Persistent save-game data, serialized to `data.bin` with bincode.
///
/// The hand-written `Default` impl produced exactly the derived defaults
/// (`false` / `0`), so it is replaced by `#[derive(Default)]`.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct SavedData {
    // Whether the infinite game mode has been unlocked.
    is_infinite_unlocked: bool,
    // Highest wave the player has reached.
    wave_record: usize,
}
/// Returns whether the infinite game mode has been unlocked.
///
/// Panics if `SavedData` has not been inserted into `resources`.
pub fn is_infinite_unlocked(resources: &Resources) -> bool {
    let d = resources
        .fetch::<SavedData>()
        .expect("Should have SavedData...");
    d.is_infinite_unlocked
}
/// Returns the highest wave the player has reached.
///
/// Panics if `SavedData` has not been inserted into `resources`.
pub fn get_wave_record(resources: &Resources) -> usize {
    let d = resources
        .fetch::<SavedData>()
        .expect("Should have SavedData...");
    d.wave_record
}
pub fn save_new_wave_record(resources: &Resources, new_record: usize) -> Result<(), anyhow::Error> {
let mut d = resources
.fetch_mut::<SavedData>()
.expect("Should have SavedData...");
if new_record > d.wave_record {
d.wave_record = new_record;
}
let base_path = get_save_path();
let save_path = base_path.join("data.bin");
let data = bincode::serialize(&*d)?;
std::fs::write(save_path, data)?;
Ok(())
}
pub fn save_unlocked(resources: &Resources) -> Result<(), anyhow::Error> {
let mut d = resources
.fetch_mut::<SavedData>()
.expect("Should have SavedData...");
d.is_infinite_unlocked = true;
let base_path = get_save_path();
let save_path = base_path.join("data.bin");
let data = bincode::serialize(&*d)?;
std::fs::write(save_path, data)?;
Ok(())
}
/// Loads saved data from `<save dir>/data.bin`.
///
/// If the file is missing or cannot be deserialized, falls back to the
/// defaults and best-effort writes a fresh save file, logging (but not
/// propagating) any write failure.
pub fn read_saved_data() -> SavedData {
    let base_path = get_save_path();
    let save_path = base_path.join("data.bin");
    if let Ok(data) = std::fs::read(&save_path) {
        if let Ok(saved) = bincode::deserialize(&data) {
            return saved;
        }
    }
    let d = SavedData::default();
    // Serialization of a plain struct is not expected to fail; a failure
    // here would be a bug, hence the expect.
    if let Err(e) = std::fs::write(save_path, bincode::serialize(&d).expect("Error here...")) {
        error!("Cannot save game data = {:?}", e);
    }
    d
}
|
// Copyright 2023 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::sync::Arc;
use common_exception::Result;
use common_expression::DataBlock;
use common_pipeline_core::processors::port::InputPort;
use common_pipeline_core::processors::port::OutputPort;
use common_pipeline_core::processors::processor::Event;
use common_pipeline_core::processors::Processor;
#[async_trait::async_trait]
pub trait AsyncAccumulatingTransform: Send {
    /// Processor name reported by the wrapping transformer.
    const NAME: &'static str;
    /// Consumes one input block; may emit an output block now or keep
    /// accumulating and emit later (returning `None`).
    async fn transform(&mut self, data: DataBlock) -> Result<Option<DataBlock>>;
    /// Called exactly once when no more input will arrive; may emit a final
    /// block. (The wrapper in this file always passes `true` for `_output`.)
    async fn on_finish(&mut self, _output: bool) -> Result<Option<DataBlock>> {
        Ok(None)
    }
}
/// Pipeline processor adapter that drives an `AsyncAccumulatingTransform`.
pub struct AsyncAccumulatingTransformer<T: AsyncAccumulatingTransform + 'static> {
    inner: T,
    input: Arc<InputPort>,
    output: Arc<OutputPort>,
    // Ensures `on_finish` runs exactly once.
    called_on_finish: bool,
    // Block pulled from input, awaiting transform.
    input_data: Option<DataBlock>,
    // Block produced by transform, awaiting push downstream.
    output_data: Option<DataBlock>,
}
impl<T: AsyncAccumulatingTransform + 'static> AsyncAccumulatingTransformer<T> {
    /// Wraps `inner` into a boxed pipeline `Processor` connected to the
    /// given input and output ports.
    pub fn create(input: Arc<InputPort>, output: Arc<OutputPort>, inner: T) -> Box<dyn Processor> {
        Box::new(Self {
            inner,
            input,
            output,
            input_data: None,
            output_data: None,
            called_on_finish: false,
        })
    }
}
#[async_trait::async_trait]
impl<T: AsyncAccumulatingTransform + 'static> Processor for AsyncAccumulatingTransformer<T> {
    fn name(&self) -> String {
        String::from(T::NAME)
    }
    fn as_any(&mut self) -> &mut dyn Any {
        self
    }
    // Scheduling state machine: decides whether the processor needs data,
    // has data to hand downstream, needs async work, or is finished.
    fn event(&mut self) -> Result<Event> {
        if self.output.is_finished() {
            // Downstream stopped consuming; still run `on_finish` once
            // (via Event::Async) before tearing down.
            if !self.called_on_finish {
                return Ok(Event::Async);
            }
            self.input.finish();
            return Ok(Event::Finished);
        }
        if !self.output.can_push() {
            self.input.set_not_need_data();
            return Ok(Event::NeedConsume);
        }
        // Flush any pending output block before pulling more input.
        if let Some(data_block) = self.output_data.take() {
            self.output.push_data(Ok(data_block));
            return Ok(Event::NeedConsume);
        }
        // A queued input block means async work (transform) is pending.
        if self.input_data.is_some() {
            return Ok(Event::Async);
        }
        if self.input.has_data() {
            self.input_data = Some(self.input.pull_data().unwrap()?);
            return Ok(Event::Async);
        }
        if self.input.is_finished() {
            // Input exhausted: run `on_finish` once, then finish output.
            return match !self.called_on_finish {
                true => Ok(Event::Async),
                false => {
                    self.output.finish();
                    Ok(Event::Finished)
                }
            };
        }
        self.input.set_need_data();
        Ok(Event::NeedData)
    }
    // Performs the async work chosen by `event`: transform one input block,
    // or invoke `on_finish` exactly once when input is done.
    async fn async_process(&mut self) -> Result<()> {
        if let Some(data_block) = self.input_data.take() {
            self.output_data = self.inner.transform(data_block).await?;
            return Ok(());
        }
        if !self.called_on_finish {
            self.called_on_finish = true;
            self.output_data = self.inner.on_finish(true).await?;
        }
        Ok(())
    }
}
|
use super::colorspace::ColorARGB;
/// Restricts `value` to the inclusive range [`min_value`, `max_value`].
///
/// Only `PartialOrd` is required, so this works for floats too. The upper
/// bound is checked first, matching the original comparison order.
fn clamp<T: PartialOrd>(value: T, min_value: T, max_value: T) -> T {
    if value > max_value {
        max_value
    } else if value < min_value {
        min_value
    } else {
        value
    }
}
// Converts a packed 8-bit ARGB pixel into normalized float channels in
// [0.0, 1.0], returned in (a, r, g, b) order.
fn channels_float(arg: ColorARGB<u8>) -> (f64, f64, f64, f64) {
    let unit = |c: u8| f64::from(c) / 255.0;
    (unit(arg.a), unit(arg.r), unit(arg.g), unit(arg.b))
}
/// Supported Porter-Duff compositing modes.
pub enum Mode {
    /// Source-over-destination compositing.
    Over,
}
/// A pixel-level compositing operation: (source, destination) -> result.
pub type Operation = fn(ColorARGB<u8>, ColorARGB<u8>) -> ColorARGB<u8>;
impl Mode {
    // Maps a mode to its pixel operation.
    fn operation(&self) -> Operation {
        match *self {
            Mode::Over => porter_duff_over,
        }
    }
}
/// Composites pixel `apx` over pixel `bpx` using the Porter-Duff "over"
/// operator on non-premultiplied ARGB channels.
///
/// When both pixels are fully transparent the composite alpha is zero and
/// the color channels are undefined; this returns fully transparent black
/// instead of dividing 0/0 (NaN), which previously tripped the range
/// asserts below and panicked.
pub fn porter_duff_over(apx: ColorARGB<u8>, bpx: ColorARGB<u8>) -> ColorARGB<u8> {
    let (aal, are, agr, abl) = channels_float(apx);
    let (bal, bre, bgr, bbl) = channels_float(bpx);
    // Composite alpha: a = a_src + a_dst * (1 - a_src).
    let a = aal + bal * (1.0 - aal);
    if a == 0.0 {
        return ColorARGB::new_argb(0, 0, 0, 0);
    }
    // Un-premultiplied "over" for each color channel.
    let r = (are * aal + bre * bal * (1.0 - aal)) / a;
    let g = (agr * aal + bgr * bal * (1.0 - aal)) / a;
    let b = (abl * aal + bbl * bal * (1.0 - aal)) / a;
    assert!(0.0 <= r);
    assert!(0.0 <= g);
    assert!(0.0 <= b);
    assert!(r <= 1.0);
    assert!(g <= 1.0);
    assert!(b <= 1.0);
    let a = clamp((255.0 * a) as u32, 0, 255) as u8;
    let r = clamp((255.0 * r) as u32, 0, 255) as u8;
    let g = clamp((255.0 * g) as u32, 0, 255) as u8;
    let b = clamp((255.0 * b) as u32, 0, 255) as u8;
    ColorARGB::new_argb(a, r, g, b)
}
/// Composites `src` with `dst` into `tgt`, pixel by pixel, using `mode`.
///
/// # Safety
///
/// The `u32` slices are reinterpreted as `ColorARGB<u8>` via `transmute`;
/// the caller must guarantee `ColorARGB<u8>` is exactly 4 bytes with the
/// packing the pixel data uses (presumably `#[repr(C)]` in the colorspace
/// module — TODO confirm).
pub unsafe fn porter_duff(tgt: &mut [u32], src: &[u32], dst: &[u32], mode: Mode) -> Result<(), &'static str> {
    use std::mem::transmute;
    if tgt.len() != src.len() {
        return Err("tgt/src len mismatch");
    }
    if dst.len() != src.len() {
        return Err("dst/src len mismatch");
    }
    // SAFETY (caller contract): same length, 4-byte element layout.
    let tgt: &mut [ColorARGB<u8>] = transmute(tgt);
    let src: &[ColorARGB<u8>] = transmute(src);
    let dst: &[ColorARGB<u8>] = transmute(dst);
    let op_func = mode.operation();
    for (tpx, (spx, dpx)) in tgt.iter_mut().zip(src.iter().zip(dst.iter())) {
        *tpx = op_func(*spx, *dpx);
    }
    Ok(())
}
/// In-place variant: `tgt` is both source and output, composited with `dst`.
///
/// # Safety
///
/// Same layout contract as [`porter_duff`]: the `u32` slices are
/// reinterpreted as 4-byte `ColorARGB<u8>` pixels via `transmute`.
pub unsafe fn porter_duff_inplace_dst(tgt: &mut [u32], dst: &[u32], mode: Mode) -> Result<(), &'static str> {
    use std::mem::transmute;
    if tgt.len() != dst.len() {
        return Err("tgt/dst len mismatch");
    }
    let tgt: &mut [ColorARGB<u8>] = transmute(tgt);
    let dst: &[ColorARGB<u8>] = transmute(dst);
    let op_func = mode.operation();
    for (tpx, dpx) in tgt.iter_mut().zip(dst.iter()) {
        // `tgt` plays the source role here.
        *tpx = op_func(*tpx, *dpx);
    }
    Ok(())
}
/// In-place variant: `tgt` is both destination and output, composited with `src`.
///
/// # Safety
///
/// Same layout contract as [`porter_duff`]: the `u32` slices are
/// reinterpreted as 4-byte `ColorARGB<u8>` pixels via `transmute`.
pub unsafe fn porter_duff_inplace_src(tgt: &mut [u32], src: &[u32], mode: Mode) -> Result<(), &'static str> {
    use std::mem::transmute;
    if tgt.len() != src.len() {
        return Err("tgt/src len mismatch");
    }
    let tgt: &mut [ColorARGB<u8>] = transmute(tgt);
    let src: &[ColorARGB<u8>] = transmute(src);
    let op_func = mode.operation();
    for (tpx, spx) in tgt.iter_mut().zip(src.iter()) {
        // `tgt` plays the destination role here.
        *tpx = op_func(*spx, *tpx);
    }
    Ok(())
}
|
// NOTE(review): presumably the direction of a trading position (long/short)
// — confirm against the callers; this chunk only shows the declaration.
#[derive(Serialize, Deserialize, Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub enum Direction {
    Long,
    Short
}
|
use common::aoc::{load_input, run_many, print_time, print_result};
use std::collections::HashMap;
fn main() {
    // Advent of Code day 14: parse the reaction chain once, then benchmark
    // part 1 (ORE per 1 FUEL) and part 2 (FUEL from one trillion ORE).
    let input = load_input("day14");
    let (chain, dur_parse) = run_many(1000, || ReactionChain::parse(&input));
    let (res_part1, dur_part1) = run_many(100, || chain.min_opf(1));
    let (res_part2, dur_part2) = run_many(100, || chain.max_fpo(1_000_000_000_000));
    print_result("P1", res_part1);
    print_result("P2", res_part2);
    print_time("Parse", dur_parse);
    print_time("P1", dur_part1);
    print_time("P2", dur_part2);
}
/// The reaction dependency graph: materials stored by index in `list`,
/// with `map` translating material names to indices.
struct ReactionChain {
    list: Vec<Material>,
    map: HashMap<String, usize>,
}
impl ReactionChain {
    // Returns the index of `name`, inserting a new empty material (and its
    // map entry) if it has not been seen yet.
    fn ensure(&mut self, name: &str) -> usize {
        if let Some(index) = self.map.get(name) {
            return *index;
        };
        let index = self.list.len();
        self.list.push(Material{
            amount: 0,
            dependencies: Vec::with_capacity(4),
            dependents: Vec::with_capacity(4),
        });
        self.map.insert(name.to_owned(), index);
        index
    }
    // Minimum ORE needed to produce `fuel_count` FUEL.
    //
    // Expands materials from FUEL toward ORE; a material is only expanded
    // once every material that depends on it is done, so its full required
    // amount is known before rounding up to whole reactions.
    fn min_opf(&self, fuel_count: u64) -> u64 {
        let mut amounts = vec![0; self.list.len()];
        let mut completed = vec![false; self.list.len()];
        let mut deferred: Vec<usize> = Vec::with_capacity(self.list.len());
        let mut remaining: Vec<usize> = (0..self.list.len()).collect();
        let ore_index = self.map["ORE"];
        let fuel_index = self.map["FUEL"];
        amounts[fuel_index] = fuel_count;
        // ORE is the sink; it is never expanded.
        remaining.swap_remove(ore_index);
        while remaining.len() > 0 {
            let mat_index = remaining.pop().unwrap();
            // Fabricate not until all of the material is present.
            let mut mise_en_place = true;
            for dep_index in self.list[mat_index].dependents.iter().cloned() {
                if !completed[dep_index] {
                    mise_en_place = false;
                    break;
                }
            }
            if !mise_en_place {
                // Save it for later.
                deferred.push(mat_index);
                // Before quitting, make sure that there aren't any deferred materials.
                if remaining.len() == 0 {
                    remaining.extend(deferred.iter());
                    deferred.clear();
                }
                continue;
            }
            // Break it up into dependencies.
            let material = &self.list[mat_index];
            let amount = amounts[mat_index];
            // Round up: reactions only run in whole multiples.
            let needed = if amount % material.amount == 0 { amount / material.amount } else { (amount / material.amount) + 1 };
            for dep in material.dependencies.iter() {
                amounts[dep.index] += dep.amount * needed;
            }
            // Mark as completed, thus allowing dependencies to be manufactured.
            completed[mat_index] = true;
            // Before quitting, make sure that there aren't any deferred materials.
            if remaining.len() == 0 {
                remaining.extend(deferred.iter());
                deferred.clear();
            }
        }
        amounts[ore_index]
    }
    // Maximum FUEL producible from `ore_count` ORE: march upward in coarse
    // steps, halving the step each time the ORE budget is overshot, until
    // the step reaches zero.
    fn max_fpo(&self, ore_count: u64) -> u64 {
        let mut last_good = 0;
        let mut step = 1000000;
        let mut current = 1;
        loop {
            let result = self.min_opf(current);
            if result > ore_count {
                current -= step;
                step /= 2;
                if step == 0 {
                    break;
                }
            } else {
                last_good = current;
            }
            current += step;
        }
        last_good
    }
    // Creates an empty chain with some pre-reserved capacity.
    fn new() -> ReactionChain {
        ReactionChain {
            map: HashMap::with_capacity(128),
            list: Vec::with_capacity(128),
        }
    }
    // Parses lines of the form "7 A, 1 B => 2 C" into the reaction graph.
    // ORE gets a synthetic amount of 1 so the rounding math in `min_opf`
    // works uniformly.
    fn parse(input: &str) -> ReactionChain {
        let mut chain = Self::new();
        let ore_index = chain.ensure("ORE");
        chain.list[ore_index].amount = 1;
        for line in input.lines() {
            if line.len() == 0 {
                continue;
            }
            let (left, right) = {
                let mut tokens = line.split("=>");
                (tokens.next().unwrap(), tokens.next().unwrap())
            };
            let (result_name, result_amount) = parse_mat_qty(right);
            let result_index = chain.ensure(result_name);
            chain.list[result_index].amount = result_amount;
            for token in left.split(',') {
                let (dependency_name, dependency_amount) = parse_mat_qty(token);
                let dependency_index = chain.ensure(dependency_name);
                chain.list[result_index].dependencies.push(Dependency{
                    index: dependency_index,
                    amount: dependency_amount,
                });
                chain.list[dependency_index].dependents.push(result_index);
            }
        }
        chain
    }
}
/// One material: the amount produced per reaction, its inputs, and which
/// materials consume it (used for expansion ordering).
struct Material {
    amount: u64,
    dependencies: Vec<Dependency>,
    dependents: Vec<usize>
}
/// One reaction input: which material and how much per reaction.
struct Dependency {
    index: usize,
    amount: u64,
}
/// Parses a token like `"9 ORE"` into its material name and quantity,
/// ignoring surrounding whitespace.
///
/// The original used `start_index == 0` as a "not yet seen" sentinel,
/// which mis-sliced any name that starts at index 0 and is longer than
/// one character (e.g. `"ORE 5"` parsed as `("RE", 5)`). An `Option`
/// start index fixes that.
fn parse_mat_qty(s: &str) -> (&str, u64) {
    let mut amount = 0u64;
    let mut start_index: Option<usize> = None;
    let mut end_index = 0;
    for (i, ch) in s.chars().enumerate() {
        match ch {
            'A'..='Z' => {
                if start_index.is_none() {
                    start_index = Some(i);
                }
                end_index = i;
            }
            '0'..='9' => {
                amount *= 10;
                amount += (ch as u8 - b'0') as u64;
            }
            _ => {}
        }
    }
    (&s[start_index.unwrap_or(0)..=end_index], amount)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Quantity/name tokenizer, including leading/trailing whitespace.
    #[test]
    fn test_parse_mat_qty() {
        assert_eq!(parse_mat_qty("9 ORE"), ("ORE", 9));
        assert_eq!(parse_mat_qty(" 1 GPVTF"), ("GPVTF", 1));
        assert_eq!(parse_mat_qty(" 7 PSHF "), ("PSHF", 7));
        assert_eq!(parse_mat_qty("179 ORE "), ("ORE", 179));
        assert_eq!(parse_mat_qty(" 2 A"), ("A", 2));
    }
    // The four sample reaction chains from the puzzle statement.
    const TEST_INPUT1: &str = "9 ORE => 2 A\n8 ORE => 3 B\n7 ORE => 5 C\n3 A, 4 B => 1 AB\n5 B, 7 C => 1 BC\n4 C, 1 A => 1 CA\n2 AB, 3 BC, 4 CA => 1 FUEL\n";
    const TEST_INPUT2: &str = "157 ORE => 5 NZVS\n165 ORE => 6 DCFZ\n44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL\n12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ\n179 ORE => 7 PSHF\n177 ORE => 5 HKGWZ\n7 DCFZ, 7 PSHF => 2 XJWVT\n165 ORE => 2 GPVTF\n3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT\n";
    const TEST_INPUT3: &str = "2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG\n17 NVRVD, 3 JNWZP => 8 VPVL\n53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL\n22 VJHF, 37 MNCFX => 5 FWMGM\n139 ORE => 4 NVRVD\n144 ORE => 7 JNWZP\n5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC\n5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV\n145 ORE => 6 MNCFX\n1 NVRVD => 8 CXFTF\n1 VJHF, 6 MNCFX => 4 RFSQX\n176 ORE => 6 VJHF";
    const TEST_INPUT4: &str = "171 ORE => 8 CNZTR\n7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL\n114 ORE => 4 BHXH\n14 VRPVC => 6 BMBT\n6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL\n6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT\n15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW\n13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW\n5 BMBT => 4 WPTQ\n189 ORE => 9 KTJDG\n1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP\n12 VRPVC, 27 CNZTR => 2 XDBXC\n15 KTJDG, 12 BHXH => 5 XCVML\n3 BHXH, 2 VRPVC => 7 MZWV\n121 ORE => 7 VRPVC\n7 XCVML => 6 RJRHP\n5 BHXH, 4 VRPVC => 5 LTCX";
    // Part 1: ORE cost of a single FUEL, against the published answers.
    #[test]
    fn test_part1() {
        let test1 = ReactionChain::parse(TEST_INPUT1);
        let test2 = ReactionChain::parse(TEST_INPUT2);
        let test3 = ReactionChain::parse(TEST_INPUT3);
        let test4 = ReactionChain::parse(TEST_INPUT4);
        assert_eq!(test1.min_opf(1), 165);
        assert_eq!(test2.min_opf(1), 13312);
        assert_eq!(test3.min_opf(1), 180697);
        assert_eq!(test4.min_opf(1), 2210736);
    }
    const PART2_GOAL: u64 = 1_000_000_000_000;
    // Part 2: max FUEL from one trillion ORE, against the published answers.
    #[test]
    fn test_part2() {
        let test2 = ReactionChain::parse(TEST_INPUT2);
        let test3 = ReactionChain::parse(TEST_INPUT3);
        let test4 = ReactionChain::parse(TEST_INPUT4);
        assert_eq!(test2.max_fpo(PART2_GOAL), 82892753);
        assert_eq!(test3.max_fpo(PART2_GOAL), 5586022);
        assert_eq!(test4.max_fpo(PART2_GOAL), 460664);
    }
}
|
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_xml_rs;
extern crate log;
use serde_xml_rs::{from_str, to_string, wrap_primitives};
use serde::ser::Serializer;
// Installs a stdout logger at Debug level using the pre-0.4 `log` API;
// repeated calls are harmless because the set_logger result is ignored.
fn init_logger() {
    use log::{LogLevel, LogMetadata, LogRecord};
    struct SimpleLogger;
    impl log::Log for SimpleLogger {
        fn enabled(&self, metadata: &LogMetadata) -> bool {
            metadata.level() <= LogLevel::Debug
        }
        fn log(&self, record: &LogRecord) {
            if self.enabled(record.metadata()) {
                println!("{} - {}", record.level(), record.args());
            }
        }
    }
    let _ = log::set_logger(|max_log_level| {
        max_log_level.set(log::LogLevelFilter::Debug);
        Box::new(SimpleLogger)
    });
}
// Simple two-field struct used across the deserialization tests.
#[derive(Debug, Deserialize, PartialEq)]
struct Item {
    name: String,
    source: String,
}
// Both fields supplied as XML attributes.
#[test]
fn simple_struct_from_attributes() {
    init_logger();
    let s = r##"
<item name="hello" source="world.rs" />
"##;
    let item: Item = from_str(s).unwrap();
    assert_eq!(
        item,
        Item {
            name: "hello".to_string(),
            source: "world.rs".to_string(),
        }
    );
}
// A wrapping element with repeated children deserializes to a Vec.
#[test]
fn multiple_roots_attributes() {
    init_logger();
    let s = r##"
<list>
<item name="hello" source="world.rs" />
<item name="hello" source="world.rs" />
</list>
"##;
    let item: Vec<Item> = from_str(s).unwrap();
    assert_eq!(
        item,
        vec![
            Item {
                name: "hello".to_string(),
                source: "world.rs".to_string(),
            },
            Item {
                name: "hello".to_string(),
                source: "world.rs".to_string(),
            },
        ]
    );
}
// Fields may mix attributes and child elements.
#[test]
fn simple_struct_from_attribute_and_child() {
    init_logger();
    let s = r##"
<item name="hello">
<source>world.rs</source>
</item>
"##;
    let item: Item = from_str(s).unwrap();
    assert_eq!(
        item,
        Item {
            name: "hello".to_string(),
            source: "world.rs".to_string(),
        }
    );
}
// A struct containing a collection of `Item`s under an <items> element.
#[derive(Debug, Deserialize, PartialEq)]
struct Project {
    name: String,
    items: Vec<Item>,
}
// Nested collection: <items> children populate `Project::items`.
#[test]
fn nested_collection() {
    init_logger();
    let s = r##"
<project name="my_project">
<items>
<Item name="hello1" source="world1.rs" />
<Item name="hello2" source="world2.rs" />
</items>
</project>
"##;
    let project: Project = from_str(s).unwrap();
    assert_eq!(
        project,
        Project {
            name: "my_project".to_string(),
            items: vec![
                Item {
                    name: "hello1".to_string(),
                    source: "world1.rs".to_string(),
                },
                Item {
                    name: "hello2".to_string(),
                    source: "world2.rs".to_string(),
                },
            ],
        }
    );
}
// Enum with newtype, struct, and unit variants, selected by element name.
#[derive(Debug, Deserialize, PartialEq)]
enum MyEnum {
    A(String),
    B { name: String, flag: bool },
    C,
}
// `$value` collects the element's children into the Vec.
#[derive(Debug, Deserialize, PartialEq)]
struct MyEnums {
    #[serde(rename = "$value")] items: Vec<MyEnum>,
}
// Heterogeneous children deserialize into the matching enum variants.
#[test]
fn collection_of_enums() {
    init_logger();
    let s = r##"
<enums>
<items>
<A>test</A>
<B name="hello" flag="t" />
<C />
</items>
</enums>
"##;
    let project: MyEnums = from_str(s).unwrap();
    assert_eq!(
        project,
        MyEnums {
            items: vec![
                MyEnum::A("test".to_string()),
                MyEnum::B {
                    name: "hello".to_string(),
                    flag: true,
                },
                MyEnum::C,
            ],
        }
    );
}
// Round-trip fixture: a <wrapper> element holding a list of <group>s.
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(rename = "wrapper")]
struct Wrapper {
    pub groups: Vec<Group>,
}
// Variant name is serialized as the element's text content.
#[derive(Debug, Deserialize, PartialEq, Serialize)]
pub enum Type {
    Simple,
    Complex,
}
// Helper function for serializing Vec<String> as <identity>element<identity>
fn serialize_with_item_name<S>(item: &Vec<String>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    wrap_primitives(item, serializer, "identity")
}
#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(rename = "group")]
pub struct Group {
    pub name: String,
    // `type` is a Rust keyword, so the field is renamed.
    #[serde(rename = "type")] pub _type: Type,
    #[serde(serialize_with = "serialize_with_item_name")] pub members: MemberList,
    pub active: bool,
}
// Conversion out of the newtype for callers that want the raw Vec.
impl ::std::convert::From<MemberList> for Vec<String> {
    fn from(x: MemberList) -> Self {
        x.0
    }
}
// Deref lets MemberList be used like a &Vec<String>.
impl ::std::ops::Deref for MemberList {
    type Target = Vec<String>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Manual Serialize so each member is wrapped in an <identity> element.
impl serde::ser::Serialize for MemberList {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serialize_with_item_name(&self.0, serializer)
    }
}
// `#[serde(...)]` is a derive helper attribute: it must come after the
// `#[derive(Deserialize)]` that introduces it, otherwise the compiler
// emits a future-incompatibility warning (derive helper used before it
// is introduced).
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename = "members")]
pub struct MemberList(Vec<String>);
// The newtype list deserializes directly from a <members> document.
#[test]
fn deserialize_newtype_list() {
    let s = "\
        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <members>\
        <identity>bill</identity>\
        <identity>bob</identity>\
        <identity>dave</identity>\
        </members>\
    ";
    let members: MemberList = from_str(s).unwrap();
    let member_list = MemberList(vec![
        "bill".to_string(),
        "bob".to_string(),
        "dave".to_string(),
    ]);
    assert_eq!(members, member_list);
}
// Full document: wrapped list nested inside a group inside a wrapper.
#[test]
fn deserialize_with_wrapped_list() {
    let s = r##"
<wrapper>
<groups>
<group>
<name>my group</name>
<type>Simple</type>
<members>
<identity>bill</identity>
<identity>bob</identity>
<identity>dave</identity>
</members>
<active>true</active>
</group>
</groups>
</wrapper>
"##;
    let wrapper: Wrapper = from_str(s).unwrap();
    assert_eq!(
        wrapper,
        Wrapper {
            groups: vec![
                Group {
                    name: "my group".to_string(),
                    _type: Type::Simple,
                    members: MemberList(vec![
                        "bill".to_string(),
                        "bob".to_string(),
                        "dave".to_string(),
                    ]),
                    active: true,
                },
            ],
        }
    );
}
// Serialization wraps each member in an <identity> element.
#[test]
fn serialize_with_wrapped_list() {
    let s = "\
        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <wrapper>\
        <groups>\
        <group>\
        <name>my group</name>\
        <type>Simple</type>\
        <members>\
        <identity>bill</identity>\
        <identity>bob</identity>\
        <identity>dave</identity>\
        </members>\
        <active>true</active>\
        </group>\
        </groups>\
        </wrapper>\
    ";
    let group = Wrapper {
        groups: vec![
            Group {
                name: "my group".to_string(),
                _type: Type::Simple,
                members: MemberList(vec![
                    "bill".to_string(),
                    "bob".to_string(),
                    "dave".to_string(),
                ]),
                active: true,
            },
        ],
    };
    assert_eq!(to_string(&group).unwrap(), s);
}
// An empty member list serializes as a self-closing <members /> element.
#[test]
fn serialize_with_empty_list() {
    let s = "\
        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\
        <wrapper>\
        <groups>\
        <group>\
        <name>my group</name>\
        <type>Complex</type>\
        <members />\
        <active>true</active>\
        </group>\
        </groups>\
        </wrapper>\
    ";
    let group = Wrapper {
        groups: vec![
            Group {
                name: "my group".to_string(),
                _type: Type::Complex,
                members: MemberList(vec![]),
                active: true,
            },
        ],
    };
    assert_eq!(to_string(&group).unwrap(), s);
}
// A self-closing <members/> element deserializes to an empty list.
#[test]
fn deserialize_with_empty_list() {
    let s = r##"
<wrapper>
<groups>
<group>
<name>my group</name>
<type>Complex</type>
<members/>
<active>true</active>
</group>
</groups>
</wrapper>
"##;
    let wrapper: Wrapper = from_str(s).unwrap();
    assert_eq!(
        wrapper,
        Wrapper {
            groups: vec![
                Group {
                    name: "my group".to_string(),
                    _type: Type::Complex,
                    members: MemberList(vec![]),
                    active: true,
                },
            ],
        }
    );
}
// Raw text where a wrapped list is expected must be rejected (the <type>
// field is also missing here, so deserialization fails either way).
#[test]
fn deserialize_with_badly_formed_list() {
    let s = r##"
<wrapper>
<groups>
<group>
<name>my group</name>
<members>THIS IS MALFORMED</members>
<active>true</active>
</group>
</groups>
</wrapper>
"##;
    let wrapper: Result<Wrapper, _> = from_str(s);
    assert!(wrapper.is_err());
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use async_recursion::async_recursion;
use common_ast::ast::BinaryOperator;
use common_ast::ast::Expr;
use common_ast::ast::Expr::Array;
use common_ast::ast::GroupBy;
use common_ast::ast::Identifier;
use common_ast::ast::Join;
use common_ast::ast::JoinCondition;
use common_ast::ast::JoinOperator;
use common_ast::ast::Literal;
use common_ast::ast::OrderByExpr;
use common_ast::ast::Query;
use common_ast::ast::SelectStmt;
use common_ast::ast::SelectTarget;
use common_ast::ast::SetExpr;
use common_ast::ast::SetOperator;
use common_ast::ast::TableReference;
use common_exception::ErrorCode;
use common_exception::Result;
use common_exception::Span;
use common_expression::type_check::common_super_type;
use common_expression::types::DataType;
use common_functions::BUILTIN_FUNCTIONS;
use crate::binder::join::JoinConditions;
use crate::binder::project_set::SrfCollector;
use crate::binder::scalar_common::split_conjunctions;
use crate::binder::CteInfo;
use crate::binder::ExprContext;
use crate::binder::Visibility;
use crate::optimizer::SExpr;
use crate::planner::binder::scalar::ScalarBinder;
use crate::planner::binder::BindContext;
use crate::planner::binder::Binder;
use crate::plans::BoundColumnRef;
use crate::plans::CastExpr;
use crate::plans::EvalScalar;
use crate::plans::Filter;
use crate::plans::JoinType;
use crate::plans::ScalarExpr;
use crate::plans::ScalarItem;
use crate::plans::UnionAll;
use crate::ColumnBinding;
use crate::IndexType;
// A normalized IR for the `SELECT` clause: the projection targets after
// rewriting, each paired with its bound scalar expression and output alias.
#[derive(Debug, Default)]
pub struct SelectList<'a> {
    // One entry per select target, in output order.
    pub items: Vec<SelectItem<'a>>,
}
#[derive(Debug)]
pub struct SelectItem<'a> {
    // The AST node this item was derived from.
    pub select_target: &'a SelectTarget,
    // The bound scalar expression producing this column.
    pub scalar: ScalarExpr,
    // Output column name for this item.
    pub alias: String,
}
impl Binder {
/// Bind a `SELECT` statement into an `SExpr` plan fragment plus the
/// `BindContext` describing its output columns.
///
/// Clauses are processed in semantic order: FROM -> PIVOT/UNPIVOT rewrite ->
/// WHERE -> set-returning functions -> select list -> GROUP BY -> HAVING ->
/// window -> DISTINCT -> ORDER BY -> final projection.
pub(super) async fn bind_select_stmt(
    &mut self,
    bind_context: &mut BindContext,
    stmt: &SelectStmt,
    order_by: &[OrderByExpr],
) -> Result<(SExpr, BindContext)> {
    // No FROM clause: bind against a one-row dummy table. Otherwise fold the
    // comma-separated table references into a left-deep cross-join tree.
    let (mut s_expr, mut from_context) = if stmt.from.is_empty() {
        self.bind_one_table(bind_context, stmt).await?
    } else {
        let cross_joins = stmt
            .from
            .iter()
            .cloned()
            .reduce(|left, right| TableReference::Join {
                span: None,
                join: Join {
                    op: JoinOperator::CrossJoin,
                    condition: JoinCondition::None,
                    left: Box::new(left),
                    right: Box::new(right),
                },
            })
            // Safe: `from` is non-empty here, so `reduce` always yields a value.
            .unwrap();
        self.bind_table_reference(bind_context, &cross_joins)
            .await?
    };
    // Rewrite PIVOT/UNPIVOT syntax sugar into a plain SELECT, if present.
    let mut rewriter = SelectRewriter::new(
        from_context.all_column_bindings(),
        self.name_resolution_ctx.unquoted_ident_case_sensitive,
    );
    let new_stmt = rewriter.rewrite(stmt)?;
    // Fall back to the original statement when no rewrite applied.
    let stmt = new_stmt.as_ref().unwrap_or(stmt);
    if let Some(expr) = &stmt.selection {
        s_expr = self.bind_where(&mut from_context, expr, s_expr).await?;
    }
    let window_order_by_exprs = self.fetch_window_order_by_expr(&stmt.select_list).await;
    // Collect set returning functions
    let set_returning_functions = {
        let mut collector = SrfCollector::new();
        stmt.select_list.iter().for_each(|item| {
            if let SelectTarget::AliasedExpr { expr, .. } = item {
                collector.visit(expr);
            }
        });
        collector.into_srfs()
    };
    // Bind set returning functions
    s_expr = self
        .bind_project_set(&mut from_context, &set_returning_functions, s_expr)
        .await?;
    // Generate an analyzed select list with the FROM context
    let mut select_list = self
        .normalize_select_list(&mut from_context, &stmt.select_list)
        .await?;
    // This will potentially add some alias group items to `from_context` if it finds some.
    if let Some(group_by) = stmt.group_by.as_ref() {
        self.analyze_group_items(&mut from_context, &select_list, group_by)
            .await?;
    }
    self.analyze_window_select(&mut from_context, &mut select_list)?;
    self.analyze_aggregate_select(&mut from_context, &mut select_list)?;
    // `analyze_projection` must run after `analyze_aggregate_select` because
    // `analyze_aggregate_select` rewrites `grouping`.
    let (mut scalar_items, projections) = self.analyze_projection(&select_list)?;
    // HAVING is analyzed now but bound later, after the aggregate operator.
    let having = if let Some(having) = &stmt.having {
        Some(
            self.analyze_aggregate_having(&mut from_context, &select_list, having)
                .await?,
        )
    } else {
        None
    };
    let mut window_order_by_items = vec![];
    for order_by_expr in window_order_by_exprs.iter() {
        window_order_by_items.push(
            self.fetch_window_order_items(
                &from_context,
                &mut scalar_items,
                &projections,
                order_by_expr,
            )
            .await?,
        );
    }
    let order_items = self
        .analyze_order_items(
            &from_context,
            &mut scalar_items,
            &projections,
            order_by,
            stmt.distinct,
        )
        .await?;
    // Insert the aggregate operator when there is any aggregate function or
    // GROUP BY item to evaluate.
    if !from_context.aggregate_info.aggregate_functions.is_empty()
        || !from_context.aggregate_info.group_items.is_empty()
    {
        s_expr = self.bind_aggregate(&mut from_context, s_expr).await?;
    }
    if let Some((having, span)) = having {
        s_expr = self
            .bind_having(&mut from_context, having, span, s_expr)
            .await?;
    }
    // bind window order by
    for window_order_items in window_order_by_items {
        s_expr = self
            .bind_window_order_by(
                &from_context,
                window_order_items,
                &select_list,
                &mut scalar_items,
                s_expr,
            )
            .await?;
    }
    // bind window
    // window runs after the HAVING clause but before the ORDER BY clause.
    // NOTE(review): windows are read from `bind_context` rather than
    // `from_context` — confirm this is intentional.
    for window_info in bind_context.windows.iter() {
        s_expr = self.bind_window_function(window_info, s_expr).await?;
    }
    if stmt.distinct {
        s_expr = self.bind_distinct(
            stmt.span,
            &from_context,
            &projections,
            &mut scalar_items,
            s_expr,
        )?;
    }
    if !order_by.is_empty() {
        s_expr = self
            .bind_order_by(
                &from_context,
                order_items,
                &select_list,
                &mut scalar_items,
                s_expr,
            )
            .await?;
    }
    s_expr = self.bind_projection(&mut from_context, &projections, &scalar_items, s_expr)?;
    // add internal column binding into expr
    s_expr = from_context.add_internal_column_into_expr(s_expr);
    // The output context exposes only the columns/CTEs of this SELECT.
    let mut output_context = BindContext::new();
    output_context.parent = from_context.parent;
    output_context.columns = from_context.columns;
    output_context.ctes_map = from_context.ctes_map;
    Ok((s_expr, output_context))
}
#[async_recursion]
/// Dispatch a set expression to the matching binder: plain SELECT, nested
/// query, or set operation (UNION/INTERSECT/EXCEPT).
pub(crate) async fn bind_set_expr(
    &mut self,
    bind_context: &mut BindContext,
    set_expr: &SetExpr,
    order_by: &[OrderByExpr],
) -> Result<(SExpr, BindContext)> {
    match set_expr {
        SetExpr::Select(stmt) => self.bind_select_stmt(bind_context, stmt, order_by).await,
        SetExpr::Query(stmt) => self.bind_query(bind_context, stmt).await,
        SetExpr::SetOperation(set_operation) => {
            // Note: `order_by` is not forwarded here; ORDER BY over a set
            // operation is bound separately in `bind_query`.
            self.bind_set_operator(
                bind_context,
                &set_operation.left,
                &set_operation.right,
                &set_operation.op,
                &set_operation.all,
            )
            .await
        }
    }
}
#[async_recursion]
/// Bind a complete `Query`: WITH CTEs, the body, ORDER BY, and LIMIT/OFFSET.
pub(crate) async fn bind_query(
    &mut self,
    bind_context: &mut BindContext,
    query: &Query,
) -> Result<(SExpr, BindContext)> {
    // Register WITH CTEs first so table references in the body can resolve them.
    if let Some(with) = &query.with {
        for cte in with.ctes.iter() {
            let table_name = cte.alias.name.name.clone();
            if bind_context.ctes_map.contains_key(&table_name) {
                return Err(ErrorCode::SemanticError(format!(
                    "duplicate cte {table_name}"
                )));
            }
            let cte_info = CteInfo {
                columns_alias: cte.alias.columns.iter().map(|c| c.name.clone()).collect(),
                query: cte.query.clone(),
            };
            bind_context.ctes_map.insert(table_name, cte_info);
        }
    }
    let (mut s_expr, bind_context) = match query.body {
        // For a plain SELECT or nested query the ORDER BY is handled inside
        // the body binder itself.
        SetExpr::Select(_) | SetExpr::Query(_) => {
            self.bind_set_expr(bind_context, &query.body, &query.order_by)
                .await?
        }
        // For a set operation the ORDER BY applies to the combined result,
        // so it is bound afterwards on top of the set-operation plan.
        SetExpr::SetOperation(_) => {
            let (mut s_expr, mut bind_context) =
                self.bind_set_expr(bind_context, &query.body, &[]).await?;
            if !query.order_by.is_empty() {
                s_expr = self
                    .bind_order_by_for_set_operation(&mut bind_context, s_expr, &query.order_by)
                    .await?;
            }
            (s_expr, bind_context)
        }
    };
    if !query.limit.is_empty() {
        if query.limit.len() == 1 {
            s_expr = self
                .bind_limit(&bind_context, s_expr, Some(&query.limit[0]), &query.offset)
                .await?;
        } else {
            // `LIMIT <offset>, <count>` form: the first literal is the offset.
            s_expr = self
                .bind_limit(
                    &bind_context,
                    s_expr,
                    Some(&query.limit[1]),
                    &Some(query.limit[0].clone()),
                )
                .await?;
        }
    } else if query.offset.is_some() {
        // OFFSET without LIMIT.
        s_expr = self
            .bind_limit(&bind_context, s_expr, None, &query.offset)
            .await?;
    }
    Ok((s_expr, bind_context))
}
/// Bind a `WHERE` predicate and wrap `child` in a `Filter` operator whose
/// predicates are the conjuncts of the bound expression.
pub(super) async fn bind_where(
    &mut self,
    bind_context: &mut BindContext,
    expr: &Expr,
    child: SExpr,
) -> Result<SExpr> {
    bind_context.set_expr_context(ExprContext::WhereClause);
    let mut binder = ScalarBinder::new(
        bind_context,
        self.ctx.clone(),
        &self.name_resolution_ctx,
        self.metadata.clone(),
        &[],
    );
    let (predicate, _) = binder.bind(expr).await?;
    // If the predicate is an internal column reference, register that
    // internal column on the bind context.
    if let ScalarExpr::BoundInternalColumnRef(ref column_ref) = predicate {
        bind_context.add_internal_column_binding(&column_ref.column, self.metadata.clone());
    }
    let filter = Filter {
        predicates: split_conjunctions(&predicate),
        is_having: false,
    };
    Ok(SExpr::create_unary(filter.into(), child))
}
/// Bind a set operation (`UNION` / `INTERSECT` / `EXCEPT`).
///
/// Both sides are bound independently; their column counts must match, and a
/// common super type is computed per column pair. The operation is then
/// lowered to a semi-join (INTERSECT), anti-join (EXCEPT), or `UnionAll`.
pub(super) async fn bind_set_operator(
    &mut self,
    bind_context: &mut BindContext,
    left: &SetExpr,
    right: &SetExpr,
    op: &SetOperator,
    all: &bool,
) -> Result<(SExpr, BindContext)> {
    let (left_expr, left_bind_context) = self.bind_set_expr(bind_context, left, &[]).await?;
    let (right_expr, right_bind_context) = self.bind_set_expr(bind_context, right, &[]).await?;
    if left_bind_context.columns.len() != right_bind_context.columns.len() {
        return Err(ErrorCode::SemanticError(
            "SetOperation must have the same number of columns",
        ));
    }
    let mut coercion_types = Vec::with_capacity(left_bind_context.columns.len());
    for (left_col, right_col) in left_bind_context
        .columns
        .iter()
        .zip(right_bind_context.columns.iter())
    {
        if left_col.data_type != right_col.data_type {
            // Return a proper semantic error instead of panicking: un-coercible
            // column types are a user-query problem, not an internal bug.
            let data_type = common_super_type(
                *left_col.data_type.clone(),
                *right_col.data_type.clone(),
                &BUILTIN_FUNCTIONS.default_cast_rules,
            )
            .ok_or_else(|| {
                ErrorCode::SemanticError("SetOperation's types cannot be matched")
            })?;
            coercion_types.push(data_type);
        } else {
            coercion_types.push(*left_col.data_type.clone());
        }
    }
    match (op, all) {
        (SetOperator::Intersect, false) => {
            // Transfer Intersect to Semi join
            self.bind_intersect(
                left.span(),
                right.span(),
                left_bind_context,
                right_bind_context,
                left_expr,
                right_expr,
            )
        }
        (SetOperator::Except, false) => {
            // Transfer Except to Anti join
            self.bind_except(
                left.span(),
                right.span(),
                left_bind_context,
                right_bind_context,
                left_expr,
                right_expr,
            )
        }
        // UNION ALL keeps duplicates (`distinct == false`).
        (SetOperator::Union, true) => self.bind_union(
            left.span(),
            right.span(),
            left_bind_context,
            right_bind_context,
            coercion_types,
            left_expr,
            right_expr,
            false,
        ),
        // Plain UNION deduplicates (`distinct == true`).
        (SetOperator::Union, false) => self.bind_union(
            left.span(),
            right.span(),
            left_bind_context,
            right_bind_context,
            coercion_types,
            left_expr,
            right_expr,
            true,
        ),
        _ => Err(ErrorCode::Unimplemented(
            "Unsupported query type, currently, databend only support intersect distinct and except distinct",
        )),
    }
}
#[allow(clippy::too_many_arguments)]
/// Lower `UNION [ALL]` to a `UnionAll` plan, coercing both inputs to the
/// common column types first. `distinct == true` (plain `UNION`) adds a
/// distinct aggregation on top.
fn bind_union(
    &mut self,
    left_span: Span,
    right_span: Span,
    left_context: BindContext,
    right_context: BindContext,
    coercion_types: Vec<DataType>,
    left_expr: SExpr,
    right_expr: SExpr,
    distinct: bool,
) -> Result<(SExpr, BindContext)> {
    // Insert casts where needed and build the output-column index pairs.
    let (new_bind_context, pairs, left_expr, right_expr) = self.coercion_union_type(
        left_span,
        right_span,
        left_context,
        right_context,
        left_expr,
        right_expr,
        coercion_types,
    )?;
    let union_plan = UnionAll { pairs };
    let mut new_expr = SExpr::create_binary(union_plan.into(), left_expr, right_expr);
    if distinct {
        new_expr = self.bind_distinct(
            left_span,
            &new_bind_context,
            new_bind_context.all_column_bindings(),
            &mut HashMap::new(),
            new_expr,
        )?;
    }
    Ok((new_expr, new_bind_context))
}
/// Lower `INTERSECT` by rewriting it as a left semi-join on all columns.
fn bind_intersect(
    &mut self,
    left_span: Span,
    right_span: Span,
    left_context: BindContext,
    right_context: BindContext,
    left_expr: SExpr,
    right_expr: SExpr,
) -> Result<(SExpr, BindContext)> {
    let join_type = JoinType::LeftSemi;
    self.bind_intersect_or_except(
        left_span, right_span, left_context, right_context, left_expr, right_expr, join_type,
    )
}
/// Lower `EXCEPT` by rewriting it as a left anti-join on all columns.
fn bind_except(
    &mut self,
    left_span: Span,
    right_span: Span,
    left_context: BindContext,
    right_context: BindContext,
    left_expr: SExpr,
    right_expr: SExpr,
) -> Result<(SExpr, BindContext)> {
    let join_type = JoinType::LeftAnti;
    self.bind_intersect_or_except(
        left_span, right_span, left_context, right_context, left_expr, right_expr, join_type,
    )
}
#[allow(clippy::too_many_arguments)]
/// Shared lowering for `INTERSECT` (left semi-join) and `EXCEPT` (left
/// anti-join): deduplicate the left input, then join the two sides on
/// pairwise equality of all their columns.
fn bind_intersect_or_except(
    &mut self,
    left_span: Span,
    right_span: Span,
    left_context: BindContext,
    right_context: BindContext,
    left_expr: SExpr,
    right_expr: SExpr,
    join_type: JoinType,
) -> Result<(SExpr, BindContext)> {
    // DISTINCT on the left input gives the operation set semantics.
    let left_expr = self.bind_distinct(
        left_span,
        &left_context,
        left_context.all_column_bindings(),
        &mut HashMap::new(),
        left_expr,
    )?;
    let mut left_conditions = Vec::with_capacity(left_context.columns.len());
    let mut right_conditions = Vec::with_capacity(right_context.columns.len());
    // Column counts were validated by the caller (`bind_set_operator`).
    assert_eq!(left_context.columns.len(), right_context.columns.len());
    // Build the equi-join condition: column i of left == column i of right.
    for (left_column, right_column) in left_context
        .columns
        .iter()
        .zip(right_context.columns.iter())
    {
        left_conditions.push(
            BoundColumnRef {
                span: left_span,
                column: left_column.clone(),
            }
            .into(),
        );
        right_conditions.push(
            BoundColumnRef {
                span: right_span,
                column: right_column.clone(),
            }
            .into(),
        );
    }
    let join_conditions = JoinConditions {
        left_conditions,
        right_conditions,
        non_equi_conditions: vec![],
        other_conditions: vec![],
    };
    let s_expr = self.bind_join_with_type(join_type, join_conditions, left_expr, right_expr)?;
    // The result schema is that of the left side.
    Ok((s_expr, left_context))
}
#[allow(clippy::type_complexity)]
#[allow(clippy::too_many_arguments)]
/// For each UNION column pair, cast either side to the pre-computed common
/// type when its type differs, wrapping the inputs in `EvalScalar` operators
/// as needed.
///
/// Returns the new output context, the `(left_index, right_index)` column
/// pairs for the `UnionAll` plan, and the (possibly wrapped) child plans.
fn coercion_union_type(
    &self,
    left_span: Span,
    right_span: Span,
    left_bind_context: BindContext,
    right_bind_context: BindContext,
    mut left_expr: SExpr,
    mut right_expr: SExpr,
    coercion_types: Vec<DataType>,
) -> Result<(BindContext, Vec<(IndexType, IndexType)>, SExpr, SExpr)> {
    let mut left_scalar_items = Vec::with_capacity(left_bind_context.columns.len());
    let mut right_scalar_items = Vec::with_capacity(right_bind_context.columns.len());
    let mut new_bind_context = BindContext::new();
    let mut pairs = Vec::with_capacity(left_bind_context.columns.len());
    for (idx, (left_col, right_col)) in left_bind_context
        .columns
        .iter()
        .zip(right_bind_context.columns.iter())
        .enumerate()
    {
        let left_index = if *left_col.data_type != coercion_types[idx] {
            // Left side needs a cast: derive a new column holding the result.
            let new_column_index = self
                .metadata
                .write()
                .add_derived_column(left_col.column_name.clone(), coercion_types[idx].clone());
            let column_binding = ColumnBinding {
                database_name: None,
                table_name: None,
                column_name: left_col.column_name.clone(),
                index: new_column_index,
                data_type: Box::new(coercion_types[idx].clone()),
                visibility: Visibility::Visible,
            };
            let left_coercion_expr = CastExpr {
                span: left_span,
                is_try: false,
                argument: Box::new(
                    BoundColumnRef {
                        span: left_span,
                        column: left_col.clone(),
                    }
                    .into(),
                ),
                target_type: Box::new(coercion_types[idx].clone()),
            };
            left_scalar_items.push(ScalarItem {
                scalar: left_coercion_expr.into(),
                index: new_column_index,
            });
            new_bind_context.add_column_binding(column_binding);
            new_column_index
        } else {
            new_bind_context.add_column_binding(left_col.clone());
            left_col.index
        };
        let right_index = if *right_col.data_type != coercion_types[idx] {
            // Right side needs a cast. Note that only the left side's bindings
            // populate `new_bind_context`: the union's output schema is taken
            // from the left input.
            let new_column_index = self
                .metadata
                .write()
                .add_derived_column(right_col.column_name.clone(), coercion_types[idx].clone());
            let right_coercion_expr = CastExpr {
                span: right_span,
                is_try: false,
                argument: Box::new(
                    BoundColumnRef {
                        span: right_span,
                        column: right_col.clone(),
                    }
                    .into(),
                ),
                target_type: Box::new(coercion_types[idx].clone()),
            };
            right_scalar_items.push(ScalarItem {
                scalar: right_coercion_expr.into(),
                index: new_column_index,
            });
            new_column_index
        } else {
            right_col.index
        };
        pairs.push((left_index, right_index));
    }
    // Only wrap a child in EvalScalar when it actually has casts to perform.
    if !left_scalar_items.is_empty() {
        left_expr = SExpr::create_unary(
            EvalScalar {
                items: left_scalar_items,
            }
            .into(),
            left_expr,
        );
    }
    if !right_scalar_items.is_empty() {
        right_expr = SExpr::create_unary(
            EvalScalar {
                items: right_scalar_items,
            }
            .into(),
            right_expr,
        );
    }
    Ok((new_bind_context, pairs, left_expr, right_expr))
}
}
/// Rewrites a `SelectStmt` to implement SQL syntax sugar (currently
/// `PIVOT` and `UNPIVOT`).
///
/// [`column_binding`] contains the column binding information of the `SelectStmt`.
///
/// To support more sugar, add a new `rewrite_*` function and call it from
/// the `rewrite` function.
struct SelectRewriter<'a> {
    /// Column bindings from the statement's FROM context.
    column_binding: &'a [ColumnBinding],
    /// The rewritten statement, populated lazily by the `rewrite_*` passes.
    new_stmt: Option<SelectStmt>,
    /// Whether unquoted identifiers compare case-sensitively.
    is_unquoted_ident_case_sensitive: bool,
}
// helper functions to SelectRewriter
impl<'a> SelectRewriter<'a> {
/// Compare two unquoted identifiers, honoring the configured case sensitivity.
fn compare_unquoted_ident(&self, a: &str, b: &str) -> bool {
    if self.is_unquoted_ident_case_sensitive {
        return a == b;
    }
    a.eq_ignore_ascii_case(b)
}
/// Split an aggregate-call expression into its name and argument list;
/// any other expression kind is a syntax error.
fn parse_aggregate_function(expr: &Expr) -> Result<(&Identifier, &[Expr])> {
    if let Expr::FunctionCall { name, args, .. } = expr {
        Ok((name, args))
    } else {
        Err(ErrorCode::SyntaxException("Aggregate function is required"))
    }
}
/// Build an unquoted, span-less identifier from a plain string.
fn ident_from_string(s: &str) -> Identifier {
    Identifier {
        span: None,
        name: s.to_string(),
        quote: None,
    }
}
/// Build the AST for `col = value`.
fn expr_eq_from_col_and_value(col: Identifier, value: Expr) -> Expr {
    let column_ref = Expr::ColumnRef {
        span: None,
        database: None,
        table: None,
        column: col,
    };
    Expr::BinaryOp {
        span: None,
        op: BinaryOperator::Eq,
        left: Box::new(column_ref),
        right: Box::new(value),
    }
}
/// Build a select target that calls `name(args…)` with an optional alias.
fn target_func_from_name_args(
    name: Identifier,
    args: Vec<Expr>,
    alias: Option<Identifier>,
) -> SelectTarget {
    let call = Expr::FunctionCall {
        span: Span::default(),
        distinct: false,
        name,
        args,
        params: vec![],
        window: None,
    };
    SelectTarget::AliasedExpr {
        expr: Box::new(call),
        alias,
    }
}
/// Build an array literal whose elements are the identifiers' names as
/// string literals.
fn expr_literal_array_from_vec_ident(exprs: Vec<Identifier>) -> Expr {
    let mut literals = Vec::with_capacity(exprs.len());
    for ident in exprs {
        literals.push(Expr::Literal {
            span: None,
            lit: Literal::String(ident.name),
        });
    }
    Array {
        span: Span::default(),
        exprs: literals,
    }
}
/// Build an array literal whose elements are column references to the
/// given identifiers.
fn expr_column_ref_array_from_vec_ident(exprs: Vec<Identifier>) -> Expr {
    let mut columns = Vec::with_capacity(exprs.len());
    for ident in exprs {
        columns.push(Expr::ColumnRef {
            span: None,
            database: None,
            table: None,
            column: ident,
        });
    }
    Array {
        span: Span::default(),
        exprs: columns,
    }
}
// For Expr::Literal, expr.to_string() is quoted; sometimes we need the raw string.
fn raw_string_from_literal_expr(expr: &Expr) -> Option<String> {
    match expr {
        Expr::Literal {
            lit: Literal::String(v),
            ..
        } => Some(v.clone()),
        Expr::Literal { .. } => Some(expr.to_string()),
        _ => None,
    }
}
}
impl<'a> SelectRewriter<'a> {
fn new(column_binding: &'a [ColumnBinding], is_unquoted_ident_case_sensitive: bool) -> Self {
SelectRewriter {
column_binding,
new_stmt: None,
is_unquoted_ident_case_sensitive,
}
}
/// Run all rewrite passes over `stmt`; returns the rewritten statement,
/// or `None` when no pass applied.
fn rewrite(&mut self, stmt: &SelectStmt) -> Result<Option<SelectStmt>> {
    self.rewrite_pivot(stmt)?;
    self.rewrite_unpivot(stmt)?;
    // `take` also resets the rewriter for potential reuse.
    Ok(self.new_stmt.take())
}
/// Rewrite `PIVOT` sugar: for each pivot value, append a conditional
/// aggregate (`<agg>_if(args, value_column = value)`) to the select list
/// and group by the remaining columns. No-op unless the statement has a
/// single FROM item with a PIVOT clause.
fn rewrite_pivot(&mut self, stmt: &SelectStmt) -> Result<()> {
    if stmt.from.len() != 1 || stmt.from[0].pivot().is_none() {
        return Ok(());
    }
    // Safe: checked `is_none()` above.
    let pivot = stmt.from[0].pivot().unwrap();
    let (aggregate_name, aggregate_args) = Self::parse_aggregate_function(&pivot.aggregate)?;
    // Every aggregate argument must be a plain column reference.
    let aggregate_columns = aggregate_args
        .iter()
        .map(|expr| match expr {
            Expr::ColumnRef { column, .. } => Some(column.clone()),
            _ => None,
        })
        .collect::<Option<Vec<_>>>()
        .ok_or_else(|| ErrorCode::SyntaxException("Aggregate column not found"))?;
    let aggregate_column_names = aggregate_columns
        .iter()
        .map(|col| col.name.as_str())
        .collect::<Vec<_>>();
    // Default GROUP BY: every input column that is neither the pivot value
    // column nor an aggregate argument, referenced by ordinal.
    // NOTE(review): uses `col.index + 1` (the binding's index) as a 1-based
    // GROUP BY ordinal — assumes binding index equals select position; confirm.
    let new_group_by = stmt.group_by.clone().unwrap_or_else(|| {
        GroupBy::Normal(
            self.column_binding
                .iter()
                .filter(|col_bind| {
                    !self
                        .compare_unquoted_ident(&col_bind.column_name, &pivot.value_column.name)
                        && !aggregate_column_names
                            .iter()
                            .any(|col| self.compare_unquoted_ident(col, &col_bind.column_name))
                })
                .map(|col| Expr::Literal {
                    span: Span::default(),
                    lit: Literal::UInt64(col.index as u64 + 1),
                })
                .collect(),
        )
    });
    let mut new_select_list = stmt.select_list.clone();
    // Exclude the pivoted-away columns from a `SELECT *` target, if present.
    if let Some(star) = new_select_list.iter_mut().find(|target| target.is_star()) {
        let mut exclude_columns = aggregate_columns;
        exclude_columns.push(pivot.value_column.clone());
        star.exclude(exclude_columns);
    };
    // e.g. `sum` becomes `sum_if`.
    let new_aggregate_name = Identifier {
        name: format!("{}_if", aggregate_name.name),
        ..aggregate_name.clone()
    };
    // One conditional aggregate per pivot value, aliased by the raw value.
    for value in &pivot.values {
        let mut args = aggregate_args.to_vec();
        args.push(Self::expr_eq_from_col_and_value(
            pivot.value_column.clone(),
            value.clone(),
        ));
        let alias = Self::raw_string_from_literal_expr(value)
            .ok_or_else(|| ErrorCode::SyntaxException("Pivot value should be literal"))?;
        new_select_list.push(Self::target_func_from_name_args(
            new_aggregate_name.clone(),
            args,
            Some(Self::ident_from_string(&alias)),
        ));
    }
    // Merge into an already-started rewrite, or start one from `stmt`.
    if let Some(ref mut new_stmt) = self.new_stmt {
        new_stmt.select_list = new_select_list;
        new_stmt.group_by = Some(new_group_by);
    } else {
        self.new_stmt = Some(SelectStmt {
            select_list: new_select_list,
            group_by: Some(new_group_by),
            ..stmt.clone()
        });
    }
    Ok(())
}
/// Rewrite `UNPIVOT` sugar: exclude the unpivoted columns from `*` and add
/// two `unnest` targets — one over the column names (as the name column),
/// one over the column values (as the value column). No-op unless the
/// statement has a single FROM item with an UNPIVOT clause.
fn rewrite_unpivot(&mut self, stmt: &SelectStmt) -> Result<()> {
    if stmt.from.len() != 1 || stmt.from[0].unpivot().is_none() {
        return Ok(());
    }
    // Safe: checked `is_none()` above.
    let unpivot = stmt.from[0].unpivot().unwrap();
    let mut new_select_list = stmt.select_list.clone();
    if let Some(star) = new_select_list.iter_mut().find(|target| target.is_star()) {
        star.exclude(unpivot.names.clone());
    };
    // unnest(['col_a', 'col_b', ...]) AS <column_name>
    new_select_list.push(Self::target_func_from_name_args(
        Self::ident_from_string("unnest"),
        vec![Self::expr_literal_array_from_vec_ident(
            unpivot.names.clone(),
        )],
        Some(unpivot.column_name.clone()),
    ));
    // unnest([col_a, col_b, ...]) AS <value_column>
    new_select_list.push(Self::target_func_from_name_args(
        Self::ident_from_string("unnest"),
        vec![Self::expr_column_ref_array_from_vec_ident(
            unpivot.names.clone(),
        )],
        Some(unpivot.value_column.clone()),
    ));
    // Merge into an already-started rewrite, or start one from `stmt`.
    if let Some(ref mut new_stmt) = self.new_stmt {
        new_stmt.select_list = new_select_list;
    } else {
        self.new_stmt = Some(SelectStmt {
            select_list: new_select_list,
            ..stmt.clone()
        });
    };
    Ok(())
}
}
|
use crate::btree::key_value::KeyType;
use crate::btree::key_value::ValueType;
use std::collections::BTreeMap;
use std::error::Error;
/// An in-memory B-tree keyed store.
///
/// The map owns its keys and values: the original field type
/// `BTreeMap<&'d K, &'d V>` could never be populated from the owned
/// `insert(key: K, value: V)` API (and `new()` even constructed a
/// `BTreeMap<K, V>`, which did not type-check against the field).
pub struct MemBtree<'d, K: KeyType<'d>, V: ValueType<'d>> {
    /// Backing ordered map.
    map: BTreeMap<K, V>,
    /// Number of distinct keys currently stored.
    count: usize,
}

impl<'d, K: KeyType<'d>, V: ValueType<'d>> MemBtree<'d, K, V> {
    /// Create an empty in-memory B-tree.
    ///
    /// Kept as `Result` for interface compatibility; creation itself
    /// cannot fail. (`Box<dyn Error>` replaces the pre-2018 `Box<Error>`.)
    pub fn new() -> Result<MemBtree<'d, K, V>, Box<dyn Error>> {
        Ok(MemBtree {
            map: BTreeMap::new(),
            count: 0,
        })
    }
    /// Insert `value` under `key`, replacing any previous entry.
    pub fn insert(&mut self, key: K, value: V) {
        // Only bump the counter for genuinely new keys, so `count`
        // stays equal to `map.len()` (the original incremented on
        // replacement too).
        if self.map.insert(key, value).is_none() {
            self.count += 1;
        }
    }
    /// Look up the value stored under `key`.
    pub fn get(&self, key: K) -> Option<&V> {
        self.map.get(&key)
    }
}
|
#![feature(proc_macro_hygiene, decl_macro)]
#[macro_use] extern crate rocket;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
use std::sync::Mutex;
use std::collections::HashMap;
use rocket::State;
use rocket_contrib::json::{Json, JsonValue};
// The type to represent the ID of a real-estate entry.
type ID = usize;
// We're going to store all of the entries here. No need for a DB for now.
type RealEstateMap = Mutex<HashMap<ID, String>>;
/// JSON payload for a real-estate entry. `id` is optional on input; the
/// write routes use the path parameter, not this field.
#[derive(Serialize, Deserialize)]
struct RealEstate {
    id: Option<ID>,
    description: String
}
/// Return every stored entry as a JSON array (iteration order is
/// unspecified, as with any HashMap).
#[get("/", format = "json")]
fn getAll(map: State<RealEstateMap>) -> Json<Vec<RealEstate>> {
    let store = map.lock().unwrap();
    let mut all = Vec::with_capacity(store.len());
    for (&id, description) in store.iter() {
        all.push(RealEstate {
            id: Some(id),
            description: description.clone(),
        });
    }
    Json(all)
}
/// Fetch a single entry by id; `None` yields a 404.
#[get("/<id>", format = "json")]
fn get(id: ID, map: State<RealEstateMap>) -> Option<Json<RealEstate>> {
    let store = map.lock().unwrap();
    let description = store.get(&id)?.clone();
    Some(Json(RealEstate {
        id: Some(id),
        description,
    }))
}
/// Create an entry under `id`; rejects an already-used id.
#[post("/<id>", format = "json", data = "<entity>")]
fn new(id: ID, entity: Json<RealEstate>, map: State<RealEstateMap>) -> JsonValue {
    let mut store = map.lock().expect("map lock.");
    if store.contains_key(&id) {
        return json!({
            "status": "error",
            "reason": "ID exists. Try put."
        });
    }
    store.insert(id, entity.0.description);
    json!({ "status": "ok" })
}
/// Replace an existing entry; `None` (404) when the id is unknown.
#[put("/<id>", format = "json", data = "<entity>")]
fn update(id: ID, entity: Json<RealEstate>, map: State<RealEstateMap>) -> Option<JsonValue> {
    let mut store = map.lock().unwrap();
    if !store.contains_key(&id) {
        return None;
    }
    store.insert(id, entity.0.description);
    Some(json!({ "status": "ok" }))
}
/// JSON body returned for any unmatched route (registered as the 404 catcher).
#[catch(404)]
fn not_found() -> JsonValue {
    json!({
        "status": "error",
        "reason": "Resource was not found."
    })
}
/// Build the Rocket instance: CRUD routes under `/realestates/`, the 404
/// catcher, and the shared in-memory store as managed state.
fn rocket() -> rocket::Rocket {
    rocket::ignite()
        .mount("/realestates/", routes![getAll, get, new, update])
        .register(catchers![not_found])
        .manage(Mutex::new(HashMap::<ID, String>::new()))
}
/// Entry point: launch the server (blocks until shutdown).
fn main() {
    rocket().launch();
}
|
#![warn(clippy::all)]
use secs::World;
/// Demo: spawn one entity with `Health` and `Name` components, then report
/// every entity whose health is negative.
fn main() {
    let mut world = World::new();
    let tom = world.new_entity();
    world.add_component_to_entity(tom, Health(-10));
    world.add_component_to_entity(tom, Name("Tom"));
    let mut healths = world.borrow_component_vec_mut::<Health>().unwrap();
    let mut names = world.borrow_component_vec_mut::<Name>().unwrap();
    // Walk entities that have BOTH components (zip of Option slots).
    for (health, name) in healths
        .iter_mut()
        .zip(names.iter_mut())
        .filter_map(|(h, n)| h.as_mut().zip(n.as_mut()))
    {
        if health.0 < 0 {
            println!("{} has perished", name.0);
        }
    }
}
/// Hit-points component; `main` treats a negative value as dead.
#[derive(Debug)]
struct Health(i32);
/// Display-name component.
#[derive(Debug)]
struct Name(&'static str);
|
// Copyright 2019-2020 PureStake Inc.
// This file is part of Moonbeam.
// Moonbeam is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Moonbeam is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Moonbeam. If not, see <http://www.gnu.org/licenses/>.
//! # Crowdloan Rewards Pallet
//!
//! This pallet issues rewards to citizens who participated in a crowdloan on the backing relay
//! chain (eg Kusama) in order to help this parachain acquire a parachain slot.
//!
//! ## Monetary Policy
//!
//! This is simple and mock for now. We can do whatever we want.
//! This pallet stores a constant "reward ratio" which is the number of reward tokens to pay per
//! contributed token. In simple cases this can be 1, but needs to be customizable to allow for
//! vastly differing absolute token supplies between relay and para.
//! Vesting is also linear. No tokens are vested at genesis and they unlock linearly until a
//! predecided block number. Vesting computations happen on demand when payouts are requested. So
//! no block weight is ever wasted on this, and there is no "base-line" cost of updating vestings.
//! Like I said, we can do anything we want there. Even a non-linear reward curve to disincentivize
//! whales.
//!
//! ## Payout Mechanism
//!
//! The current payout mechanism requires contributors to claim their payouts. Because they are
//! paying the transaction fees for this themselves, they can do it as often as every block, or
//! wait and claim the entire thing once it is fully vested. We could consider auto payouts if we
//! want.
//!
//! ## Sourcing Contribution Information
//!
//! The pallet can learn about the crowdloan contributions in several ways.
//!
//! * **Through the initialize_reward_vec extrinsic**
//!
//! The simplest way is to call the initialize_reward_vec through a democracy proposal/sudo call.
//! This makes sense in a scenario where the crowdloan took place entirely offchain.
//! This extrinsic initializes the associated and unassociated storage with the provided data
//!
//! * **ReadingRelayState**
//!
//! The most elegant, but most complex solution would be for the para to read the contributions
//! directly from the relay state. Blocked by https://github.com/paritytech/cumulus/issues/320 so
//! I won't pursue it further right now. I can't decide whether that would really add security /
//! trustlessness, or is just a sexy blockchain thing to do. Contributors can always audit the
//! democracy proposal and make sure their contribution is in it, so in that sense reading relay state
//! isn't necessary. But if a single contribution is left out, the rest of the contributors might
//! not care enough to delay network launch. The little guy might get censored.
#![cfg_attr(not(feature = "std"), no_std)]
use frame_support::pallet;
pub use pallet::*;
#[cfg(any(test, feature = "runtime-benchmarks"))]
mod benchmarks;
#[cfg(test)]
pub(crate) mod mock;
#[cfg(test)]
mod tests;
pub mod weights;
#[pallet]
pub mod pallet {
use crate::weights::WeightInfo;
use frame_support::traits::WithdrawReasons;
use frame_support::{
pallet_prelude::*,
traits::{Currency, ExistenceRequirement::AllowDeath},
PalletId,
};
use frame_system::pallet_prelude::*;
use sp_core::crypto::AccountId32;
use sp_runtime::traits::{
AccountIdConversion, AtLeast32BitUnsigned, BlockNumberProvider, Saturating, Verify,
};
use sp_runtime::{MultiSignature, Perbill};
use sp_std::collections::btree_map::BTreeMap;
use sp_std::vec;
use sp_std::vec::Vec;
#[pallet::pallet]
// The crowdloan rewards pallet
pub struct Pallet<T>(PhantomData<T>);
/// Id used to derive the pallet's sovereign account.
pub const PALLET_ID: PalletId = PalletId(*b"Crowdloa");
// Marker bytes wrapped around the reward-address-change payload before
// signature verification.
pub const WRAPPED_BYTES_PREFIX: &[u8] = b"<Bytes>";
pub const WRAPPED_BYTES_POSTFIX: &[u8] = b"</Bytes>";
/// Configuration trait of this pallet.
#[pallet::config]
pub trait Config: frame_system::Config {
    /// The overarching event type
    type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
    /// Checker for the reward vec: has it been initialized already?
    type Initialized: Get<bool>;
    /// Percentage to be paid at initialization
    #[pallet::constant]
    type InitializationPayment: Get<Perbill>;
    /// Max number of contributors that can be inserted at once in `initialize_reward_vec`
    #[pallet::constant]
    type MaxInitContributors: Get<u32>;
    /// The minimum contribution to which rewards will be paid.
    type MinimumReward: Get<BalanceOf<Self>>;
    /// A fraction representing the percentage of proofs
    /// that need to be presented to change a reward address through the relay keys
    #[pallet::constant]
    type RewardAddressRelayVoteThreshold: Get<Perbill>;
    /// The currency in which the rewards will be paid (probably the parachain native currency)
    type RewardCurrency: Currency<Self::AccountId>;
    /// The AccountId type contributors used on the relay chain.
    type RelayChainAccountId: Parameter
        //TODO these AccountId32 bounds feel a little extraneous. I wonder if we can remove them.
        + Into<AccountId32>
        + From<AccountId32>
        + Ord;
    /// The origin that is allowed to change the reward address with relay signatures
    type RewardAddressChangeOrigin: EnsureOrigin<Self::Origin>;
    /// The type that will be used to track vesting progress
    type VestingBlockNumber: AtLeast32BitUnsigned + Parameter + Default + Into<BalanceOf<Self>>;
    /// The notion of time that will be used for vesting. Probably
    /// either the relay chain or sovereign chain block number.
    type VestingBlockProvider: BlockNumberProvider<BlockNumber = Self::VestingBlockNumber>;
    /// Benchmarked weights for this pallet's dispatchables.
    type WeightInfo: WeightInfo;
}
/// Shorthand for the balance type of the configured reward currency.
pub type BalanceOf<T> = <<T as Config>::RewardCurrency as Currency<
    <T as frame_system::Config>::AccountId,
>>::Balance;
/// Stores info about the rewards owed as well as how much has been vested so far.
/// For a primer on this kind of design, see the recipe on compounding interest
/// https://substrate.dev/recipes/fixed-point.html#continuously-compounding
#[derive(Default, Clone, Encode, Decode, RuntimeDebug, PartialEq)]
pub struct RewardInfo<T: Config> {
    /// Total reward owed over the full vesting period.
    pub total_reward: BalanceOf<T>,
    /// Portion of `total_reward` already paid out.
    pub claimed_reward: BalanceOf<T>,
    /// Relay-chain accounts whose contributions back this reward.
    pub contributed_relay_addresses: Vec<T::RelayChainAccountId>,
}
// This hook is in charge of initializing the vesting height at the first block of the parachain
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
    fn on_finalize(n: <T as frame_system::Config>::BlockNumber) {
        // In the first block of the parachain we need to introduce the vesting block related info
        if n == 1u32.into() {
            // Record the provider's current block as the vesting start height.
            <InitVestingBlock<T>>::put(T::VestingBlockProvider::current_block_number());
        }
    }
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Associate a native rewards_destination identity with a crowdloan contribution.
///
/// The caller needs to provide the unassociated relay account and a proof to succeed
/// with the association.
/// The proof is nothing but a signature over the reward_address using the relay keys.
#[pallet::weight(T::WeightInfo::associate_native_identity())]
pub fn associate_native_identity(
    origin: OriginFor<T>,
    reward_account: T::AccountId,
    relay_account: T::RelayChainAccountId,
    proof: MultiSignature,
) -> DispatchResultWithPostInfo {
    // Any signed origin may submit; the real authorization is the proof.
    ensure_signed(origin)?;
    // Check the proof:
    // 1. Is signed by an actual unassociated contributor
    // 2. Signs a valid native identity
    // Check the proof. The Proof consists of a Signature of the rewarded account with the
    // claimer key
    // The less costly checks will go first
    // The relay account should be unassociated
    let mut reward_info = UnassociatedContributions::<T>::get(&relay_account)
        .ok_or(Error::<T>::NoAssociatedClaim)?;
    // We ensure the relay chain id was not yet associated to avoid multi-claiming
    // We dont need this right now, as it will always be true if the above check is true
    ensure!(
        ClaimedRelayChainIds::<T>::get(&relay_account).is_none(),
        Error::<T>::AlreadyAssociated
    );
    // For now I prefer that we dont support providing an existing account here
    ensure!(
        AccountsPayable::<T>::get(&reward_account).is_none(),
        Error::<T>::AlreadyAssociated
    );
    // The signed payload is the SCALE-encoded reward account.
    let payload = reward_account.encode();
    // Check the signature
    Self::verify_signatures(
        vec![(relay_account.clone(), proof)],
        reward_info.clone(),
        payload,
    )?;
    // Make the first payment: the configured up-front percentage of the total.
    let first_payment = T::InitializationPayment::get() * reward_info.total_reward;
    T::RewardCurrency::transfer(
        &PALLET_ID.into_account(),
        &reward_account,
        first_payment,
        AllowDeath,
    )?;
    Self::deposit_event(Event::InitialPaymentMade(
        reward_account.clone(),
        first_payment,
    ));
    reward_info.claimed_reward = first_payment;
    // Insert on payable
    AccountsPayable::<T>::insert(&reward_account, &reward_info);
    // Remove from unassociated
    <UnassociatedContributions<T>>::remove(&relay_account);
    // Insert in mapping
    ClaimedRelayChainIds::<T>::insert(&relay_account, ());
    // Emit Event
    Self::deposit_event(Event::NativeIdentityAssociated(
        relay_account,
        reward_account,
        reward_info.total_reward,
    ));
    Ok(Default::default())
}
/// Change reward account by submitting proofs from relay accounts
///
/// The number of valid proofs needs to be bigger than 'RewardAddressRelayVoteThreshold'
/// The account to be changed needs to be submitted as 'previous_account'
/// Origin must be RewardAddressChangeOrigin
#[pallet::weight(T::WeightInfo::change_association_with_relay_keys(proofs.len() as u32))]
pub fn change_association_with_relay_keys(
origin: OriginFor<T>,
reward_account: T::AccountId,
previous_account: T::AccountId,
proofs: Vec<(T::RelayChainAccountId, MultiSignature)>,
) -> DispatchResultWithPostInfo {
// Check that the origin is the one able to change the reward addrss
T::RewardAddressChangeOrigin::ensure_origin(origin)?;
// For now I prefer that we dont support providing an existing account here
ensure!(
AccountsPayable::<T>::get(&reward_account).is_none(),
Error::<T>::AlreadyAssociated
);
// To avoid replay attacks, we make sure the payload contains the previous address too
// I am assuming no rational user will go back to a previously changed reward address
// b"<Bytes>" + "new_account" + "previous_account" + b"</Bytes>"
let mut payload = WRAPPED_BYTES_PREFIX.to_vec();
payload.append(&mut reward_account.encode());
payload.append(&mut previous_account.encode());
payload.append(&mut WRAPPED_BYTES_POSTFIX.to_vec());
// Get the reward info for the account to be changed
let reward_info = AccountsPayable::<T>::get(&previous_account)
.ok_or(Error::<T>::NoAssociatedClaim)?;
Self::verify_signatures(proofs, reward_info.clone(), payload)?;
// Remove fromon payable
AccountsPayable::<T>::remove(&previous_account);
// Insert on payable
AccountsPayable::<T>::insert(&reward_account, &reward_info);
// Emit Event
Self::deposit_event(Event::RewardAddressUpdated(
previous_account,
reward_account,
));
Ok(Default::default())
}
/// Collect whatever portion of your reward are currently vested.
#[pallet::weight(T::WeightInfo::claim())]
pub fn claim(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
let payee = ensure_signed(origin)?;
let initialized = <Initialized<T>>::get();
ensure!(initialized, Error::<T>::RewardVecNotFullyInitializedYet);
// Calculate the veted amount on demand.
let mut info =
AccountsPayable::<T>::get(&payee).ok_or(Error::<T>::NoAssociatedClaim)?;
ensure!(
info.claimed_reward < info.total_reward,
Error::<T>::RewardsAlreadyClaimed
);
// Get the current block used for vesting purposes
let now = T::VestingBlockProvider::current_block_number();
// Substract the first payment from the vested amount
let first_paid = T::InitializationPayment::get() * info.total_reward;
// To calculate how much could the user have claimed already
let payable_period = now.saturating_sub(<InitVestingBlock<T>>::get());
// How much should the contributor have already claimed by this block?
// By multiplying first we allow the conversion to integer done with the biggest number
let period = EndVestingBlock::<T>::get() - InitVestingBlock::<T>::get();
let should_have_claimed = if period == 0u32.into() {
// Pallet is configured with a zero vesting period.
info.total_reward - first_paid
} else {
(info.total_reward - first_paid).saturating_mul(payable_period.into())
/ period.into()
};
// If the period is bigger than whats missing to pay, then return whats missing to pay
let payable_amount = if should_have_claimed >= (info.total_reward - first_paid) {
info.total_reward.saturating_sub(info.claimed_reward)
} else {
should_have_claimed + first_paid - info.claimed_reward
};
info.claimed_reward = info.claimed_reward.saturating_add(payable_amount);
AccountsPayable::<T>::insert(&payee, &info);
// This pallet controls an amount of funds and transfers them to each of the contributors
//TODO: contributors should have the balance locked for tranfers but not for democracy
T::RewardCurrency::transfer(
&PALLET_ID.into_account(),
&payee,
payable_amount,
AllowDeath,
)?;
// Emit event
Self::deposit_event(Event::RewardsPaid(payee, payable_amount));
Ok(Default::default())
}
/// Update reward address, proving that the caller owns the current native key
#[pallet::weight(T::WeightInfo::update_reward_address())]
pub fn update_reward_address(
origin: OriginFor<T>,
new_reward_account: T::AccountId,
) -> DispatchResultWithPostInfo {
let signer = ensure_signed(origin)?;
// Calculate the veted amount on demand.
let info = AccountsPayable::<T>::get(&signer).ok_or(Error::<T>::NoAssociatedClaim)?;
// For now I prefer that we dont support providing an existing account here
ensure!(
AccountsPayable::<T>::get(&new_reward_account).is_none(),
Error::<T>::AlreadyAssociated
);
// Remove previous rewarded account
AccountsPayable::<T>::remove(&signer);
// Update new rewarded acount
AccountsPayable::<T>::insert(&new_reward_account, &info);
// Emit event
Self::deposit_event(Event::RewardAddressUpdated(signer, new_reward_account));
Ok(Default::default())
}
/// This extrinsic completes the initialization if some checks are fullfiled. These checks are:
/// -The reward contribution money matches the crowdloan pot
/// -The end vesting block is higher than the init vesting block
/// -The initialization has not complete yet
#[pallet::weight(T::WeightInfo::complete_initialization())]
pub fn complete_initialization(
origin: OriginFor<T>,
lease_ending_block: T::VestingBlockNumber,
) -> DispatchResultWithPostInfo {
ensure_root(origin)?;
let initialized = <Initialized<T>>::get();
// This ensures there was no prior initialization
ensure!(
initialized == false,
Error::<T>::RewardVecAlreadyInitialized
);
// This ensures the end vesting block (when all funds are fully vested)
// is bigger than the init vesting block
ensure!(
lease_ending_block > InitVestingBlock::<T>::get(),
Error::<T>::VestingPeriodNonValid
);
let current_initialized_rewards = InitializedRewardAmount::<T>::get();
let reward_difference = Self::pot().saturating_sub(current_initialized_rewards);
// Ensure the difference is not bigger than the total number of contributors
ensure!(
reward_difference < TotalContributors::<T>::get().into(),
Error::<T>::RewardsDoNotMatchFund
);
// Burn the difference
let imbalance = T::RewardCurrency::withdraw(
&PALLET_ID.into_account(),
reward_difference,
WithdrawReasons::TRANSFER,
AllowDeath,
)
.expect("Shouldnt fail, as the fund should be enough to burn and nothing is locked");
drop(imbalance);
EndVestingBlock::<T>::put(lease_ending_block);
<Initialized<T>>::put(true);
Ok(Default::default())
}
/// Initialize the reward distribution storage. It shortcuts whenever an error is found
/// This does not enforce any checks other than making sure we dont go over funds
/// complete_initialization should perform any additional
#[pallet::weight(T::WeightInfo::initialize_reward_vec(rewards.len() as u32))]
pub fn initialize_reward_vec(
origin: OriginFor<T>,
rewards: Vec<(T::RelayChainAccountId, Option<T::AccountId>, BalanceOf<T>)>,
) -> DispatchResultWithPostInfo {
ensure_root(origin)?;
let initialized = <Initialized<T>>::get();
ensure!(
initialized == false,
Error::<T>::RewardVecAlreadyInitialized
);
// Ensure we are below the max number of contributors
ensure!(
rewards.len() as u32 <= T::MaxInitContributors::get(),
Error::<T>::TooManyContributors
);
// What is the amount initialized so far?
let mut current_initialized_rewards = InitializedRewardAmount::<T>::get();
// Total number of contributors
let mut total_contributors = TotalContributors::<T>::get();
let incoming_rewards: BalanceOf<T> = rewards
.iter()
.fold(0u32.into(), |acc: BalanceOf<T>, (_, _, reward)| {
acc + *reward
});
// Ensure we dont go over funds
ensure!(
current_initialized_rewards + incoming_rewards <= Self::pot(),
Error::<T>::BatchBeyondFundPot
);
for (relay_account, native_account, reward) in &rewards {
if ClaimedRelayChainIds::<T>::get(&relay_account).is_some()
|| UnassociatedContributions::<T>::get(&relay_account).is_some()
{
// Dont fail as this is supposed to be called with batch calls and we
// dont want to stall the rest of the contributions
Self::deposit_event(Event::InitializedAlreadyInitializedAccount(
relay_account.clone(),
native_account.clone(),
*reward,
));
continue;
}
if *reward < T::MinimumReward::get() {
// Don't fail as this is supposed to be called with batch calls and we
// dont want to stall the rest of the contributions
Self::deposit_event(Event::InitializedAccountWithNotEnoughContribution(
relay_account.clone(),
native_account.clone(),
*reward,
));
continue;
}
// If we have a native_account, we make the payment
let initial_payment = if let Some(native_account) = native_account {
let first_payment = T::InitializationPayment::get() * (*reward);
T::RewardCurrency::transfer(
&PALLET_ID.into_account(),
&native_account,
first_payment,
AllowDeath,
)?;
Self::deposit_event(Event::InitialPaymentMade(
native_account.clone(),
first_payment,
));
first_payment
} else {
0u32.into()
};
// Calculate the reward info to store after the initial payment has been made.
let mut reward_info = RewardInfo {
total_reward: *reward,
claimed_reward: initial_payment,
contributed_relay_addresses: vec![relay_account.clone()],
};
current_initialized_rewards += *reward - initial_payment;
total_contributors += 1;
if let Some(native_account) = native_account {
if let Some(mut inserted_reward_info) =
AccountsPayable::<T>::get(native_account)
{
inserted_reward_info
.contributed_relay_addresses
.append(&mut reward_info.contributed_relay_addresses);
// the native account has already some rewards in, we add the new ones
AccountsPayable::<T>::insert(
native_account,
RewardInfo {
total_reward: inserted_reward_info.total_reward
+ reward_info.total_reward,
claimed_reward: inserted_reward_info.claimed_reward
+ reward_info.claimed_reward,
contributed_relay_addresses: inserted_reward_info
.contributed_relay_addresses,
},
);
} else {
// First reward association
AccountsPayable::<T>::insert(native_account, reward_info);
}
ClaimedRelayChainIds::<T>::insert(relay_account, ());
} else {
UnassociatedContributions::<T>::insert(relay_account, reward_info);
}
}
InitializedRewardAmount::<T>::put(current_initialized_rewards);
TotalContributors::<T>::put(total_contributors);
Ok(Default::default())
}
}
impl<T: Config> Pallet<T> {
    /// The account ID that holds the Crowdloan's funds
    pub fn account_id() -> T::AccountId {
        PALLET_ID.into_account()
    }
    /// The Account Id's balance
    pub fn pot() -> BalanceOf<T> {
        T::RewardCurrency::free_balance(&Self::account_id())
    }
    /// Verify a set of signatures made with relay chain accounts
    /// We are verifying all the signatures, and then counting
    /// We could do something more efficient like count as we verify
    /// In any of the cases the weight will need to account for all the signatures,
    /// as we dont know beforehand whether they will be valid
    fn verify_signatures(
        proofs: Vec<(T::RelayChainAccountId, MultiSignature)>,
        reward_info: RewardInfo<T>,
        payload: Vec<u8>,
    ) -> DispatchResult {
        // The proofs should
        // 1. be signed by contributors to this address, otherwise they are not counted
        // 2. Signs a valid native identity
        // 3. The sum of the valid proofs needs to be bigger than InsufficientNumberOfValidProofs
        // I use a map here for faster lookups
        // (used purely as a set: the value is always the unit type)
        let mut voted: BTreeMap<T::RelayChainAccountId, ()> = BTreeMap::new();
        for (relay_account, signature) in proofs {
            // We just count votes that we have not seen
            if voted.get(&relay_account).is_none() {
                // Maybe I should not error here?
                // A single non-contributor proof fails the whole extrinsic.
                ensure!(
                    reward_info
                        .contributed_relay_addresses
                        .contains(&relay_account),
                    Error::<T>::NonContributedAddressProvided
                );
                // I am erroring here as I think it is good to know the reason in the single-case
                // signature
                ensure!(
                    signature.verify(payload.as_slice(), &relay_account.clone().into()),
                    Error::<T>::InvalidClaimSignature
                );
                voted.insert(relay_account, ());
            }
        }
        // Ensure the votes are sufficient: the fraction of distinct valid proofs over
        // all contributed relay addresses must reach the configured threshold.
        ensure!(
            Perbill::from_rational(
                voted.len() as u32,
                reward_info.contributed_relay_addresses.len() as u32
            ) >= T::RewardAddressRelayVoteThreshold::get(),
            Error::<T>::InsufficientNumberOfValidProofs
        );
        Ok(())
    }
}
#[pallet::error]
pub enum Error<T> {
    /// User trying to associate a native identity with a relay chain identity for posterior
    /// reward claiming provided an already associated relay chain identity
    AlreadyAssociated,
    /// Trying to introduce a batch that goes beyond the limits of the funds
    BatchBeyondFundPot,
    /// First claim already done
    /// NOTE(review): not referenced by any dispatchable in this file — possibly kept so
    /// that error indices stay stable; confirm before removing.
    FirstClaimAlreadyDone,
    /// The contribution is not high enough to be eligible for rewards
    /// NOTE(review): also appears unused in this file — verify against other modules.
    RewardNotHighEnough,
    /// User trying to associate a native identity with a relay chain identity for posterior
    /// reward claiming provided a wrong signature
    InvalidClaimSignature,
    /// User trying to claim the first free reward provided the wrong signature
    InvalidFreeClaimSignature,
    /// User trying to claim an award did not have a claim associated with it. This may mean
    /// they did not contribute to the crowdloan, or they have not yet associated a native id
    /// with their contribution
    NoAssociatedClaim,
    /// User trying to claim rewards has already claimed all rewards associated with its
    /// identity and contribution
    RewardsAlreadyClaimed,
    /// Reward vec has already been initialized
    RewardVecAlreadyInitialized,
    /// Reward vec has not yet been fully initialized
    RewardVecNotFullyInitializedYet,
    /// Rewards should match funds of the pallet
    RewardsDoNotMatchFund,
    /// Initialize_reward_vec received too many contributors
    TooManyContributors,
    /// Provided vesting period is not valid
    VestingPeriodNonValid,
    /// User provided a signature from a non-contributor relay account
    NonContributedAddressProvided,
    /// User submitted an insufficient number of proofs to change the reward address
    InsufficientNumberOfValidProofs,
}
#[pallet::genesis_config]
/// Genesis configuration: endows the pallet account with its initial funds.
pub struct GenesisConfig<T: Config> {
    /// The amount of funds this pallet controls
    pub funded_amount: BalanceOf<T>,
}
#[cfg(feature = "std")]
impl<T: Config> Default for GenesisConfig<T> {
    /// Default genesis funds the pallet with a nominal 1-unit balance.
    fn default() -> Self {
        let funded_amount = 1u32.into();
        Self { funded_amount }
    }
}
#[pallet::genesis_build]
impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
    // This sets the funds of the crowdloan pallet by depositing the configured
    // amount into the pallet's own account at genesis.
    fn build(&self) {
        T::RewardCurrency::deposit_creating(&Pallet::<T>::account_id(), self.funded_amount);
    }
}
#[pallet::storage]
#[pallet::getter(fn accounts_payable)]
/// Reward info keyed by the native account entitled to claim it.
pub type AccountsPayable<T: Config> =
    StorageMap<_, Blake2_128Concat, T::AccountId, RewardInfo<T>>;
#[pallet::storage]
#[pallet::getter(fn claimed_relay_chain_ids)]
/// Set of relay-chain accounts that have already been associated (value is unit).
pub type ClaimedRelayChainIds<T: Config> =
    StorageMap<_, Blake2_128Concat, T::RelayChainAccountId, ()>;
#[pallet::storage]
#[pallet::getter(fn unassociated_contributions)]
/// Contributions whose relay account has not yet been tied to a native account.
pub type UnassociatedContributions<T: Config> =
    StorageMap<_, Blake2_128Concat, T::RelayChainAccountId, RewardInfo<T>>;
#[pallet::storage]
#[pallet::getter(fn initialized)]
/// Whether the reward vector initialization has been completed.
pub type Initialized<T: Config> = StorageValue<_, bool, ValueQuery, T::Initialized>;
#[pallet::storage]
#[pallet::storage_prefix = "InitRelayBlock"]
#[pallet::getter(fn init_vesting_block)]
/// Vesting block height at the initialization of the pallet
type InitVestingBlock<T: Config> = StorageValue<_, T::VestingBlockNumber, ValueQuery>;
#[pallet::storage]
#[pallet::storage_prefix = "EndRelayBlock"]
#[pallet::getter(fn end_vesting_block)]
/// Vesting block height at which all rewards are fully vested (set by
/// complete_initialization; guaranteed > InitVestingBlock).
type EndVestingBlock<T: Config> = StorageValue<_, T::VestingBlockNumber, ValueQuery>;
#[pallet::storage]
#[pallet::getter(fn init_reward_amount)]
/// Total initialized amount so far. We store this to make pallet funds == contributors reward
/// check easier and more efficient
type InitializedRewardAmount<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>;
#[pallet::storage]
#[pallet::getter(fn total_contributors)]
/// Total number of contributors to aid hinting benchmarking
type TotalContributors<T: Config> = StorageValue<_, u32, ValueQuery>;
#[pallet::event]
#[pallet::generate_deposit(fn deposit_event)]
pub enum Event<T: Config> {
    /// The initial payment of InitializationPayment % was paid
    InitialPaymentMade(T::AccountId, BalanceOf<T>),
    /// Someone has proven they made a contribution and associated a native identity with it.
    /// Data is the relay account, native account and the total amount of _rewards_ that will be paid
    NativeIdentityAssociated(T::RelayChainAccountId, T::AccountId, BalanceOf<T>),
    /// A contributor has claimed some rewards.
    /// Data is the account getting paid and the amount of rewards paid.
    RewardsPaid(T::AccountId, BalanceOf<T>),
    /// A contributor has updated the reward address.
    RewardAddressUpdated(T::AccountId, T::AccountId),
    /// When initializing the reward vec an already initialized account was found
    InitializedAlreadyInitializedAccount(
        T::RelayChainAccountId,
        Option<T::AccountId>,
        BalanceOf<T>,
    ),
    /// When initializing the reward vec an account with a contribution below the
    /// minimum reward was found and skipped
    InitializedAccountWithNotEnoughContribution(
        T::RelayChainAccountId,
        Option<T::AccountId>,
        BalanceOf<T>,
    ),
}
}
|
use crate::std::prelude::v1::*;
use serde::{Serialize, Deserialize};
use std::collections::BTreeMap;
use phala_types::{BlockRewardInfo, SignedWorkerMessage, WorkerMessagePayload};
use sp_core::U256;
use sp_core::ecdsa;
use sp_core::hashing::blake2_256;
use parity_scale_codec::Encode;
use crate::contracts::AccountIdWrapper;
use crate::msg_channel::MsgChannel;
/// Monotonic index identifying a command/transaction processed by this worker.
pub type CommandIndex = u64;
/// Shorthand for the on-chain Phala event type this worker consumes.
type PhalaEvent = phala::RawEvent<sp_runtime::AccountId32, u128>;
#[derive(Serialize, Deserialize, Debug, Clone)]
/// Outcome of executing a command, recorded in its `TransactionReceipt`.
/// Variant names are part of the serialized (serde) format — do not rename.
pub enum TransactionStatus {
    Ok,
    InsufficientBalance,
    NoBalance,
    UnknownError,
    BadContractId,
    BadCommand,
    SymbolExist,
    AssetIdNotFound,
    NotAssetOwner,
    BadSecret,
    BadMachineId,
    FailedToSign,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
/// Record of a processed command, queryable by the submitting account.
pub struct TransactionReceipt {
    // Account that submitted the command; only this account may query the receipt.
    pub account: AccountIdWrapper,
    // Block in which the command was processed.
    pub block_num: chain::BlockNumber,
    pub contract_id: u32,
    pub command: String,
    pub status: TransactionStatus,
}
#[derive(Default)]
/// Worker-local "system" contract: holds the worker identity key, the receipt
/// store, and the outbound (egress) message channel to the chain.
pub struct System {
    // ECDSA identity key pair; None until `set_id` is called.
    pub id_key: Option<ecdsa::Pair>,
    // Raw public key bytes of `id_key`.
    pub id_pubkey: Vec<u8>,
    // blake2_256 hash of the public key, used for the mining lottery check.
    pub hashed_id: U256,
    pub machine_id: Vec<u8>,
    // Receipts of processed commands, keyed by command index.
    pub receipts: BTreeMap<CommandIndex, TransactionReceipt>,
    // Outbound message queue to the chain.
    pub egress: MsgChannel,
}
impl System {
    /// Create an empty System contract (no identity set yet).
    pub fn new() -> Self {
        Default::default()
    }
    /// Install the worker identity key pair and derive its public key and
    /// blake2_256-hashed id (used by the reward lottery in handle_reward_seed).
    pub fn set_id(&mut self, pair: &ecdsa::Pair) {
        let pubkey = ecdsa::Public::from(pair.clone());
        let raw_pubkey: &[u8] = pubkey.as_ref();
        let pkh = blake2_256(raw_pubkey);
        self.id_key = Some(pair.clone());
        self.id_pubkey = raw_pubkey.to_vec();
        self.hashed_id = pkh.into();
        println!("System::set_id: hashed identity key: {:?}", self.hashed_id);
    }
    /// Record this machine's id, used to recognize WorkerRenewed events for us.
    pub fn set_machine_id(&mut self, machine_id: Vec<u8>) {
        self.machine_id = machine_id;
    }
    /// Store the receipt for a processed command (overwrites any previous entry).
    pub fn add_receipt(&mut self, command_index: CommandIndex, tr: TransactionReceipt) {
        self.receipts.insert(command_index, tr);
    }
    /// Look up the receipt for a command index, if any.
    pub fn get_receipt(&self, command_index: CommandIndex) -> Option<&TransactionReceipt> {
        self.receipts.get(&command_index)
    }
    /// Answer a query. `accid_origin` is the authenticated caller (None if
    /// anonymous); receipt queries are only served to the submitting account.
    pub fn handle_query(&mut self, accid_origin: Option<&chain::AccountId>, req: Request)
    -> Response {
        // Inner closure so `?` can be used; errors are converted to Response::Error below.
        let inner = || -> Result<Response, Error> {
            match req {
                Request::QueryReceipt { command_index } => {
                    match self.get_receipt(command_index) {
                        Some(receipt) => {
                            let origin = accid_origin.ok_or(Error::NotAuthorized)?;
                            if receipt.account == AccountIdWrapper(origin.clone()) {
                                Ok(Response::QueryReceipt { receipt: receipt.clone() })
                            } else {
                                Err(Error::NotAuthorized)
                            }
                        },
                        None => Err(Error::Other(String::from("Transaction hash not found"))),
                    }
                },
                Request::GetWorkerEgress { start_sequence } => {
                    // Return all queued outbound messages at or after start_sequence,
                    // SCALE-encoded then base64'd.
                    let pending_msgs: Vec<SignedWorkerMessage> = self.egress.queue
                        .iter()
                        .filter(|msg| msg.data.sequence >= start_sequence)
                        .cloned()
                        .collect();
                    Ok(Response::GetWorkerEgress {
                        length: pending_msgs.len(),
                        encoded_egreee_b64: base64::encode(&pending_msgs.encode())
                    })
                },
                // If we add more unhandled queries:
                // _ => Err(Error::Other("Unknown command".to_string()))
            }
        };
        match inner() {
            Err(error) => Response::Error(error),
            Ok(resp) => resp
        }
    }
    /// React to an on-chain Phala event at block `blocknum`.
    pub fn handle_event(&mut self, blocknum: chain::BlockNumber, event: &PhalaEvent) -> Result<(), Error> {
        match event {
            // Reset the egress queue once we detected myself is re-registered
            phala::RawEvent::WorkerRegistered(_stash, pubkey, _machine_id) => {
                if pubkey == &self.id_pubkey {
                    println!("System::handle_event: Reset MsgChannel due to WorkerRegistered");
                    self.egress = Default::default();
                }
            },
            phala::RawEvent::WorkerRenewed(_stash, machine_id) => {
                // Not perfect because we only have machine_id but not pubkey here.
                if machine_id == &self.machine_id {
                    println!("System::handle_event: Reset MsgChannel due to WorkerRenewed");
                    self.egress = Default::default();
                }
            },
            // Handle other events
            phala::RawEvent::WorkerMessageReceived(_stash, pubkey, seq) => {
                println!("System::handle_event: Message confirmed (seq={})", seq);
                // Advance the egress queue messages
                if pubkey == &self.id_pubkey {
                    self.egress.received(*seq);
                }
            },
            phala::RawEvent::RewardSeed(reward_info) => {
                self.handle_reward_seed(blocknum, &reward_info)?;
            },
            _ => ()
        };
        Ok(())
    }
    /// Run the per-block reward lottery: XOR our hashed id with the on-chain seed
    /// and, if the result is within the online target, enqueue a signed Heartbeat.
    fn handle_reward_seed(&mut self, blocknum: chain::BlockNumber, reward_info: &BlockRewardInfo)
    -> Result<(), Error> {
        println!("System::handle_reward_seed({}, {:?})", blocknum, reward_info);
        let x = self.hashed_id ^ reward_info.seed;
        let online_hit = x <= reward_info.online_target;
        // TODO: consider the compute_target only if we are chosen:
        // let _compute_hit = x <= self.compute_target;
        // Push queue when necessary
        if online_hit {
            println!("System::handle_reward_seed: online hit ({} < {})!", x, reward_info.online_target);
            self.egress.push(WorkerMessagePayload::Heartbeat {
                block_num: blocknum as u32,
                claim_online: true,
                claim_compute: false,
            },
            self.id_key.as_ref().expect("Id key not set in System contract"));
        }
        Ok(())
    }
}
#[derive(Serialize, Deserialize, Debug)]
/// Errors returned to query clients (serialized — variant names are wire format).
pub enum Error {
    NotAuthorized,
    // NOTE(review): not constructed in this file (receipt misses use
    // Error::Other("Transaction hash not found") instead) — confirm if still needed.
    TxHashNotFound,
    Other(String),
}
#[derive(Serialize, Deserialize, Debug, Clone)]
/// Queries accepted by `System::handle_query` (serde wire format).
pub enum Request {
    /// Fetch the receipt for a previously submitted command (caller must own it).
    QueryReceipt {
        command_index: CommandIndex,
    },
    /// Fetch queued outbound worker messages with sequence >= start_sequence.
    GetWorkerEgress {
        start_sequence: u64,
    },
}
#[derive(Serialize, Deserialize, Debug)]
/// Replies produced by `System::handle_query` (serde wire format).
pub enum Response {
    QueryReceipt {
        receipt: TransactionReceipt
    },
    GetWorkerEgress {
        length: usize,
        // NOTE(review): field name is misspelled ("egreee") but it is part of the
        // serialized JSON schema — renaming would break clients; coordinate first.
        encoded_egreee_b64: String,
    },
    Error(Error),
}
|
use std::env;
/// No-op entry point: verifies that $HOME is set (panicking otherwise) and exits.
fn main() -> std::io::Result<()> {
    // Panic with a clear message when the environment lacks $HOME.
    let _home_dir = env::var("HOME").expect("could not get $HOME");
    Ok(())
}
|
use rust::prelude::*;
use types::{int_t, char_t};
use posix::pm::exit;
use posix::stdlib::{ARGV, ARGC, ENVP, ENVC, APPLE};
// Forward declaration of the program's C-ABI `main`, invoked by the startup shim
// below with argc/argv/envp plus the Darwin-style `apple` vector.
extern "C" {
    fn main(argc: int_t,
            argv: *const *const char_t,
            envp: *const *const char_t,
            apple: *const *const char_t) -> int_t;
}
/// This function is exported unmangled as "_libc_start_main", which the linker
/// looks for as the executable's entry point.
/// Also, Rust inserts the frame-pointer prelude, which is invalid
/// for an executable's entry point.
#[no_mangle]
pub unsafe extern fn _libc_start_main(argc: usize, argv: *const *const char_t) {
    // Record the raw argument vector for the rest of the runtime.
    ARGC = argc;
    ARGV = argv;
    // envp[] starts right after the NULL that terminates argv[].
    ENVP = offset(ARGV, ARGC as isize + 1);
    let mut apple: *const *const char_t = ENVP;
    // Scan forward to the NULL terminator of envp[].
    while *apple as usize != 0 {
        apple = offset(apple, 1); // increases by one pointer size
    }
    // NOTE(review): this is a BYTE difference between the two pointers, not an
    // element count — if ENVC is meant to be the number of env entries it should
    // be divided by size_of::<*const char_t>(); confirm ENVC's intended unit.
    ENVC = apple as usize - ENVP as usize - 1;
    apple = offset(apple, 1); // one NULL pointer separates apple[] from env[]
    APPLE = apple;
    // Hand control to the real main and exit with its status.
    let status = main(ARGC as int_t, ARGV, ENVP, apple);
    exit(status);
}
|
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use alloc::sync::Arc;
use spin::Mutex;
use alloc::collections::btree_map::BTreeMap;
use alloc::string::ToString;
use super::super::super::qlib::device::*;
use super::super::super::qlib::linux_def::*;
use super::super::super::qlib::auth::*;
use super::super::super::task::*;
use super::super::mount::*;
use super::super::inode::*;
use super::super::attr::*;
use super::super::ramfs::dir::*;
use super::super::ramfs::symlink::*;
use super::null::*;
use super::zero::*;
use super::full::*;
use super::random::*;
use super::tty::*;
const MEM_DEV_MAJOR: u16 = 1;
// Mem device minors.
const NULL_DEV_MINOR: u32 = 3;
const ZERO_DEV_MINOR: u32 = 5;
const FULL_DEV_MINOR: u32 = 7;
const RANDOM_DEV_MINOR: u32 = 8;
const URANDOM_DEV_MINOR: u32 = 9;
/// Build the inode for /dev/tty backed by the given TTYDevice operations.
fn NewTTYDevice(iops: &Arc<TTYDevice>, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    let deviceId = DEV_DEVICE.lock().id.DeviceID();
    let inodeId = DEV_DEVICE.lock().NextIno();
    let stableAttr = StableAttr {
        Type: InodeType::CharacterDevice,
        DeviceId: deviceId,
        InodeId: inodeId,
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        // 5:0 is Linux's fixed major:minor for /dev/tty (hard-coded rather than
        // using the MEM_DEV_MAJOR constants used by the other device nodes).
        DeviceFileMajor: 5,
        DeviceFileMinor: 0,
    };
    let inodeInternal = InodeIntern {
        InodeOp: iops.clone(),
        StableAttr: stableAttr,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    };
    return Inode(Arc::new(Mutex::new(inodeInternal)))
}
/// Build the inode for /dev/null (char device 1:3) backed by NullDevice ops.
fn NewNullDevice(iops: &Arc<NullDevice>, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    // Allocate a fresh identity on the device registry, then assemble the inode.
    let attrs = StableAttr {
        Type: InodeType::CharacterDevice,
        DeviceId: DEV_DEVICE.lock().id.DeviceID(),
        InodeId: DEV_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: MEM_DEV_MAJOR,
        DeviceFileMinor: NULL_DEV_MINOR,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: iops.clone(),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Build the inode for /dev/zero (char device 1:5) backed by ZeroDevice ops.
fn NewZeroDevice(iops: &Arc<ZeroDevice>, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    // Allocate a fresh identity on the device registry, then assemble the inode.
    let attrs = StableAttr {
        Type: InodeType::CharacterDevice,
        DeviceId: DEV_DEVICE.lock().id.DeviceID(),
        InodeId: DEV_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: MEM_DEV_MAJOR,
        DeviceFileMinor: ZERO_DEV_MINOR,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: iops.clone(),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Build the inode for /dev/full (char device 1:7) backed by FullDevice ops.
fn NewFullDevice(iops: &Arc<FullDevice>, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    // Allocate a fresh identity on the device registry, then assemble the inode.
    let attrs = StableAttr {
        Type: InodeType::CharacterDevice,
        DeviceId: DEV_DEVICE.lock().id.DeviceID(),
        InodeId: DEV_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: MEM_DEV_MAJOR,
        DeviceFileMinor: FULL_DEV_MINOR,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: iops.clone(),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Build the inode for /dev/random or /dev/urandom; `minor` selects which
/// (RANDOM_DEV_MINOR vs URANDOM_DEV_MINOR) — both share RandomDevice ops.
fn NewRandomDevice(iops: &Arc<RandomDevice>, msrc: &Arc<Mutex<MountSource>>, minor: u32) -> Inode {
    // Allocate a fresh identity on the device registry, then assemble the inode.
    let attrs = StableAttr {
        Type: InodeType::CharacterDevice,
        DeviceId: DEV_DEVICE.lock().id.DeviceID(),
        InodeId: DEV_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: MEM_DEV_MAJOR,
        DeviceFileMinor: minor,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: iops.clone(),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Build an empty r-xr-xr-x directory inode (e.g. a mount point under /dev).
fn NewDirectory(task: &Task, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    let dir_ops = Dir::New(task, BTreeMap::new(), &ROOT_OWNER, &FilePermissions::FromMode(FileMode(0o0555)));
    // NOTE(review): identity is taken from PROC_DEVICE here while the sibling
    // /dev constructors use DEV_DEVICE — confirm this is intentional.
    let attrs = StableAttr {
        Type: InodeType::Directory,
        DeviceId: PROC_DEVICE.lock().id.DeviceID(),
        InodeId: PROC_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: 0,
        DeviceFileMinor: 0,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: Arc::new(dir_ops),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Build a symlink inode pointing at `target`, owned by root.
fn NewSymlink(task: &Task, target: &str, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    let link_ops = Symlink::New(task, &ROOT_OWNER, target);
    let attrs = StableAttr {
        Type: InodeType::Symlink,
        DeviceId: DEV_DEVICE.lock().id.DeviceID(),
        InodeId: DEV_DEVICE.lock().NextIno(),
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: 0,
        DeviceFileMinor: 0,
    };
    Inode(Arc::new(Mutex::new(InodeIntern {
        InodeOp: Arc::new(link_ops),
        StableAttr: attrs,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    })))
}
/// Assemble the /dev directory tree: standard symlinks (fd, stdin/out/err, ptmx)
/// plus the memory character devices (null, zero, full, random, urandom, tty).
pub fn NewDev(task: &Task, msrc: &Arc<Mutex<MountSource>>) -> Inode {
    let mut contents = BTreeMap::new();
    // Standard fd symlinks into procfs.
    contents.insert("fd".to_string(), NewSymlink(task, &"/proc/self/fd".to_string(), msrc));
    contents.insert("stdin".to_string(), NewSymlink(task, &"/proc/self/fd/0".to_string(), msrc));
    contents.insert("stdout".to_string(), NewSymlink(task, &"/proc/self/fd/1".to_string(), msrc));
    contents.insert("stderr".to_string(), NewSymlink(task, &"/proc/self/fd/2".to_string(), msrc));
    // Memory devices, all world-readable/writable (0666).
    contents.insert("null".to_string(), NewNullDevice(&Arc::new(NullDevice::New(task, &ROOT_OWNER, &FileMode(0o0666))), msrc));
    contents.insert("zero".to_string(), NewZeroDevice(&Arc::new(ZeroDevice::New(task, &ROOT_OWNER, &FileMode(0o0666))), msrc));
    contents.insert("full".to_string(), NewFullDevice(&Arc::new(FullDevice::New(task, &ROOT_OWNER, &FileMode(0o0666))), msrc));
    // This is not as good as /dev/random in linux because go
    // runtime uses sys_random and /dev/urandom internally.
    // According to 'man 4 random', this will be sufficient unless
    // application uses this to generate long-lived GPG/SSL/SSH
    // keys.
    contents.insert("random".to_string(), NewRandomDevice(&Arc::new(RandomDevice::New(task, &ROOT_OWNER, &FileMode(0o0666))), msrc, RANDOM_DEV_MINOR));
    contents.insert("urandom".to_string(), NewRandomDevice(&Arc::new(RandomDevice::New(task, &ROOT_OWNER, &FileMode(0o0666))), msrc, URANDOM_DEV_MINOR));
    // A devpts is typically mounted at /dev/pts to provide
    // pseudoterminal support. Place an empty directory there for
    // the devpts to be mounted over.
    //contents.insert("pts".to_string(), NewDirectory(task, msrc));
    // Similarly, applications expect a ptmx device at /dev/ptmx
    // connected to the terminals provided by /dev/pts/. Rather
    // than creating a device directly (which requires a hairy
    // lookup on open to determine if a devpts exists), just create
    // a symlink to the ptmx provided by devpts. (The Linux devpts
    // documentation recommends this).
    //
    // If no devpts is mounted, this will simply be a dangling
    // symlink, which is fine.
    contents.insert("ptmx".to_string(), NewSymlink(task, &"pts/ptmx".to_string(), msrc));
    let ttyDevice = TTYDevice::New(task, &ROOT_OWNER, &FileMode(0o0666));
    contents.insert("tty".to_string(), NewTTYDevice(&Arc::new(ttyDevice), msrc));
    // Wrap everything in a 0555 directory inode identified on the DEV device.
    let iops = Dir::New(task, contents, &ROOT_OWNER, &FilePermissions::FromMode(FileMode(0o0555)));
    let deviceId = DEV_DEVICE.lock().id.DeviceID();
    let inodeId = DEV_DEVICE.lock().NextIno();
    let stableAttr = StableAttr {
        Type: InodeType::Directory,
        DeviceId: deviceId,
        InodeId: inodeId,
        BlockSize: MemoryDef::PAGE_SIZE as i64,
        DeviceFileMajor: 0,
        DeviceFileMinor: 0,
    };
    let inodeInternal = InodeIntern {
        InodeOp: Arc::new(iops),
        StableAttr: stableAttr,
        LockCtx: LockCtx::default(),
        MountSource: msrc.clone(),
        Overlay: None,
        ..Default::default()
    };
    return Inode(Arc::new(Mutex::new(inodeInternal)))
}
#![allow(unused_variables, non_upper_case_globals, non_snake_case, unused_unsafe, non_camel_case_types, dead_code, clippy::all)]
#[cfg(feature = "System_Power_Diagnostics")]
pub mod Diagnostics;
/// Generated binding for the (deprecated) Windows.System.Power
/// BackgroundEnergyManager runtime class; all members are static.
pub struct BackgroundEnergyManager {}
impl BackgroundEnergyManager {
#[cfg(feature = "deprecated")]
pub fn LowUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn NearMaxAcceptableUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn MaxAcceptableUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn ExcessiveUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn NearTerminationUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn TerminationUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn RecentEnergyUsage() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).12)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
pub fn RecentEnergyUsageLevel() -> ::windows::core::Result<u32> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: u32 = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).13)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
})
}
#[cfg(feature = "deprecated")]
#[cfg(feature = "Foundation")]
pub fn RecentEnergyUsageIncreased<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).14)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
})
}
#[cfg(feature = "deprecated")]
#[cfg(feature = "Foundation")]
pub fn RemoveRecentEnergyUsageIncreased<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).15)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
}
#[cfg(feature = "deprecated")]
#[cfg(feature = "Foundation")]
pub fn RecentEnergyUsageReturnedToLow<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe {
let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
(::windows::core::Interface::vtable(this).16)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
})
}
#[cfg(feature = "deprecated")]
#[cfg(feature = "Foundation")]
pub fn RemoveRecentEnergyUsageReturnedToLow<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
Self::IBackgroundEnergyManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).17)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
}
pub fn IBackgroundEnergyManagerStatics<R, F: FnOnce(&IBackgroundEnergyManagerStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> {
static mut SHARED: ::windows::core::FactoryCache<BackgroundEnergyManager, IBackgroundEnergyManagerStatics> = ::windows::core::FactoryCache::new();
unsafe { SHARED.call(callback) }
}
}
impl ::windows::core::RuntimeName for BackgroundEnergyManager {
const NAME: &'static str = "Windows.System.Power.BackgroundEnergyManager";
}
/// WinRT enum `Windows.System.Power.BatteryStatus` (signature type `i4`),
/// modeled as a transparent newtype over `i32` so that values outside the
/// named constants still round-trip across the ABI unchanged.
#[derive(::core::cmp::PartialEq, ::core::cmp::Eq, ::core::marker::Copy, ::core::clone::Clone, ::core::default::Default, ::core::fmt::Debug)]
#[repr(transparent)]
pub struct BatteryStatus(pub i32);
impl BatteryStatus {
    /// `NotPresent` = 0
    pub const NotPresent: BatteryStatus = BatteryStatus(0i32);
    /// `Discharging` = 1
    pub const Discharging: BatteryStatus = BatteryStatus(1i32);
    /// `Idle` = 2
    pub const Idle: BatteryStatus = BatteryStatus(2i32);
    /// `Charging` = 3
    pub const Charging: BatteryStatus = BatteryStatus(3i32);
}
impl ::core::convert::From<i32> for BatteryStatus {
    fn from(value: i32) -> Self {
        BatteryStatus(value)
    }
}
unsafe impl ::windows::core::Abi for BatteryStatus {
    // Crosses the ABI as its transparent i32 representation.
    type Abi = Self;
}
unsafe impl ::windows::core::RuntimeType for BatteryStatus {
    const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"enum(Windows.System.Power.BatteryStatus;i4)");
}
impl ::windows::core::DefaultType for BatteryStatus {
    type DefaultType = Self;
}
/// WinRT enum `Windows.System.Power.EnergySaverStatus` (signature type
/// `i4`), modeled as a transparent newtype over `i32` so that values outside
/// the named constants still round-trip across the ABI unchanged.
#[derive(::core::cmp::PartialEq, ::core::cmp::Eq, ::core::marker::Copy, ::core::clone::Clone, ::core::default::Default, ::core::fmt::Debug)]
#[repr(transparent)]
pub struct EnergySaverStatus(pub i32);
impl EnergySaverStatus {
    /// `Disabled` = 0
    pub const Disabled: EnergySaverStatus = EnergySaverStatus(0i32);
    /// `Off` = 1
    pub const Off: EnergySaverStatus = EnergySaverStatus(1i32);
    /// `On` = 2
    pub const On: EnergySaverStatus = EnergySaverStatus(2i32);
}
impl ::core::convert::From<i32> for EnergySaverStatus {
    fn from(value: i32) -> Self {
        EnergySaverStatus(value)
    }
}
unsafe impl ::windows::core::Abi for EnergySaverStatus {
    // Crosses the ABI as its transparent i32 representation.
    type Abi = Self;
}
unsafe impl ::windows::core::RuntimeType for EnergySaverStatus {
    const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"enum(Windows.System.Power.EnergySaverStatus;i4)");
}
impl ::windows::core::DefaultType for EnergySaverStatus {
    type DefaultType = Self;
}
/// Statics-only WinRT runtime class `Windows.System.Power.ForegroundEnergyManager`.
///
/// Mirrors `BackgroundEnergyManager` but without the termination-level
/// properties; every method forwards to the `IForegroundEnergyManagerStatics`
/// vtable and the whole surface is gated on the `deprecated` feature.
pub struct ForegroundEnergyManager {}
impl ForegroundEnergyManager {
    // Slots 0-5 of the vtable are the IUnknown/IInspectable preamble, so the
    // interface's own methods start at slot 6.

    /// Static property `LowUsageLevel` (vtable slot 6).
    #[cfg(feature = "deprecated")]
    pub fn LowUsageLevel() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Static property `NearMaxAcceptableUsageLevel` (vtable slot 7).
    #[cfg(feature = "deprecated")]
    pub fn NearMaxAcceptableUsageLevel() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Static property `MaxAcceptableUsageLevel` (vtable slot 8).
    #[cfg(feature = "deprecated")]
    pub fn MaxAcceptableUsageLevel() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Static property `ExcessiveUsageLevel` (vtable slot 9).
    #[cfg(feature = "deprecated")]
    pub fn ExcessiveUsageLevel() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Static property `RecentEnergyUsage` (vtable slot 10).
    #[cfg(feature = "deprecated")]
    pub fn RecentEnergyUsage() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Static property `RecentEnergyUsageLevel` (vtable slot 11).
    #[cfg(feature = "deprecated")]
    pub fn RecentEnergyUsageLevel() -> ::windows::core::Result<u32> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: u32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), &mut result__).from_abi::<u32>(result__)
        })
    }
    /// Subscribes `handler` to the `RecentEnergyUsageIncreased` event
    /// (vtable slot 12); returns the token needed to unsubscribe.
    #[cfg(feature = "deprecated")]
    #[cfg(feature = "Foundation")]
    pub fn RecentEnergyUsageIncreased<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).12)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `RecentEnergyUsageIncreased` handler identified by
    /// `token` (vtable slot 13).
    #[cfg(feature = "deprecated")]
    #[cfg(feature = "Foundation")]
    pub fn RemoveRecentEnergyUsageIncreased<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).13)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Subscribes `handler` to the `RecentEnergyUsageReturnedToLow` event
    /// (vtable slot 14); returns the token needed to unsubscribe.
    #[cfg(feature = "deprecated")]
    #[cfg(feature = "Foundation")]
    pub fn RecentEnergyUsageReturnedToLow<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).14)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `RecentEnergyUsageReturnedToLow` handler identified
    /// by `token` (vtable slot 15).
    #[cfg(feature = "deprecated")]
    #[cfg(feature = "Foundation")]
    pub fn RemoveRecentEnergyUsageReturnedToLow<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IForegroundEnergyManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).15)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Resolves (and caches) the class's activation factory, then passes its
    /// `IForegroundEnergyManagerStatics` interface to `callback`.
    pub fn IForegroundEnergyManagerStatics<R, F: FnOnce(&IForegroundEnergyManagerStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> {
        // NOTE(review): generated code reads a `static mut` cache here;
        // thread-safety is presumably provided inside `FactoryCache` —
        // confirm against the windows-rs runtime before touching this.
        static mut SHARED: ::windows::core::FactoryCache<ForegroundEnergyManager, IForegroundEnergyManagerStatics> = ::windows::core::FactoryCache::new();
        unsafe { SHARED.call(callback) }
    }
}
// Runtime class name used for activation-factory lookup.
impl ::windows::core::RuntimeName for ForegroundEnergyManager {
    const NAME: &'static str = "Windows.System.Power.ForegroundEnergyManager";
}
/// Statics (activation-factory) interface for `BackgroundEnergyManager`;
/// a transparent wrapper over the underlying `IInspectable` pointer.
#[repr(transparent)]
#[doc(hidden)]
pub struct IBackgroundEnergyManagerStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IBackgroundEnergyManagerStatics {
    type Vtable = IBackgroundEnergyManagerStatics_abi;
    const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xb3161d95_1180_4376_96e1_4095568147ce);
}
// Raw vtable layout. Field order is ABI-critical: slots 0-5 follow the
// IUnknown/IInspectable shape, and the remaining slots are the interface
// methods in metadata order (the slot numbers used by the wrappers above).
// When the "Foundation" feature is off, event-related slots are replaced by
// `usize` placeholders so the layout — and every later slot number — is
// preserved.
#[repr(C)]
#[doc(hidden)]
pub struct IBackgroundEnergyManagerStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    // Slots 6-13: the eight u32 usage-level/usage properties.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    // Slots 14-17: add/remove pairs for the two events.
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
);
/// Statics (activation-factory) interface for `ForegroundEnergyManager`;
/// a transparent wrapper over the underlying `IInspectable` pointer.
#[repr(transparent)]
#[doc(hidden)]
pub struct IForegroundEnergyManagerStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IForegroundEnergyManagerStatics {
    type Vtable = IForegroundEnergyManagerStatics_abi;
    const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x9ff86872_e677_4814_9a20_5337ca732b98);
}
// Raw vtable layout. Field order is ABI-critical: slots 0-5 follow the
// IUnknown/IInspectable shape, then the interface methods in metadata
// order. `usize` placeholders keep the layout stable when "Foundation"
// is disabled.
#[repr(C)]
#[doc(hidden)]
pub struct IForegroundEnergyManagerStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    // Slots 6-11: the six u32 usage-level/usage properties.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut u32) -> ::windows::core::HRESULT,
    // Slots 12-15: add/remove pairs for the two events.
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
);
/// Statics (activation-factory) interface for `PowerManager`;
/// a transparent wrapper over the underlying `IInspectable` pointer.
#[repr(transparent)]
#[doc(hidden)]
pub struct IPowerManagerStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IPowerManagerStatics {
    type Vtable = IPowerManagerStatics_abi;
    const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x1394825d_62ce_4364_98d5_aa28c7fbd15b);
}
// Raw vtable layout. Field order is ABI-critical: slots 0-5 follow the
// IUnknown/IInspectable shape, then each property getter is followed by the
// add/remove pair for its change event. `usize` placeholders keep the
// layout stable when "Foundation" is disabled.
#[repr(C)]
#[doc(hidden)]
pub struct IPowerManagerStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    // Slot 6: EnergySaverStatus getter; slots 7-8: its change event.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut EnergySaverStatus) -> ::windows::core::HRESULT,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    // Slot 9: BatteryStatus getter; slots 10-11: its change event.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut BatteryStatus) -> ::windows::core::HRESULT,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    // Slot 12: PowerSupplyStatus getter; slots 13-14: its change event.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut PowerSupplyStatus) -> ::windows::core::HRESULT,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    // Slot 15: RemainingChargePercent getter; slots 16-17: its change event.
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut i32) -> ::windows::core::HRESULT,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    // Slot 18: RemainingDischargeTime getter; slots 19-20: its change event.
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut super::super::Foundation::TimeSpan) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, handler: ::windows::core::RawPtr, result__: *mut super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, token: super::super::Foundation::EventRegistrationToken) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
);
/// Statics-only WinRT runtime class `Windows.System.Power.PowerManager`.
///
/// Exposes battery/power-supply state as static property getters, each
/// paired with an add/remove change-event registration; every method
/// forwards to a fixed slot of the `IPowerManagerStatics` vtable.
pub struct PowerManager {}
impl PowerManager {
    // Slots 0-5 of the vtable are the IUnknown/IInspectable preamble, so the
    // interface's own methods start at slot 6.

    /// Static property `EnergySaverStatus` (vtable slot 6).
    pub fn EnergySaverStatus() -> ::windows::core::Result<EnergySaverStatus> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: EnergySaverStatus = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<EnergySaverStatus>(result__)
        })
    }
    /// Subscribes `handler` to `EnergySaverStatusChanged` (vtable slot 7);
    /// returns the token needed to unsubscribe.
    #[cfg(feature = "Foundation")]
    pub fn EnergySaverStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `EnergySaverStatusChanged` handler for `token`
    /// (vtable slot 8).
    #[cfg(feature = "Foundation")]
    pub fn RemoveEnergySaverStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IPowerManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Static property `BatteryStatus` (vtable slot 9).
    pub fn BatteryStatus() -> ::windows::core::Result<BatteryStatus> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: BatteryStatus = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), &mut result__).from_abi::<BatteryStatus>(result__)
        })
    }
    /// Subscribes `handler` to `BatteryStatusChanged` (vtable slot 10);
    /// returns the token needed to unsubscribe.
    #[cfg(feature = "Foundation")]
    pub fn BatteryStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `BatteryStatusChanged` handler for `token`
    /// (vtable slot 11).
    #[cfg(feature = "Foundation")]
    pub fn RemoveBatteryStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IPowerManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Static property `PowerSupplyStatus` (vtable slot 12).
    pub fn PowerSupplyStatus() -> ::windows::core::Result<PowerSupplyStatus> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: PowerSupplyStatus = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).12)(::core::mem::transmute_copy(this), &mut result__).from_abi::<PowerSupplyStatus>(result__)
        })
    }
    /// Subscribes `handler` to `PowerSupplyStatusChanged` (vtable slot 13);
    /// returns the token needed to unsubscribe.
    #[cfg(feature = "Foundation")]
    pub fn PowerSupplyStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).13)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `PowerSupplyStatusChanged` handler for `token`
    /// (vtable slot 14).
    #[cfg(feature = "Foundation")]
    pub fn RemovePowerSupplyStatusChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IPowerManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).14)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Static property `RemainingChargePercent` (vtable slot 15).
    pub fn RemainingChargePercent() -> ::windows::core::Result<i32> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: i32 = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).15)(::core::mem::transmute_copy(this), &mut result__).from_abi::<i32>(result__)
        })
    }
    /// Subscribes `handler` to `RemainingChargePercentChanged` (vtable
    /// slot 16); returns the token needed to unsubscribe.
    #[cfg(feature = "Foundation")]
    pub fn RemainingChargePercentChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).16)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `RemainingChargePercentChanged` handler for `token`
    /// (vtable slot 17).
    #[cfg(feature = "Foundation")]
    pub fn RemoveRemainingChargePercentChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IPowerManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).17)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Static property `RemainingDischargeTime` (vtable slot 18).
    #[cfg(feature = "Foundation")]
    pub fn RemainingDischargeTime() -> ::windows::core::Result<super::super::Foundation::TimeSpan> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::TimeSpan = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).18)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::Foundation::TimeSpan>(result__)
        })
    }
    /// Subscribes `handler` to `RemainingDischargeTimeChanged` (vtable
    /// slot 19); returns the token needed to unsubscribe.
    #[cfg(feature = "Foundation")]
    pub fn RemainingDischargeTimeChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventHandler<::windows::core::IInspectable>>>(handler: Param0) -> ::windows::core::Result<super::super::Foundation::EventRegistrationToken> {
        Self::IPowerManagerStatics(|this| unsafe {
            let mut result__: super::super::Foundation::EventRegistrationToken = ::core::mem::zeroed();
            (::windows::core::Interface::vtable(this).19)(::core::mem::transmute_copy(this), handler.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::EventRegistrationToken>(result__)
        })
    }
    /// Unsubscribes the `RemainingDischargeTimeChanged` handler for `token`
    /// (vtable slot 20).
    #[cfg(feature = "Foundation")]
    pub fn RemoveRemainingDischargeTimeChanged<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::EventRegistrationToken>>(token: Param0) -> ::windows::core::Result<()> {
        Self::IPowerManagerStatics(|this| unsafe { (::windows::core::Interface::vtable(this).20)(::core::mem::transmute_copy(this), token.into_param().abi()).ok() })
    }
    /// Resolves (and caches) the class's activation factory, then passes its
    /// `IPowerManagerStatics` interface to `callback`.
    pub fn IPowerManagerStatics<R, F: FnOnce(&IPowerManagerStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> {
        // NOTE(review): generated code reads a `static mut` cache here;
        // thread-safety is presumably provided inside `FactoryCache` —
        // confirm against the windows-rs runtime before touching this.
        static mut SHARED: ::windows::core::FactoryCache<PowerManager, IPowerManagerStatics> = ::windows::core::FactoryCache::new();
        unsafe { SHARED.call(callback) }
    }
}
// Runtime class name used for activation-factory lookup.
impl ::windows::core::RuntimeName for PowerManager {
    const NAME: &'static str = "Windows.System.Power.PowerManager";
}
/// WinRT enum `Windows.System.Power.PowerSupplyStatus` (signature type
/// `i4`), modeled as a transparent newtype over `i32` so that values outside
/// the named constants still round-trip across the ABI unchanged.
#[derive(::core::cmp::PartialEq, ::core::cmp::Eq, ::core::marker::Copy, ::core::clone::Clone, ::core::default::Default, ::core::fmt::Debug)]
#[repr(transparent)]
pub struct PowerSupplyStatus(pub i32);
impl PowerSupplyStatus {
    /// `NotPresent` = 0
    pub const NotPresent: PowerSupplyStatus = PowerSupplyStatus(0i32);
    /// `Inadequate` = 1
    pub const Inadequate: PowerSupplyStatus = PowerSupplyStatus(1i32);
    /// `Adequate` = 2
    pub const Adequate: PowerSupplyStatus = PowerSupplyStatus(2i32);
}
impl ::core::convert::From<i32> for PowerSupplyStatus {
    fn from(value: i32) -> Self {
        PowerSupplyStatus(value)
    }
}
unsafe impl ::windows::core::Abi for PowerSupplyStatus {
    // Crosses the ABI as its transparent i32 representation.
    type Abi = Self;
}
unsafe impl ::windows::core::RuntimeType for PowerSupplyStatus {
    const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"enum(Windows.System.Power.PowerSupplyStatus;i4)");
}
impl ::windows::core::DefaultType for PowerSupplyStatus {
    type DefaultType = Self;
}
|
/* Copyright (C) 2016 Yutaka Kamei */
#[macro_use] extern crate lazy_static;
extern crate rustc_serialize;
extern crate regex;
extern crate time;
extern crate uuid;
pub mod filter;
pub mod backend;
pub mod schema;
|
/// Round-trips "Hello, world!" through `foo.txt`: writes the file, reads it
/// back, and verifies the contents match, reporting success or failure on
/// stdout.
fn main() {
    use std::io::prelude::*;
    use std::fs::File;

    /// Writes "Hello, world!" to foo.txt, reads it back, and checks the
    /// round-trip. Any I/O error is propagated to the caller via `?`
    /// (replacing the deprecated `try!` macro, which is a hard error in
    /// Rust 2018 and later where `try` is a reserved keyword).
    fn foo() -> std::io::Result<()> {
        let mut f = File::create("foo.txt")?;
        f.write_all(b"Hello, world!")?;
        // Re-binding `f` drops the writer, closing the file before it is
        // reopened for reading.
        let mut f = File::open("foo.txt")?;
        let mut s = String::new();
        f.read_to_string(&mut s)?;
        assert_eq!(s, "Hello, world!");
        Ok(())
    }
    match foo() {
        Ok(()) => println!("All good!"),
        _ => println!("Something happened"),
    }
}
|
#![experimental]
use std::thread::Thread;
/// Spawns two threads, one printing "Hello" and one printing "World" (in no
/// guaranteed order), and waits for both to finish.
///
/// Modernized from the pre-1.0 `std::thread::Thread::spawn` API, which was
/// removed before Rust 1.0. The explicit `join` calls preserve the original
/// `JoinGuard` semantics of waiting for the threads before returning, so the
/// output is not lost if the process exits right after this call.
pub fn a_spawn_thread() {
    let t1 = std::thread::spawn(|| println!("Hello")); // spawn thread
    let t2 = std::thread::spawn(|| println!("World")); // spawn thread
    // Wait for both threads to complete before returning.
    t1.join().expect("print thread panicked");
    t2.join().expect("print thread panicked");
}
/// Demonstrates channel-based message passing: one receiver thread takes two
/// integers off an mpsc channel while two sender threads (the second using a
/// cloned `Sender`) each send one value.
///
/// The receiver asserts it saw the values 1 and 3 in either order, since
/// thread scheduling does not fix which sender runs first. Modernized from
/// the pre-1.0 API (`channel` from the old prelude, `int` literals, panicking
/// `recv`); all three threads are joined before returning, matching the
/// original `JoinGuard` semantics of waiting on scope exit.
pub fn b_channel() {
    let (tx, rx) = std::sync::mpsc::channel();
    let tx1 = tx.clone();
    let receiver = std::thread::spawn(move || {
        // `recv` blocks until a value arrives; it only errors if every
        // sender has been dropped without sending.
        let v1: i32 = rx.recv().expect("all senders disconnected");
        let v2: i32 = rx.recv().expect("all senders disconnected");
        println!("Value= {}", v1);
        println!("Value= {}", v2);
        assert!((v1 == 1 && v2 == 3) || (v1 == 3 && v2 == 1));
    });
    let s1 = std::thread::spawn(move || tx.send(1).expect("receiver hung up"));
    let s2 = std::thread::spawn(move || tx1.send(3).expect("receiver hung up"));
    // Wait for all three threads so the function's effects are complete on return.
    s1.join().expect("sender thread panicked");
    s2.join().expect("sender thread panicked");
    receiver.join().expect("receiver thread panicked");
}
#[doc = "
define messages for the actor implementation of the 'sadistic homework'.
"]
// Message protocol for the actor-based double-ended queue below; `Get*`
// variants carry a reply channel so the actor can answer the caller.
pub enum Msg<T : Send> {
    GetLeft(Sender<Option<T>>), // dequeue value from the left of the list
    GetRight(Sender<Option<T>>), // dequeue value from the right of the list
    PutLeft(T), // enqueue a value on the left side of the list
    PutRight(T), // enqueue a value on the right side of the list
    Terminate, // terminate the thread
}
#[doc = r#"
An actor-based solution of the 'sadistic homework'.
This is a blocking version: the actor only replies to a get request
if we can dequeue an element from the queue
```test_harness
use std::thread::Thread;
use concurrency::Msg;
#[test]
pub fn c_blocking_get() {
let (tx,rx) = channel();
let _t = Thread::spawn(move || {
let mut list : Vec<uint> = Vec::new();
let (mtx,mrx) = channel(); // delayed messages
loop { // do we have some Get requests to process?
let mut cont = true;
while cont && list.len() > 0 {
match mrx.try_recv() {
Ok(Msg::GetLeft(tx)) => tx.send(list.remove(0)),
Ok(Msg::GetRight(tx)) => tx.send(list.pop()),
_ => cont = false,
}
}
let m = rx.recv();
match m {
Msg::GetLeft(tx) =>
if list.len() > 0 { tx.send(list.remove(0)) }
else { mtx.send(Msg::GetLeft(tx)) },
Msg::GetRight(tx)=>
if list.len() > 0 { tx.send(list.pop()) }
else { mtx.send(Msg::GetRight(tx)) },
Msg::PutLeft(v) => list.insert(0u, v),
Msg::PutRight(v) => list.push(v),
Msg::Terminate => return,
};
}
});
tx.send(Msg::PutLeft(10u));
tx.send(Msg::PutRight(11u));
let (rtx,rrx) = channel();
tx.send(Msg::GetLeft(rtx.clone()));
let v = rrx.recv();
assert!(v == Some(10u));
tx.send(Msg::GetLeft(rtx.clone()));
let v = rrx.recv();
assert!(v == Some(11u));
tx.send(Msg::GetLeft(rtx.clone()));
tx.send(Msg::PutRight(12u));
let v = rrx.recv();
assert!(v == Some(12u));
tx.send(Msg::Terminate);
}
```
"#]
// Actor solution of the 'sadistic homework', blocking flavor: a single
// worker thread owns the deque. Get requests that arrive while the list
// is empty are parked on a private channel (mtx/mrx) and replayed once
// data exists, so every Get eventually answers with Some(value).
// NOTE(review): pre-1.0 Rust (`Thread::spawn`, `uint`, panicking `recv()`,
// `Vec::remove`/`pop` returning Option) — will not compile today.
pub fn c_blocking_get() {
    let (tx,rx) = channel();
    let _t = Thread::spawn(move || {
        let mut list : Vec<uint> = Vec::new();
        let (mtx,mrx) = channel(); // private queue of delayed Get requests
        loop { // first, replay any parked Get requests while data exists
            let mut cont = true;
            while cont && list.len() > 0 {
                match mrx.try_recv() {
                    Ok(Msg::GetLeft(tx)) => tx.send(list.remove(0)),
                    Ok(Msg::GetRight(tx)) => tx.send(list.pop()),
                    _ => cont = false, // no parked request pending
                }
            }
            // Then block on the next external message.
            let m = rx.recv();
            match m {
                Msg::GetLeft(tx) =>
                    // Answer now if possible, otherwise park the request.
                    if list.len() > 0 { tx.send(list.remove(0)) }
                    else { mtx.send(Msg::GetLeft(tx)) },
                Msg::GetRight(tx)=>
                    if list.len() > 0 { tx.send(list.pop()) }
                    else { mtx.send(Msg::GetRight(tx)) },
                Msg::PutLeft(v) => list.insert(0u, v),
                Msg::PutRight(v) => list.push(v),
                Msg::Terminate => return,
            };
        }
    });
    tx.send(Msg::PutLeft(10u));
    tx.send(Msg::PutRight(11u));
    let (rtx,rrx) = channel(); // reply channel shared by all Gets below
    tx.send(Msg::GetLeft(rtx.clone()));
    let v = rrx.recv();
    assert!(v == Some(10u));
    tx.send(Msg::GetLeft(rtx.clone()));
    let v = rrx.recv();
    assert!(v == Some(11u));
    // This Get arrives while the list is empty: it is parked and only
    // answered after the following Put.
    tx.send(Msg::GetLeft(rtx.clone()));
    tx.send(Msg::PutRight(12u));
    let v = rrx.recv();
    assert!(v == Some(12u));
    tx.send(Msg::Terminate);
}
#[doc = r#"
actor solution of the 'sadistic homework'.
This is a polling version: the actor replies with a 'None'
when attempting to read from an empty list.
```test_harness
use std::thread::Thread;
use concurrency::Msg;
#[test]
pub fn d_polling_version() {
let (tx,rx) = channel();
let _t = Thread::spawn(move || {
let mut list : Vec<uint> = Vec::new();
loop {
let m = rx.recv();
match m {
Msg::GetLeft(tx) => tx.send(list.remove(0)),
Msg::GetRight(tx)=> tx.send(list.pop()),
Msg::PutLeft(v) => list.insert(0u, v),
Msg::PutRight(v) => list.push(v),
Msg::Terminate => return,
};
}
});
let (rtx,rrx) = channel();
tx.send(Msg::GetLeft(rtx.clone()));
let v = rrx.recv();
assert!(v == None);
tx.send(Msg::PutLeft(10u));
tx.send(Msg::GetLeft(rtx.clone()));
let v = rrx.recv();
assert!(v == Some(10u));
tx.send(Msg::PutRight(11u));
tx.send(Msg::PutRight(12u));
tx.send(Msg::GetRight(rtx.clone()));
let v = rrx.recv();
assert!(v == Some(12u));
tx.send(Msg::Terminate);
}
```
"#]
// Actor solution of the 'sadistic homework', polling flavor: a Get on an
// empty list answers immediately with None instead of blocking (this
// relies on the pre-1.0 `Vec::remove`/`pop` returning `Option`).
// NOTE(review): pre-1.0 Rust — will not compile on a modern toolchain.
pub fn d_polling_version() {
    let (tx,rx) = channel();
    let _t = Thread::spawn(move || {
        let mut list : Vec<uint> = Vec::new();
        loop {
            let m = rx.recv();
            match m {
                Msg::GetLeft(tx) => tx.send(list.remove(0)),
                Msg::GetRight(tx)=> tx.send(list.pop()),
                Msg::PutLeft(v) => list.insert(0u, v),
                Msg::PutRight(v) => list.push(v),
                Msg::Terminate => return,
            };
        }
    });
    let (rtx,rrx) = channel(); // reply channel for Get responses
    // Get on an empty list: polling semantics answer None immediately.
    tx.send(Msg::GetLeft(rtx.clone()));
    let v = rrx.recv();
    assert!(v == None);
    tx.send(Msg::PutLeft(10u));
    tx.send(Msg::GetLeft(rtx.clone()));
    let v = rrx.recv();
    assert!(v == Some(10u));
    tx.send(Msg::PutRight(11u));
    tx.send(Msg::PutRight(12u));
    tx.send(Msg::GetRight(rtx.clone()));
    let v = rrx.recv();
    assert!(v == Some(12u));
    tx.send(Msg::Terminate);
}
#[doc = r#"
actor solution of the 'sadistic homework' using select! instead
of a private channel to delay GetMessages arriving when the list
is empty.
We split the messages into two enums: GETs and PUTs
"#]
// Read-side messages for the select!-based actor; each carries the reply
// channel for the dequeued value.
pub enum GETs<T : Send> {
    GetLeft(Sender<Option<T>>),
    GetRight(Sender<Option<T>>),
}
#[doc = "
We split the messages into two enums: GETs and PUTs
PUTs also contain the termination message.
"]
// Write-side messages for the select!-based actor; also carries the
// termination message so an empty actor can still be shut down.
pub enum PUTs<T : Send> {
    PutLeft(T),
    PutRight(T),
    Terminate,
}
#[doc = r#"
actor solution of the 'sadistic homework' using select! instead
of a private channel to delay GetMessages arriving when the list
is empty.
```test_harness
use std::thread::Thread;
use concurrency::PUTs;
use concurrency::GETs;
#[test]
fn e_select_version() {
let (ptx,prx) = channel();
let (gtx,grx) = channel();
let _t = Thread::spawn(move || {
let mut list : Vec<uint> = Vec::new();
loop {
if list.len() > 0 {
select!(
m = grx.recv() => {
match m {
GETs::GetLeft(tx) => tx.send(list.remove(0)),
GETs::GetRight(tx)=> tx.send(list.pop()),
};
},
m = prx.recv() => {
match m {
PUTs::PutLeft(v) => list.insert(0u, v),
PUTs::PutRight(v) => list.push(v),
PUTs::Terminate => return,
};
}
)
} else {
let m = prx.recv();
match m {
PUTs::PutLeft(v) => list.insert(0u, v),
PUTs::PutRight(v) => list.push(v),
PUTs::Terminate => return,
};
}
}
});
let (rtx,rrx) = channel();
gtx.send(GETs::GetLeft(rtx.clone()));
gtx.send(GETs::GetRight(rtx.clone()));
ptx.send(PUTs::PutLeft(10u));
ptx.send(PUTs::PutRight(11u));
let v = rrx.recv();
assert!(v == Some(10u));
let v = rrx.recv();
assert!(v == Some(11u));
ptx.send(PUTs::Terminate);
}
```
"#]
// Actor solution of the 'sadistic homework' using select! over two
// channels instead of a private parking channel: while the list is
// non-empty the actor listens to both Gets and Puts; when empty it only
// listens to Puts, which implicitly blocks pending Gets until data exists.
// NOTE(review): pre-1.0 Rust (`select!`, `uint`, panicking `recv()`) —
// will not compile on a modern toolchain.
pub fn e_select_version() {
    let (ptx,prx) = channel();
    let (gtx,grx) = channel();
    let _t = Thread::spawn(move || {
        let mut list : Vec<uint> = Vec::new();
        loop {
            if list.len() > 0 { // select! does not (yet) support a guard expression...
                select!(
                    m = grx.recv() => {
                        match m {
                            GETs::GetLeft(tx) => tx.send(list.remove(0)),
                            GETs::GetRight(tx)=> tx.send(list.pop()),
                        };
                    },
                    m = prx.recv() => {
                        match m {
                            PUTs::PutLeft(v) => list.insert(0u, v),
                            PUTs::PutRight(v) => list.push(v),
                            PUTs::Terminate => return,
                        };
                    }
                )
            } else {
                // Empty list: ignore the Get channel so Gets queue up.
                let m = prx.recv();
                match m {
                    PUTs::PutLeft(v) => list.insert(0u, v),
                    PUTs::PutRight(v) => list.push(v),
                    PUTs::Terminate => return,
                };
            }
        }
    });
    let (rtx,rrx) = channel(); // reply channel for Get responses
    // Both Gets are sent before any data; they are served in order once
    // the Puts below land.
    gtx.send(GETs::GetLeft(rtx.clone()));
    gtx.send(GETs::GetRight(rtx.clone()));
    ptx.send(PUTs::PutLeft(10u));
    ptx.send(PUTs::PutRight(11u));
    let v = rrx.recv();
    assert!(v == Some(10u));
    let v = rrx.recv();
    assert!(v == Some(11u));
    ptx.send(PUTs::Terminate);
}
|
use hyper::Error as HyperError;
use serde_json::Error as SerdeError;
use postgres::Error as PostgresError;
/// Application-level error type unifying the failure modes of the HTTP
/// layer (hyper), JSON (de)serialization (serde_json), and the database
/// (postgres), plus a generic not-found case.
#[derive(Debug)]
pub enum Error {
    /// The requested resource does not exist.
    NotFound,
    /// Transport/HTTP failure from hyper.
    Hyper(HyperError),
    /// JSON encoding/decoding failure.
    Serde(SerdeError),
    /// Database failure.
    Postgres(PostgresError)
}
impl Error {
// pub fn from_serde(err: SerdeError) -> Error {
// Error::Serde(err)
// }
// return the error message in json format
pub fn json(self) -> String {
match self {
Error::NotFound => r#"{"error":{"code":"error/not-found"}}"#.to_string(),
Error::Hyper(_) => r#"{"error":{"code":"error/hyper"}}"#.to_string(),
Error::Serde(_) => r#"{"error":{"code":"error/serde-json"}}"#.to_string(),
Error::Postgres(_) => r#"{"error":{"code":"error/postgres-database"}}"#.to_string(),
}
}
}
// Generates `impl From<$t> for Error` mapping the library error into the
// matching variant `$v`, so `?` can convert these errors automatically.
macro_rules! impl_from {
    ($v:path, $t:ty) => {
        impl From<$t> for Error {
            fn from(err: $t) -> Self {
                $v(err)
            }
        }
    }
}
impl_from!(Error::Hyper, HyperError);
impl_from!(Error::Serde, SerdeError);
impl_from!(Error::Postgres, PostgresError);
/// Minimal demonstration of the `Debug` derive on a tuple struct: the
/// derived formatter prints the type name followed by its fields.
#[derive(Debug)]
struct DebugPrintable(i32);

fn main() {
    let value = DebugPrintable(10);
    println!("{:?}", value);
}
|
extern crate webdriver;
use webdriver::*;
use webdriver::messages::{LocationStrategy, ExecuteCmd};
use webdriver::firefox::GeckoDriver;
extern crate rustyline;
use rustyline::error::ReadlineError;
use rustyline::Editor;
/// Dispatches a single REPL command against the WebDriver session.
///
/// Supported commands: `back`, `go <url>`, `refresh`, `source`, `url`,
/// `innerhtml <css>`, `outerhtml <css>`, `windows`, `execute <script>`.
/// Unknown commands are reported on stdout and are NOT treated as errors
/// (the function still returns `Ok(())`).
/// NOTE(review): mixes `try!` and `?` — purely stylistic, same semantics.
fn execute_function(name: &str, args: &str, sess: &DriverSession) -> Result<(), Error> {
    match name {
        "back" => try!(sess.back()),
        "go" => try!(sess.go(args)),
        "refresh" => try!(sess.refresh()),
        "source" => println!("{}", try!(sess.get_page_source())),
        "url" => println!("{}", try!(sess.get_current_url())),
        "innerhtml" => {
            // One line per element matching the CSS selector in `args`.
            for (idx, elem) in sess.find_elements(args, LocationStrategy::Css)?.iter().enumerate() {
                println!("#{} {}", idx, elem.inner_html()?);
            }
        }
        "outerhtml" => {
            for (idx, elem) in sess.find_elements(args, LocationStrategy::Css)?.iter().enumerate() {
                println!("#{} {}", idx, elem.outer_html()?);
            }
        }
        "windows" => {
            for (idx, handle) in sess.get_window_handles()?.iter().enumerate() {
                println!("#{} {}", idx, handle)
            }
        }
        "execute" => {
            // Run a raw script; strings print bare, other JSON values via
            // their Display form.
            let script = ExecuteCmd {
                script: args.to_owned(),
                args: vec![],
            };
            match sess.execute(script)? {
                JsonValue::String(ref s) => println!("{}", s),
                other => println!("{}", other),
            }
        }
        _ => println!("Unknown function: \"{}\"", name),
    }
    Ok(())
}
/// Splits an input line into a command name and its argument string, then
/// dispatches it via `execute_function`.
///
/// Fix: the original used `split_at(idx)`, which keeps the separating
/// space at the front of the arguments — e.g. `go <url>` handed " <url>"
/// to the driver. The separator is now excluded from `args`. A line with
/// no space is the whole command with empty arguments, as before.
fn execute(line: &str, sess: &DriverSession) -> Result<(), Error>{
    let (cmd, args) = match line.find(' ') {
        Some(idx) => (&line[..idx], &line[idx + 1..]),
        None => (line, ""),
    };
    execute_function(cmd, args, sess)
}
/// Interactive WebDriver REPL: starts geckodriver, opens a session, and
/// feeds each line of input to `execute` until EOF or Ctrl-C.
fn main() {
    let gecko = GeckoDriver::spawn().expect("Unable to start geckodriver");
    let sess = gecko.session().expect("Unable to start WebDriver session");
    let mut rl = Editor::<()>::new();
    loop {
        match rl.readline(">> ") {
            Ok(line) => {
                rl.add_history_entry(&line);
                // Report command failures but keep the REPL alive.
                if let Err(err) = execute(line.trim_matches('\n'), &sess) {
                    println!("{}", err);
                }
            }
            // Ctrl-C and Ctrl-D both end the session cleanly.
            Err(ReadlineError::Interrupted) | Err(ReadlineError::Eof) => break,
            Err(err) => {
                println!("Error: {:?}", err);
                break;
            }
        }
    }
}
|
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use common_meta_sled_store::sled;
use common_meta_sled_store::SledBytesError;
use common_meta_sled_store::SledOrderedSerde;
use common_meta_types::anyerror::AnyError;
use common_meta_types::NodeId;
use common_meta_types::Vote;
use serde::Deserialize;
use serde::Serialize;
use sled::IVec;
/// Keys under which raft state is persisted in the sled tree; each key
/// maps to the `RaftStateValue` variant of the same name.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum RaftStateKey {
    /// The node id.
    Id,

    /// Hard state of the raft log, including `current_term` and `voted_for`.
    HardState,

    /// The id of the only active state machine.
    /// When installing a state machine snapshot:
    /// 1. A temp state machine is written into a new sled::Tree.
    /// 2. Update this field to point to the new state machine.
    /// 3. Cleanup old state machine.
    StateMachineId,
}
/// Values stored under the corresponding `RaftStateKey`; the `From` impls
/// below unwrap a value into its concrete type (panicking on mismatch).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RaftStateValue {
    NodeId(NodeId),
    HardState(Vote),
    /// active state machine, previous state machine
    StateMachineId((u64, u64)),
}
impl fmt::Display for RaftStateKey {
    /// Renders the key as its bare variant name ("Id", "HardState",
    /// "StateMachineId").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            RaftStateKey::Id => "Id",
            RaftStateKey::HardState => "HardState",
            RaftStateKey::StateMachineId => "StateMachineId",
        };
        write!(f, "{}", name)
    }
}
impl SledOrderedSerde for RaftStateKey {
fn ser(&self) -> Result<IVec, SledBytesError> {
let i = match self {
RaftStateKey::Id => 1,
RaftStateKey::HardState => 2,
RaftStateKey::StateMachineId => 3,
};
Ok(IVec::from(&[i]))
}
fn de<V: AsRef<[u8]>>(v: V) -> Result<Self, SledBytesError>
where Self: Sized {
let slice = v.as_ref();
if slice[0] == 1 {
return Ok(RaftStateKey::Id);
} else if slice[0] == 2 {
return Ok(RaftStateKey::HardState);
} else if slice[0] == 3 {
return Ok(RaftStateKey::StateMachineId);
}
Err(SledBytesError::new(&AnyError::error("invalid key IVec")))
}
}
impl From<RaftStateValue> for NodeId {
    /// Unwraps the `NodeId` variant; any other variant is a programming
    /// error and panics.
    fn from(v: RaftStateValue) -> Self {
        if let RaftStateValue::NodeId(x) = v {
            x
        } else {
            panic!("expect NodeId")
        }
    }
}
impl From<RaftStateValue> for Vote {
    /// Unwraps the `HardState` variant; any other variant is a
    /// programming error and panics.
    fn from(v: RaftStateValue) -> Self {
        if let RaftStateValue::HardState(x) = v {
            x
        } else {
            panic!("expect HardState")
        }
    }
}
impl From<RaftStateValue> for (u64, u64) {
    /// Unwraps the `StateMachineId` variant; any other variant is a
    /// programming error and panics.
    fn from(v: RaftStateValue) -> Self {
        if let RaftStateValue::StateMachineId(x) = v {
            x
        } else {
            panic!("expect StateMachineId")
        }
    }
}
/// Compatibility layer for state written by openraft 0.7: deserializes
/// the old on-disk `RaftStateValue` representation and upgrades it to the
/// current types.
pub(crate) mod compat_with_07 {
    use common_meta_sled_store::SledBytesError;
    use common_meta_sled_store::SledSerde;
    use common_meta_types::compat07;
    use common_meta_types::NodeId;
    use openraft::compat::Upgrade;

    use crate::state::RaftStateValue;

    /// On-disk (0.7-era) counterpart of `RaftStateValue`; only
    /// `HardState` differs — it holds the 0.7 `Vote` layout.
    #[derive(Debug, serde::Serialize, serde::Deserialize)]
    pub enum RaftStateValueCompat {
        NodeId(NodeId),
        HardState(compat07::Vote),
        StateMachineId((u64, u64)),
    }

    impl Upgrade<RaftStateValue> for RaftStateValueCompat {
        // Only HardState needs a real conversion; the other variants are
        // carried over unchanged.
        #[rustfmt::skip]
        fn upgrade(self) -> RaftStateValue {
            match self{
                Self::NodeId(nid) => RaftStateValue::NodeId(nid),
                Self::HardState(v) => RaftStateValue::HardState(v.upgrade()),
                Self::StateMachineId(x) => RaftStateValue::StateMachineId(x),
            }
        }
    }

    impl SledSerde for RaftStateValue {
        /// Decodes from sled bytes by first parsing the 0.7-compatible
        /// JSON form, then upgrading `HardState` to the current `Vote`.
        fn de<T: AsRef<[u8]>>(v: T) -> Result<Self, SledBytesError>
        where Self: Sized {
            let s: RaftStateValueCompat = serde_json::from_slice(v.as_ref())?;
            let v = match s {
                RaftStateValueCompat::NodeId(nid) => Self::NodeId(nid),
                RaftStateValueCompat::HardState(h) => Self::HardState(h.upgrade()),
                RaftStateValueCompat::StateMachineId(x) => Self::StateMachineId(x),
            };
            Ok(v)
        }
    }
}
|
//! User LEDs
//!
//! - Red = Pin 22
//! - Green = Pin 19
//! - Blue = Pin 21
use e310x::GPIO0;
use gpio::{PinConfig, Pin22, Pin19, Pin21};
/// Configures the three user-LED pins (red=22, green=19, blue=21) as
/// outputs, enabling inversion on each pin first.
/// NOTE(review): the `set_invert(.., true)` calls presumably compensate
/// for active-low LED wiring — confirm against the board schematic.
pub fn init(gpio: &GPIO0) {
    Pin22::set_invert(gpio, true); // red
    Pin22::init(gpio, PinConfig::Output);
    Pin19::set_invert(gpio, true); // green
    Pin19::init(gpio, PinConfig::Output);
    Pin21::set_invert(gpio, true); // blue
    Pin21::init(gpio, PinConfig::Output);
}
/// Defines a zero-sized LED handle type `$Color` whose `on`/`off`/
/// `toggle` methods drive the GPIO pin `$Pin`. Used below to create the
/// `Red`, `Green`, and `Blue` LED types.
#[macro_export]
macro_rules! led {
    ($Color:ident, $Pin:ident) => {
        pub struct $Color;
        impl $Color {
            pub fn on(gpio: &GPIO0) {
                $Pin::high(gpio);
            }
            pub fn off(gpio: &GPIO0) {
                $Pin::low(gpio);
            }
            pub fn toggle(gpio: &GPIO0) {
                $Pin::toggle(gpio);
            }
        }
    }
}
// Instantiate the three user LEDs on their board pins.
led!(Red, Pin22);
led!(Green, Pin19);
led!(Blue, Pin21);
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use diagnostics_testing::EnvWithDiagnostics;
use fuchsia_async as fasync;
use fuchsia_syslog as syslog;
use futures::prelude::*;
// Launches the rust-logs-example component, waits for it to exit
// successfully, then verifies the first three log records it emitted:
// severity, tag, message, and nonzero pid/tid for each.
#[fasync::run_singlethreaded(test)]
async fn launch_example_and_read_hello_world() {
    let mut test_env = EnvWithDiagnostics::new().await;
    let url = "fuchsia-pkg://fuchsia.com/rust-logs-example-tests#meta/rust-logs-example.cmx";
    let status = test_env.launch(url, None).app.wait().await.unwrap();
    assert!(status.success());

    let mut records =
        test_env.listen_to_logs().take(3).collect::<Vec<_>>().await.into_iter();
    // Shared shape of every expected record; the unwrap panics if fewer
    // than three records arrived, just like the original per-record code.
    let mut expect = |severity, msg: &str| {
        let record = records.next().unwrap();
        assert_eq!(record.severity, severity);
        assert_eq!(record.tags, vec!["rust_logs_example"]);
        assert_eq!(record.msg, msg);
        assert_ne!(record.pid, 0);
        assert_ne!(record.tid, 0);
    };
    expect(syslog::levels::DEBUG, "should print ");
    expect(syslog::levels::INFO, "hello, world! foo=1 bar=\"baz\" ");
    expect(syslog::levels::WARN, "warning: using old api");
}
|
/*
OOP languages share certain common characteristics, namely objects,
encapsulation, and inheritance. The Gang of Four book defines OOP this way:
Object-oriented programs are made up of objects. An object packages both data
and the procedures that operate on that data. The procedures are typically
called methods or operations. Using this definition, Rust is object oriented:
structs and enums have data, and impl blocks provide methods on structs and
enums.
Inheritance is a mechanism whereby an object can inherit from another object’s
definition, thus gaining the parent object’s data and behavior without you
having to define them again. If a language must have inheritance to be an
object-oriented language, then Rust is not one. There is no way to define a
struct that inherits the parent struct’s fields and method implementations.
You choose inheritance for two main reasons. One is for reuse of code: you can
implement particular behavior for one type, and inheritance enables you to reuse
that implementation for a different type. You can share Rust code using default
trait method implementations instead.
The other reason to use inheritance relates to the type system: to enable a
child type to be used in the same places as the parent type. This is also called
polymorphism, which means that you can substitute multiple objects for each
other at runtime if they share certain characteristics. Rust instead uses
generics to abstract over different possible types and trait bounds to impose
constraints on what those types must provide. This is sometimes called bounded
parametric polymorphism.
*/
/// A collection of integers that caches the arithmetic mean of its
/// elements, refreshing the cache on every mutation. Fields are private
/// so the cached `average` cannot drift from `list` (encapsulation).
#[derive(Debug)]
pub struct AveragedCollection {
    list: Vec<i32>,
    average: f64,
}

impl AveragedCollection {
    /// Appends `value` and refreshes the cached average.
    pub fn add(&mut self, value: i32) {
        self.list.push(value);
        self.update_average();
    }

    /// Removes and returns the most recently added value, refreshing the
    /// cached average; returns `None` when the collection is empty.
    pub fn remove(&mut self) -> Option<i32> {
        let result = self.list.pop();
        if result.is_some() {
            self.update_average();
        }
        result
    }

    /// The cached mean; 0.0 for an empty collection.
    pub fn average(&self) -> f64 {
        self.average
    }

    /// Recomputes the cached mean.
    ///
    /// Fixes two defects in the original:
    /// - removing the last element computed 0/0 and cached NaN; an empty
    ///   collection now reports 0.0;
    /// - the sum is accumulated in i64 so large i32 lists cannot overflow.
    fn update_average(&mut self) {
        if self.list.is_empty() {
            self.average = 0.0;
        } else {
            let total: i64 = self.list.iter().map(|&x| i64::from(x)).sum();
            self.average = total as f64 / self.list.len() as f64;
        }
    }
}
/// Demonstration driver: add three values, drop the last one, and print
/// the resulting average and debug representation.
fn main() {
    let mut collection = AveragedCollection { list: Vec::new(), average: 0.0 };
    for value in &[2, 4, 6] {
        collection.add(*value);
    }
    collection.remove();
    println!("average is {}.", collection.average());
    println!("collection is {:?}.", collection);
}
|
// svd2rust-generated access types for register MPCBB2_VCTR20 (SRAM
// block-based security attribute word): per-register reader/writer
// aliases plus the reset value (all bits set).
#[doc = "Reader of register MPCBB2_VCTR20"]
pub type R = crate::R<u32, super::MPCBB2_VCTR20>;
#[doc = "Writer for register MPCBB2_VCTR20"]
pub type W = crate::W<u32, super::MPCBB2_VCTR20>;
#[doc = "Register MPCBB2_VCTR20 `reset()`'s with value 0xffff_ffff"]
impl crate::ResetValue for super::MPCBB2_VCTR20 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0xffff_ffff
    }
}
// svd2rust-generated single-bit field accessors: fields B640-B644 occupy
// bits 0-4 of MPCBB2_VCTR20. Each field gets a read alias (`*_R`) and a
// write proxy (`*_W`) whose `bit` method masks and sets exactly one bit.
#[doc = "Reader of field `B640`"]
pub type B640_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B640`"]
pub struct B640_W<'a> {
    w: &'a mut W,
}
impl<'a> B640_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `B641`"]
pub type B641_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B641`"]
pub struct B641_W<'a> {
    w: &'a mut W,
}
impl<'a> B641_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `B642`"]
pub type B642_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B642`"]
pub struct B642_W<'a> {
    w: &'a mut W,
}
impl<'a> B642_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `B643`"]
pub type B643_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B643`"]
pub struct B643_W<'a> {
    w: &'a mut W,
}
impl<'a> B643_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
#[doc = "Reader of field `B644`"]
pub type B644_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B644`"]
pub struct B644_W<'a> {
    w: &'a mut W,
}
impl<'a> B644_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
// svd2rust-generated single-bit field accessors: fields B645-B649 occupy
// bits 5-9 of MPCBB2_VCTR20 (same template as the fields above).
#[doc = "Reader of field `B645`"]
pub type B645_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B645`"]
pub struct B645_W<'a> {
    w: &'a mut W,
}
impl<'a> B645_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
#[doc = "Reader of field `B646`"]
pub type B646_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B646`"]
pub struct B646_W<'a> {
    w: &'a mut W,
}
impl<'a> B646_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
}
#[doc = "Reader of field `B647`"]
pub type B647_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B647`"]
pub struct B647_W<'a> {
    w: &'a mut W,
}
impl<'a> B647_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
#[doc = "Reader of field `B648`"]
pub type B648_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B648`"]
pub struct B648_W<'a> {
    w: &'a mut W,
}
impl<'a> B648_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
#[doc = "Reader of field `B649`"]
pub type B649_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B649`"]
pub struct B649_W<'a> {
    w: &'a mut W,
}
impl<'a> B649_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
// svd2rust-generated single-bit field accessors: fields B650-B654 occupy
// bits 10-14 of MPCBB2_VCTR20 (same template as the fields above).
#[doc = "Reader of field `B650`"]
pub type B650_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B650`"]
pub struct B650_W<'a> {
    w: &'a mut W,
}
impl<'a> B650_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
#[doc = "Reader of field `B651`"]
pub type B651_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B651`"]
pub struct B651_W<'a> {
    w: &'a mut W,
}
impl<'a> B651_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
        self.w
    }
}
#[doc = "Reader of field `B652`"]
pub type B652_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B652`"]
pub struct B652_W<'a> {
    w: &'a mut W,
}
impl<'a> B652_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
        self.w
    }
}
#[doc = "Reader of field `B653`"]
pub type B653_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B653`"]
pub struct B653_W<'a> {
    w: &'a mut W,
}
impl<'a> B653_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
        self.w
    }
}
#[doc = "Reader of field `B654`"]
pub type B654_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B654`"]
pub struct B654_W<'a> {
    w: &'a mut W,
}
impl<'a> B654_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
// svd2rust-generated single-bit field accessors: fields B655-B659 occupy
// bits 15-19 of MPCBB2_VCTR20 (same template as the fields above).
#[doc = "Reader of field `B655`"]
pub type B655_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B655`"]
pub struct B655_W<'a> {
    w: &'a mut W,
}
impl<'a> B655_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
        self.w
    }
}
#[doc = "Reader of field `B656`"]
pub type B656_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B656`"]
pub struct B656_W<'a> {
    w: &'a mut W,
}
impl<'a> B656_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
#[doc = "Reader of field `B657`"]
pub type B657_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B657`"]
pub struct B657_W<'a> {
    w: &'a mut W,
}
impl<'a> B657_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
        self.w
    }
}
#[doc = "Reader of field `B658`"]
pub type B658_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B658`"]
pub struct B658_W<'a> {
    w: &'a mut W,
}
impl<'a> B658_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
        self.w
    }
}
#[doc = "Reader of field `B659`"]
pub type B659_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B659`"]
pub struct B659_W<'a> {
    w: &'a mut W,
}
impl<'a> B659_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
        self.w
    }
}
#[doc = "Reader of field `B660`"]
pub type B660_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B660`"]
pub struct B660_W<'a> {
w: &'a mut W,
}
impl<'a> B660_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Reader of field `B661`"]
pub type B661_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B661`"]
pub struct B661_W<'a> {
w: &'a mut W,
}
impl<'a> B661_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
self.w
}
}
/// Reader of field `B662`
pub type B662_R = crate::R<bool, bool>;
/// Write proxy for field `B662`
pub struct B662_W<'a> {
    w: &'a mut W,
}
impl<'a> B662_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 22, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B663`
pub type B663_R = crate::R<bool, bool>;
/// Write proxy for field `B663`
pub struct B663_W<'a> {
    w: &'a mut W,
}
impl<'a> B663_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 23, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B664`
pub type B664_R = crate::R<bool, bool>;
/// Write proxy for field `B664`
pub struct B664_W<'a> {
    w: &'a mut W,
}
impl<'a> B664_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 24, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B665`
pub type B665_R = crate::R<bool, bool>;
/// Write proxy for field `B665`
pub struct B665_W<'a> {
    w: &'a mut W,
}
impl<'a> B665_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 25, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B666`
pub type B666_R = crate::R<bool, bool>;
/// Write proxy for field `B666`
pub struct B666_W<'a> {
    w: &'a mut W,
}
impl<'a> B666_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 26, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B667`
pub type B667_R = crate::R<bool, bool>;
/// Write proxy for field `B667`
pub struct B667_W<'a> {
    w: &'a mut W,
}
impl<'a> B667_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 27, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B668`
pub type B668_R = crate::R<bool, bool>;
/// Write proxy for field `B668`
pub struct B668_W<'a> {
    w: &'a mut W,
}
impl<'a> B668_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 28, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B669`
pub type B669_R = crate::R<bool, bool>;
/// Write proxy for field `B669`
pub struct B669_W<'a> {
    w: &'a mut W,
}
impl<'a> B669_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 29, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B670`
pub type B670_R = crate::R<bool, bool>;
/// Write proxy for field `B670`
pub struct B670_W<'a> {
    w: &'a mut W,
}
impl<'a> B670_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 30, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Reader of field `B671`
pub type B671_R = crate::R<bool, bool>;
/// Write proxy for field `B671`
pub struct B671_W<'a> {
    w: &'a mut W,
}
impl<'a> B671_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 31, then OR the new value into that position.
        self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    /// Bit 0 - B640
    #[inline(always)]
    pub fn b640(&self) -> B640_R {
        B640_R::new((self.bits & 0x01) == 1)
    }
    /// Bit 1 - B641
    #[inline(always)]
    pub fn b641(&self) -> B641_R {
        B641_R::new(((self.bits >> 1) & 0x01) == 1)
    }
    /// Bit 2 - B642
    #[inline(always)]
    pub fn b642(&self) -> B642_R {
        B642_R::new(((self.bits >> 2) & 0x01) == 1)
    }
    /// Bit 3 - B643
    #[inline(always)]
    pub fn b643(&self) -> B643_R {
        B643_R::new(((self.bits >> 3) & 0x01) == 1)
    }
    /// Bit 4 - B644
    #[inline(always)]
    pub fn b644(&self) -> B644_R {
        B644_R::new(((self.bits >> 4) & 0x01) == 1)
    }
    /// Bit 5 - B645
    #[inline(always)]
    pub fn b645(&self) -> B645_R {
        B645_R::new(((self.bits >> 5) & 0x01) == 1)
    }
    /// Bit 6 - B646
    #[inline(always)]
    pub fn b646(&self) -> B646_R {
        B646_R::new(((self.bits >> 6) & 0x01) == 1)
    }
    /// Bit 7 - B647
    #[inline(always)]
    pub fn b647(&self) -> B647_R {
        B647_R::new(((self.bits >> 7) & 0x01) == 1)
    }
    /// Bit 8 - B648
    #[inline(always)]
    pub fn b648(&self) -> B648_R {
        B648_R::new(((self.bits >> 8) & 0x01) == 1)
    }
    /// Bit 9 - B649
    #[inline(always)]
    pub fn b649(&self) -> B649_R {
        B649_R::new(((self.bits >> 9) & 0x01) == 1)
    }
    /// Bit 10 - B650
    #[inline(always)]
    pub fn b650(&self) -> B650_R {
        B650_R::new(((self.bits >> 10) & 0x01) == 1)
    }
    /// Bit 11 - B651
    #[inline(always)]
    pub fn b651(&self) -> B651_R {
        B651_R::new(((self.bits >> 11) & 0x01) == 1)
    }
    /// Bit 12 - B652
    #[inline(always)]
    pub fn b652(&self) -> B652_R {
        B652_R::new(((self.bits >> 12) & 0x01) == 1)
    }
    /// Bit 13 - B653
    #[inline(always)]
    pub fn b653(&self) -> B653_R {
        B653_R::new(((self.bits >> 13) & 0x01) == 1)
    }
    /// Bit 14 - B654
    #[inline(always)]
    pub fn b654(&self) -> B654_R {
        B654_R::new(((self.bits >> 14) & 0x01) == 1)
    }
    /// Bit 15 - B655
    #[inline(always)]
    pub fn b655(&self) -> B655_R {
        B655_R::new(((self.bits >> 15) & 0x01) == 1)
    }
    /// Bit 16 - B656
    #[inline(always)]
    pub fn b656(&self) -> B656_R {
        B656_R::new(((self.bits >> 16) & 0x01) == 1)
    }
    /// Bit 17 - B657
    #[inline(always)]
    pub fn b657(&self) -> B657_R {
        B657_R::new(((self.bits >> 17) & 0x01) == 1)
    }
    /// Bit 18 - B658
    #[inline(always)]
    pub fn b658(&self) -> B658_R {
        B658_R::new(((self.bits >> 18) & 0x01) == 1)
    }
    /// Bit 19 - B659
    #[inline(always)]
    pub fn b659(&self) -> B659_R {
        B659_R::new(((self.bits >> 19) & 0x01) == 1)
    }
    /// Bit 20 - B660
    #[inline(always)]
    pub fn b660(&self) -> B660_R {
        B660_R::new(((self.bits >> 20) & 0x01) == 1)
    }
    /// Bit 21 - B661
    #[inline(always)]
    pub fn b661(&self) -> B661_R {
        B661_R::new(((self.bits >> 21) & 0x01) == 1)
    }
    /// Bit 22 - B662
    #[inline(always)]
    pub fn b662(&self) -> B662_R {
        B662_R::new(((self.bits >> 22) & 0x01) == 1)
    }
    /// Bit 23 - B663
    #[inline(always)]
    pub fn b663(&self) -> B663_R {
        B663_R::new(((self.bits >> 23) & 0x01) == 1)
    }
    /// Bit 24 - B664
    #[inline(always)]
    pub fn b664(&self) -> B664_R {
        B664_R::new(((self.bits >> 24) & 0x01) == 1)
    }
    /// Bit 25 - B665
    #[inline(always)]
    pub fn b665(&self) -> B665_R {
        B665_R::new(((self.bits >> 25) & 0x01) == 1)
    }
    /// Bit 26 - B666
    #[inline(always)]
    pub fn b666(&self) -> B666_R {
        B666_R::new(((self.bits >> 26) & 0x01) == 1)
    }
    /// Bit 27 - B667
    #[inline(always)]
    pub fn b667(&self) -> B667_R {
        B667_R::new(((self.bits >> 27) & 0x01) == 1)
    }
    /// Bit 28 - B668
    #[inline(always)]
    pub fn b668(&self) -> B668_R {
        B668_R::new(((self.bits >> 28) & 0x01) == 1)
    }
    /// Bit 29 - B669
    #[inline(always)]
    pub fn b669(&self) -> B669_R {
        B669_R::new(((self.bits >> 29) & 0x01) == 1)
    }
    /// Bit 30 - B670
    #[inline(always)]
    pub fn b670(&self) -> B670_R {
        B670_R::new(((self.bits >> 30) & 0x01) == 1)
    }
    /// Bit 31 - B671
    #[inline(always)]
    pub fn b671(&self) -> B671_R {
        B671_R::new(((self.bits >> 31) & 0x01) == 1)
    }
}
impl W {
#[doc = "Bit 0 - B640"]
#[inline(always)]
pub fn b640(&mut self) -> B640_W {
B640_W { w: self }
}
#[doc = "Bit 1 - B641"]
#[inline(always)]
pub fn b641(&mut self) -> B641_W {
B641_W { w: self }
}
#[doc = "Bit 2 - B642"]
#[inline(always)]
pub fn b642(&mut self) -> B642_W {
B642_W { w: self }
}
#[doc = "Bit 3 - B643"]
#[inline(always)]
pub fn b643(&mut self) -> B643_W {
B643_W { w: self }
}
#[doc = "Bit 4 - B644"]
#[inline(always)]
pub fn b644(&mut self) -> B644_W {
B644_W { w: self }
}
#[doc = "Bit 5 - B645"]
#[inline(always)]
pub fn b645(&mut self) -> B645_W {
B645_W { w: self }
}
#[doc = "Bit 6 - B646"]
#[inline(always)]
pub fn b646(&mut self) -> B646_W {
B646_W { w: self }
}
#[doc = "Bit 7 - B647"]
#[inline(always)]
pub fn b647(&mut self) -> B647_W {
B647_W { w: self }
}
#[doc = "Bit 8 - B648"]
#[inline(always)]
pub fn b648(&mut self) -> B648_W {
B648_W { w: self }
}
#[doc = "Bit 9 - B649"]
#[inline(always)]
pub fn b649(&mut self) -> B649_W {
B649_W { w: self }
}
#[doc = "Bit 10 - B650"]
#[inline(always)]
pub fn b650(&mut self) -> B650_W {
B650_W { w: self }
}
#[doc = "Bit 11 - B651"]
#[inline(always)]
pub fn b651(&mut self) -> B651_W {
B651_W { w: self }
}
#[doc = "Bit 12 - B652"]
#[inline(always)]
pub fn b652(&mut self) -> B652_W {
B652_W { w: self }
}
#[doc = "Bit 13 - B653"]
#[inline(always)]
pub fn b653(&mut self) -> B653_W {
B653_W { w: self }
}
#[doc = "Bit 14 - B654"]
#[inline(always)]
pub fn b654(&mut self) -> B654_W {
B654_W { w: self }
}
#[doc = "Bit 15 - B655"]
#[inline(always)]
pub fn b655(&mut self) -> B655_W {
B655_W { w: self }
}
#[doc = "Bit 16 - B656"]
#[inline(always)]
pub fn b656(&mut self) -> B656_W {
B656_W { w: self }
}
#[doc = "Bit 17 - B657"]
#[inline(always)]
pub fn b657(&mut self) -> B657_W {
B657_W { w: self }
}
#[doc = "Bit 18 - B658"]
#[inline(always)]
pub fn b658(&mut self) -> B658_W {
B658_W { w: self }
}
#[doc = "Bit 19 - B659"]
#[inline(always)]
pub fn b659(&mut self) -> B659_W {
B659_W { w: self }
}
#[doc = "Bit 20 - B660"]
#[inline(always)]
pub fn b660(&mut self) -> B660_W {
B660_W { w: self }
}
#[doc = "Bit 21 - B661"]
#[inline(always)]
pub fn b661(&mut self) -> B661_W {
B661_W { w: self }
}
#[doc = "Bit 22 - B662"]
#[inline(always)]
pub fn b662(&mut self) -> B662_W {
B662_W { w: self }
}
#[doc = "Bit 23 - B663"]
#[inline(always)]
pub fn b663(&mut self) -> B663_W {
B663_W { w: self }
}
#[doc = "Bit 24 - B664"]
#[inline(always)]
pub fn b664(&mut self) -> B664_W {
B664_W { w: self }
}
#[doc = "Bit 25 - B665"]
#[inline(always)]
pub fn b665(&mut self) -> B665_W {
B665_W { w: self }
}
#[doc = "Bit 26 - B666"]
#[inline(always)]
pub fn b666(&mut self) -> B666_W {
B666_W { w: self }
}
#[doc = "Bit 27 - B667"]
#[inline(always)]
pub fn b667(&mut self) -> B667_W {
B667_W { w: self }
}
#[doc = "Bit 28 - B668"]
#[inline(always)]
pub fn b668(&mut self) -> B668_W {
B668_W { w: self }
}
#[doc = "Bit 29 - B669"]
#[inline(always)]
pub fn b669(&mut self) -> B669_W {
B669_W { w: self }
}
#[doc = "Bit 30 - B670"]
#[inline(always)]
pub fn b670(&mut self) -> B670_W {
B670_W { w: self }
}
#[doc = "Bit 31 - B671"]
#[inline(always)]
pub fn b671(&mut self) -> B671_W {
B671_W { w: self }
}
}
|
// Copyright 2021 Datafuse Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use common_base::base::tokio;
use databend_query::api::http::v1::config::config_handler;
use poem::get;
use poem::http::Method;
use poem::http::StatusCode;
use poem::http::Uri;
use poem::Endpoint;
use poem::Request;
use poem::Route;
use pretty_assertions::assert_eq; // for `app.oneshot()`
use crate::tests::TestGlobalServices;
#[tokio::test(flavor = "multi_thread")]
async fn test_config() -> common_exception::Result<()> {
    // Keep the global-services guard alive for the whole test.
    let _guard = TestGlobalServices::setup(crate::tests::ConfigBuilder::create().build()).await?;

    // Mount the config handler and issue a GET against it.
    let cluster_router = Route::new().at("/v1/config", get(config_handler));
    let request = Request::builder()
        .uri(Uri::from_static("/v1/config"))
        .method(Method::GET)
        .finish();
    let response = cluster_router.call(request).await.unwrap();

    // The endpoint should answer successfully.
    assert_eq!(response.status(), StatusCode::OK);
    Ok(())
}
|
extern crate sa2_set;
use std::env;
use std::fs::File;
use sa2_set::{SetFile, Dreamcast, GameCube, Pc};
/// Reads a SET file for the platform given on the command line and
/// pretty-prints every object in it.
///
/// Usage: `<platform: d|g|p> <setfile>` where `d` = Dreamcast,
/// `g` = GameCube, `p` = PC.
fn main() {
    let mut args = env::args().skip(1);
    // Bare `unwrap()` on user input gives an unhelpful panic; state the usage.
    let platform = args
        .next()
        .expect("usage: <platform: d|g|p> <setfile> — missing platform id");
    let filename = args
        .next()
        .expect("usage: <platform: d|g|p> <setfile> — missing SET file path");

    // Include the offending path in the error so the user can act on it.
    let mut f = File::open(&filename)
        .unwrap_or_else(|e| panic!("failed to open {}: {}", filename, e));

    // Decode with the byte layout matching the requested platform.
    let set = match platform.as_str() {
        "d" => SetFile::from_read::<Dreamcast, _>(&mut f).expect("failed to parse Dreamcast SET file"),
        "g" => SetFile::from_read::<GameCube, _>(&mut f).expect("failed to parse GameCube SET file"),
        "p" => SetFile::from_read::<Pc, _>(&mut f).expect("failed to parse PC SET file"),
        _ => panic!("Bad platform id: {:?} (expected d, g, or p)", platform),
    };

    // Dump every object with its index.
    let SetFile(objs) = set;
    for (idx, obj) in objs.into_iter().enumerate() {
        println!("obj {}: {:#?}", idx, obj);
    }
}
|
use super::{
attribute_parsing::{MethodWithAttrs, SabiTraitAttrs},
impl_interfacetype::{TraitStruct, WhichTrait, TRAIT_LIST},
lifetime_unelider::BorrowKind,
parse_utils::{parse_str_as_ident, parse_str_as_trait_bound, parse_str_as_type},
replace_self_path::{self, ReplaceWith},
*,
};
use crate::{
set_span_visitor::SetSpanVisitor,
utils::{dummy_ident, LinearResult, SynResultExt},
};
use as_derive_utils::{return_spanned_err, spanned_err, syn_err};
use std::{
collections::{HashMap, HashSet},
iter,
};
use core_extensions::{matches, IteratorExt};
use syn::{
punctuated::Punctuated,
spanned::Spanned,
token::Unsafe,
token::{Colon, Comma, Semi},
visit_mut::VisitMut,
Abi, Attribute, Block, FnArg, Ident, ItemTrait, Lifetime, LifetimeDef, TraitItem,
TypeParamBound, WherePredicate,
};
use proc_macro2::Span;
/// An associated type of the trait, paired with its position in the
/// declaration order of the trait's associated types.
#[derive(Debug, Clone)]
pub struct AssocTyWithIndex {
    /// Index of this associated type among the trait's associated types.
    pub index: usize,
    /// The associated type item itself.
    pub assoc_ty: syn::TraitItemType,
}
////////////////////////////////////////////////////////////////////////////////
/// Represents a trait for use in `#[sabi_trait]`.
#[derive(Debug, Clone)]
pub(crate) struct TraitDefinition<'a> {
    /// The trait item this definition was parsed from.
    pub(crate) item: &'a ItemTrait,
    /// The name of the trait.
    pub(crate) name: &'a Ident,
    /// Which backend type to use for the trait object: DynTrait or RObject.
    pub(crate) which_object: WhichObject,
    /// The where predicates in the where clause of the trait,
    /// if it doesn't have one this is empty.
    pub(crate) where_preds: Punctuated<WherePredicate, Comma>,
    /// Attributes applied to the vtable.
    pub(crate) derive_attrs: &'a [Attribute],
    /// Attributes applied to the trait.
    pub(crate) other_attrs: &'a [Attribute],
    /// The generic parameters of the trait.
    pub(crate) generics: &'a syn::Generics,
    /// The `Iterator::Item` type for this trait,
    /// None if it doesn't have Iterator as a supertrait.
    pub(crate) iterator_item: Option<&'a syn::Type>,
    #[allow(dead_code)]
    /// The path for the implemented serde::Deserialize trait
    /// (it may reference some trait lifetime parameter)
    pub(crate) deserialize_bound: Option<DeserializeBound>,
    /// The traits this has as supertraits.
    pub(crate) impld_traits: Vec<TraitImplness<'a>>,
    /// The traits this doesn't have as supertraits.
    pub(crate) unimpld_traits: Vec<&'a Ident>,
    /// A struct describing the traits this does and doesn't have as supertraits
    /// (true means implemented,false means unimplemented)
    pub(crate) trait_flags: TraitStruct<bool>,
    /// The region of code of the identifiers for the supertraits,
    /// with `Span::call_site()` for the ones that aren't supertraits.
    pub(crate) trait_spans: TraitStruct<Span>,
    /// The lifetimes declared in the trait generic parameter list that are used in
    /// `&'lifetime self` `&'lifetime mut self` method receivers,
    /// or used directly as supertraits.
    pub(crate) lifetime_bounds: Punctuated<&'a Lifetime, Comma>,
    /// The visibility of the trait.
    pub(crate) vis: VisibilityKind<'a>,
    /// The visibility of the trait,inside a submodule.
    pub(crate) submod_vis: RelativeVis<'a>,
    /// The keys use the original identifier for the associated type.
    pub(crate) assoc_tys: HashMap<&'a Ident, AssocTyWithIndex>,
    /// The methods of the trait.
    pub(crate) methods: Vec<TraitMethod<'a>>,
    /// Whether this has by mutable reference methods.
    pub(crate) has_mut_methods: bool,
    /// Whether this has by-value methods.
    pub(crate) has_val_methods: bool,
    /// Disables `impl Trait for Trait_TO`
    pub(crate) disable_trait_impl: bool,
    /// Whether this has `'static` as a supertrait syntactically.
    pub(crate) is_static: IsStaticTrait,
    /// A TokenStream with the equivalent of `<Pointer::PtrTarget as Trait>::`
    pub(crate) ts_fq_self: &'a TokenStream2,
    /// Tokens shared across the macro expansion.
    pub(crate) ctokens: &'a CommonTokens,
}
////////////////////////////////////////////////////////////////////////////////
impl<'a> TraitDefinition<'a> {
    /// Builds a `TraitDefinition` from the parsed trait item and the
    /// attributes extracted from it.
    ///
    /// Errors are accumulated into `errors` so that as many problems as
    /// possible are reported in a single expansion, instead of bailing
    /// out at the first one.
    pub(super) fn new(
        trait_: &'a ItemTrait,
        SabiTraitAttrs {
            attrs,
            methods_with_attrs,
            which_object,
            disable_trait_impl,
            disable_inherent_default,
            ..
        }: SabiTraitAttrs<'a>,
        arenas: &'a Arenas,
        ctokens: &'a CommonTokens,
    ) -> Result<Self, syn::Error> {
        let vis = VisibilityKind::new(&trait_.vis);
        let submod_vis = vis.submodule_level(1);
        let mut assoc_tys = HashMap::default();
        let mut methods = Vec::<TraitMethod<'a>>::new();
        let mut errors = LinearResult::ok(());
        // Parse every method, collecting parse errors without aborting early.
        methods_with_attrs
            .into_iter()
            .zip(disable_inherent_default)
            .filter_map(|(func, disable_inh_def)| {
                match TraitMethod::new(func, disable_inh_def, ctokens, arenas) {
                    Ok(x) => x,
                    Err(e) => {
                        errors.push_err(e);
                        None
                    }
                }
            })
            .extending(&mut methods);
        /////////////////////////////////////////////////////
        //// Processing the supertrait bounds
        let mut is_static = IsStaticTrait::No;
        // All lifetimes declared on the trait, plus `'static`.
        let lifetime_params: HashSet<&'a Lifetime> = trait_
            .generics
            .lifetimes()
            .map(|l| &l.lifetime)
            .chain(iter::once(&ctokens.static_lifetime))
            .collect();
        let GetSupertraits {
            impld_traits,
            unimpld_traits,
            mut lifetime_bounds,
            iterator_item,
            deserialize_bound,
            trait_flags,
            trait_spans,
            errors: supertrait_errors,
        } = get_supertraits(
            &trait_.supertraits,
            &lifetime_params,
            which_object,
            arenas,
            ctokens,
        );
        errors.combine_err(supertrait_errors.into());
        // Adding the lifetime parameters in `&'a self` and `&'a mut self`
        // that were declared in the trait generic parameter list.
        // This is done because those lifetime bounds are enforced as soon as
        // the vtable is created,instead of when the methods are called
        // (it's enforced in method calls in regular trait objects).
        for method in &methods {
            if let SelfParam::ByRef {
                lifetime: Some(lt), ..
            } = method.self_param
            {
                if lifetime_params.contains(lt) {
                    lifetime_bounds.push(lt);
                }
            }
        }
        // A `'static` lifetime bound marks the whole trait as static.
        for lt in &lifetime_bounds {
            if lt.ident == "static" {
                is_static = IsStaticTrait::Yes;
            }
        }
        /////////////////////////////////////////////////////
        // Index associated types in declaration order; any other
        // non-method item is rejected.
        let mut assoc_ty_index = 0;
        for item in &trait_.items {
            match item {
                TraitItem::Method { .. } => {}
                TraitItem::Type(assoc_ty) => {
                    let with_index = AssocTyWithIndex {
                        index: assoc_ty_index,
                        assoc_ty: assoc_ty.clone(),
                    };
                    assoc_tys.insert(&assoc_ty.ident, with_index);
                    assoc_ty_index += 1;
                }
                item => errors.push_err(spanned_err!(
                    item,
                    "Associated item not compatible with #[sabi_trait]",
                )),
            }
        }
        let has_mut_methods = methods.iter().any(|m| {
            matches!(
                &m.self_param,
                SelfParam::ByRef {
                    is_mutable: true,
                    ..
                }
            )
        });
        let has_val_methods = methods
            .iter()
            .any(|m| matches!(&m.self_param, SelfParam::ByVal));
        // `<_OrigPtr::PtrTarget as __Trait<...>>::` — used to fully qualify
        // associated types.
        let ts_fq_self = {
            let (_, generics_params, _) = trait_.generics.split_for_impl();
            quote!( <_OrigPtr::PtrTarget as __Trait #generics_params >:: )
        };
        // Surface all accumulated errors before constructing the definition.
        errors.into_result()?;
        Ok(TraitDefinition {
            item: trait_,
            name: &trait_.ident,
            which_object,
            where_preds: trait_
                .generics
                .where_clause
                .as_ref()
                .map(|wc| wc.predicates.clone())
                .unwrap_or_default(),
            derive_attrs: arenas.alloc(attrs.derive_attrs),
            other_attrs: arenas.alloc(attrs.other_attrs),
            generics: &trait_.generics,
            lifetime_bounds,
            iterator_item,
            deserialize_bound,
            impld_traits,
            unimpld_traits,
            trait_flags,
            trait_spans,
            vis,
            submod_vis,
            assoc_tys,
            methods,
            has_mut_methods,
            has_val_methods,
            disable_trait_impl,
            ts_fq_self: arenas.alloc(ts_fq_self),
            is_static,
            ctokens,
        })
    }
    /// Returns a clone of `self`,
    /// where usages of associated types are replaced for use in `which_item`.
    pub fn replace_self(&self, which_item: WhichItem) -> Result<Self, syn::Error> {
        let mut this = self.clone();
        let ctokens = self.ctokens;
        let mut errors = LinearResult::ok(());
        let replace_with = match which_item {
            // The trait declaration and its impl keep `Self::AssocTy` as-is.
            WhichItem::Trait | WhichItem::TraitImpl => {
                return Ok(this);
            }
            WhichItem::TraitObjectImpl => ReplaceWith::Remove,
            WhichItem::VtableDecl => ReplaceWith::Remove,
            WhichItem::VtableImpl => ReplaceWith::Ident(ctokens.u_capself.clone()),
        };
        // Only identifiers registered as associated types get replaced.
        let is_assoc_type = |ident: &Ident| {
            if self.assoc_tys.contains_key(ident) {
                Some(ReplaceWith::Keep)
            } else {
                None
            }
        };
        for where_pred in &mut this.where_preds {
            replace_self_path::replace_self_path(where_pred, replace_with.clone(), is_assoc_type)
                .combine_into_err(&mut errors);
        }
        for assoc_ty in this.assoc_tys.values_mut() {
            replace_self_path::replace_self_path(
                &mut assoc_ty.assoc_ty,
                replace_with.clone(),
                is_assoc_type,
            )
            .combine_into_err(&mut errors);
        }
        for method in &mut this.methods {
            method
                .replace_self(replace_with.clone(), is_assoc_type)
                .combine_into_err(&mut errors);
        }
        errors.into_result().map(|_| this)
    }
    /// Returns a tokenizer for the generic parameters in this trait.
    ///
    /// # Parameters
    ///
    /// - `in_what`:
    ///     Determines where the generic parameters are printed.
    ///     Eg:impl headers,trait declaration,trait usage.
    ///
    /// - `with_assoc_tys`:
    ///     Whether associated types are printed,and how.
    ///
    /// - `after_lifetimes`:
    ///     What will be printed after lifetime parameters.
    ///
    pub fn generics_tokenizer(
        &self,
        in_what: InWhat,
        with_assoc_tys: WithAssocTys,
        after_lifetimes: &'a TokenStream2,
    ) -> GenericsTokenizer<'_> {
        let ctokens = self.ctokens;
        GenericsTokenizer {
            gen_params_in: GenParamsIn::with_after_lifetimes(
                self.generics,
                in_what,
                after_lifetimes,
            ),
            // Chooses the `Self`-prefix tokens used before each associated type.
            assoc_tys: match with_assoc_tys {
                WithAssocTys::Yes(WhichSelf::Regular) => {
                    Some((&self.assoc_tys, &ctokens.ts_self_colon2))
                }
                WithAssocTys::Yes(WhichSelf::Underscore) => {
                    Some((&self.assoc_tys, &ctokens.ts_uself_colon2))
                }
                WithAssocTys::Yes(WhichSelf::FullyQualified) => {
                    Some((&self.assoc_tys, self.ts_fq_self))
                }
                WithAssocTys::Yes(WhichSelf::NoSelf) => Some((&self.assoc_tys, &ctokens.empty_ts)),
                WithAssocTys::No => None,
            },
        }
    }
    /// Returns the where predicates for the erased pointer type of the ffi-safe trait object.
    ///
    /// Example erased pointer types:`RBox<()>`,`RArc<()>`,`&()`,`&mut ()`
    ///
    pub fn erased_ptr_preds(&self) -> &'a TokenStream2 {
        let ctokens = self.ctokens;
        // The required pointer bounds depend on which kinds of receivers
        // the trait's methods use.
        match (self.has_mut_methods, self.has_val_methods) {
            (false, false) => &ctokens.ptr_ref_bound,
            (false, true) => &ctokens.ptr_ref_val_bound,
            (true, false) => &ctokens.ptr_mut_bound,
            (true, true) => &ctokens.ptr_mut_val_bound,
        }
    }
    /// Returns the where predicates of the inherent implementation of
    /// the ffi-safe trait object.
    pub fn trait_impl_where_preds(&self) -> Result<Punctuated<WherePredicate, Comma>, syn::Error> {
        let mut where_preds = self.where_preds.clone();
        let mut errors = LinearResult::ok(());
        for where_pred in &mut where_preds {
            replace_self_path::replace_self_path(where_pred, ReplaceWith::Remove, |ident| {
                self.assoc_tys.get(ident).map(|_| ReplaceWith::Remove)
            })
            .combine_into_err(&mut errors);
        }
        errors.into_result().map(|_| where_preds)
    }
    /// Returns a tokenizer that outputs the method definitions inside the `which_item` item.
    pub fn methods_tokenizer(&self, which_item: WhichItem) -> MethodsTokenizer<'_> {
        MethodsTokenizer {
            trait_def: self,
            which_item,
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
/// Represents a trait method for use in `#[sabi_trait]`.
#[derive(Debug, Clone)]
pub(crate) struct TraitMethod<'a> {
    // Per-method flag taken from `SabiTraitAttrs::disable_inherent_default`.
    pub(crate) disable_inherent_default: bool,
    /// The `unsafe` qualifier of the method, if any.
    pub(crate) unsafety: Option<&'a Unsafe>,
    /// The ABI qualifier of the method, if any.
    pub(crate) abi: Option<&'a Abi>,
    /// Attributes applied to the method in the vtable.
    pub(crate) derive_attrs: &'a [Attribute],
    /// Attributes applied to the method in the trait definition.
    pub(crate) other_attrs: &'a [Attribute],
    /// The name of the method.
    pub(crate) name: &'a Ident,
    /// The method receiver (`self`, `&self`, or `&mut self`).
    pub(crate) self_param: SelfParam<'a>,
    /// The lifetime parameters of this method.
    pub(crate) lifetimes: Vec<&'a LifetimeDef>,
    /// The non-receiver parameters of the method.
    pub(crate) params: Vec<MethodParam<'a>>,
    /// The return type of this method,if None this returns `()`.
    pub(crate) output: Option<syn::Type>,
    /// Whether the return type borrows from self
    pub(crate) return_borrow_kind: Option<BorrowKind>,
    /// The parsed where clause of the method.
    pub(crate) where_clause: MethodWhereClause<'a>,
    /// The default implementation of the method.
    pub(crate) default: Option<DefaultMethod<'a>>,
    /// The semicolon token for the method
    /// (when the method did not have a default implementation).
    pub(crate) semicolon: Option<&'a Semi>,
}
/// The default implementation of a trait method.
#[derive(Debug, Clone)]
pub(crate) struct DefaultMethod<'a> {
    /// The braced body of the default implementation.
    pub(crate) block: &'a Block,
}
/// A single non-receiver parameter of a trait method.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct MethodParam<'a> {
    /// The name of the method parameter,
    /// which is `param_<number_of_parameter>` if the parameter is not just an identifier
    /// (ie:`(left,right)`,`Rect3D{x,y,z}`)
    pub(crate) name: &'a Ident,
    /// The parameter type.
    pub(crate) ty: syn::Type,
    /// The pattern for the parameter
    pub(crate) pattern: &'a syn::Pat,
}
impl<'a> TraitMethod<'a> {
    /// Parses a `TraitMethod` out of a trait method and its attributes.
    ///
    /// Returns `Ok(None)` when the method was filtered out during parsing,
    /// and `Err` when the method is not usable in `#[sabi_trait]`
    /// (eg: a static method, with no `self` receiver).
    pub fn new(
        mwa: MethodWithAttrs<'a>,
        disable_inherent_default: bool,
        ctokens: &'a CommonTokens,
        arena: &'a Arenas,
    ) -> Result<Option<Self>, syn::Error> {
        let method_signature = &mwa.item.sig;
        let decl = method_signature;
        let name = &method_signature.ident;
        let mut errors = LinearResult::ok(());
        // Shared error for methods without a `self` receiver.
        let push_error_msg = |errors: &mut Result<(), syn::Error>| {
            errors.push_err(spanned_err!(
                method_signature.ident,
                "Cannot define #[sabi_trait]traits containing methods \
                 without a `self`/`&self`/`&mut self` receiver (static methods)."
            ));
        };
        if decl.inputs.is_empty() {
            push_error_msg(&mut errors);
        }
        let mut input_iter = decl.inputs.iter();
        // The first input must be the receiver; classify it.
        let mut self_param = match input_iter.next() {
            Some(FnArg::Receiver(receiver)) => match &receiver.reference {
                Some((_, lifetime)) => SelfParam::ByRef {
                    lifetime: lifetime.as_ref(),
                    is_mutable: receiver.mutability.is_some(),
                },
                None => SelfParam::ByVal,
            },
            Some(FnArg::Typed { .. }) => {
                push_error_msg(&mut errors);
                SelfParam::ByVal
            }
            None => {
                push_error_msg(&mut errors);
                return errors.into_result().map(|_| unreachable!());
            }
        };
        let mut lifetimes: Vec<&'a syn::LifetimeDef> = decl.generics.lifetimes().collect();
        let mut return_borrow_kind = None::<BorrowKind>;
        let output = match &decl.output {
            syn::ReturnType::Default => None,
            syn::ReturnType::Type(_, ty) => {
                let mut ty: syn::Type = (**ty).clone();
                // Un-elide lifetimes in the return type for `&self`/`&mut self`
                // receivers, recording whether the return borrows from self.
                if let SelfParam::ByRef { lifetime, .. } = &mut self_param {
                    let visit_data = LifetimeUnelider::new(lifetime).visit_type(&mut ty);
                    return_borrow_kind = visit_data.found_borrow_kind;
                    if let Some(lt) = visit_data.additional_lifetime_def {
                        lifetimes.push(lt);
                    }
                }
                Some(ty)
            }
        };
        let default = mwa
            .item
            .default
            .as_ref()
            .map(|block| DefaultMethod { block });
        let where_clause = decl
            .generics
            .where_clause
            .as_ref()
            .and_then(|wc| match MethodWhereClause::new(wc, ctokens) {
                Ok(x) => Some(x),
                Err(e) => {
                    errors.push_err(e);
                    None
                }
            })
            .unwrap_or_default();
        // Give every remaining parameter a synthesized `param_<i>` name,
        // keeping the original pattern for the trait declaration.
        let mut params = Vec::<MethodParam<'a>>::with_capacity(input_iter.len());
        for (param_i, param) in input_iter.enumerate() {
            let (pattern, ty) = match param {
                FnArg::Receiver { .. } => unreachable!(),
                FnArg::Typed(typed) => (&*typed.pat, &*typed.ty),
            };
            let name = format!("param_{}", param_i);
            let mut name = syn::parse_str::<Ident>(&name).unwrap_or_else(|e| {
                errors.push_err(e);
                dummy_ident()
            });
            name.set_span(param.span());
            params.push(MethodParam {
                name: arena.alloc(name),
                ty: ty.clone(),
                pattern,
            });
        }
        errors.into_result()?;
        Ok(Some(Self {
            disable_inherent_default,
            unsafety: method_signature.unsafety.as_ref(),
            abi: method_signature.abi.as_ref(),
            derive_attrs: arena.alloc(mwa.attrs.derive_attrs),
            other_attrs: arena.alloc(mwa.attrs.other_attrs),
            name,
            lifetimes,
            self_param,
            params,
            output,
            return_borrow_kind,
            where_clause,
            default,
            semicolon: mwa.item.semi_token.as_ref(),
        }))
    }
    /// Returns a clone of `self`,
    /// where usages of associated types are replaced for use in `which_item`.
    ///
    /// Whether `Self::AssocTy` is an associated type is determined using `is_assoc_type`,
    /// which returns `Some()` with what to do with the associated type.
    pub fn replace_self<F>(
        &mut self,
        replace_with: ReplaceWith,
        mut is_assoc_type: F,
    ) -> Result<(), syn::Error>
    where
        F: FnMut(&Ident) -> Option<ReplaceWith>,
    {
        let mut errors = LinearResult::ok(());
        // Rewrite `Self::AssocTy` in every parameter type and in the
        // return type (when there is one).
        for param in self
            .params
            .iter_mut()
            .map(|x| &mut x.ty)
            .chain(self.output.as_mut())
        {
            replace_self_path::replace_self_path(param, replace_with.clone(), &mut is_assoc_type)
                .combine_into_err(&mut errors);
        }
        errors.into()
    }
}
////////////////////////////////////////////////////////////////////////////////
/// Used to print the generic parameters of a trait,
/// potentially including its associated types.
#[derive(Debug, Copy, Clone)]
pub struct GenericsTokenizer<'a> {
    /// How to print the generic parameters themselves.
    gen_params_in: GenParamsIn<'a, &'a TokenStream2>,
    /// The associated types to print after the generics, paired with the
    /// `Self`-prefix tokens emitted before each one; `None` to print none.
    assoc_tys: Option<(&'a HashMap<&'a Ident, AssocTyWithIndex>, &'a TokenStream2)>,
}
impl<'a> GenericsTokenizer<'a> {
    /// Changes type parameters to have a `?Sized` bound.
    #[allow(dead_code)]
    pub fn set_unsized_types(&mut self) {
        self.gen_params_in.set_unsized_types();
    }
    /// Removes bounds on type parameters.
    pub fn set_no_bounds(&mut self) {
        self.gen_params_in.set_no_bounds();
    }
    /// Skips lifetime parameters when printing the generics.
    pub fn skip_lifetimes(&mut self) {
        self.gen_params_in.skip_lifetimes();
    }
    /// Skips const parameters when printing the generics.
    #[allow(dead_code)]
    pub fn skip_consts(&mut self) {
        self.gen_params_in.skip_consts();
    }
}
impl<'a> ToTokens for GenericsTokenizer<'a> {
    /// Writes the generic parameters, then (optionally) each associated type
    /// prefixed by the configured `Self` tokens, each followed by a comma.
    fn to_tokens(&self, ts: &mut TokenStream2) {
        // Snapshot the printing options before emitting the generics.
        let with_bounds = self.gen_params_in.outputs_bounds();
        let with_default = self.gen_params_in.in_what == InWhat::ItemDecl;
        let unsized_types = self.gen_params_in.are_types_unsized();
        let in_dummy_struct = self.gen_params_in.in_what == InWhat::DummyStruct;
        let skips_unbounded = self.gen_params_in.skips_unbounded();
        self.gen_params_in.to_tokens(ts);
        if let Some((assoc_tys, self_tokens)) = self.assoc_tys {
            for with_index in assoc_tys.values() {
                let assoc_ty = &with_index.assoc_ty;
                if skips_unbounded && assoc_ty.bounds.is_empty() {
                    continue;
                }
                self_tokens.to_tokens(ts);
                if in_dummy_struct {
                    // Emits `*const` before the associated type in dummy structs.
                    use syn::token::{Const, Star};
                    Star::default().to_tokens(ts);
                    Const::default().to_tokens(ts);
                }
                assoc_ty.ident.to_tokens(ts);
                // Only print the colon when bounds are printed.
                let colon_token = assoc_ty.colon_token.filter(|_| with_bounds);
                if unsized_types {
                    if colon_token.is_none() {
                        Colon::default().to_tokens(ts);
                    }
                    quote!(?Sized+).to_tokens(ts);
                }
                if let Some(colon_token) = colon_token {
                    colon_token.to_tokens(ts);
                    assoc_ty.bounds.to_tokens(ts);
                }
                // The `= Default` part is only printed in item declarations.
                match &assoc_ty.default {
                    Some((eq_token, default_ty)) if with_default => {
                        eq_token.to_tokens(ts);
                        default_ty.to_tokens(ts);
                    }
                    _ => {}
                }
                Comma::default().to_tokens(ts);
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
/// Represents a `Deserialize<'de>` supertrait bound.
#[derive(Debug, Clone)]
pub(crate) struct DeserializeBound;
/// Used to return the information about supertraits, to construct TraitDefinition.
struct GetSupertraits<'a> {
    /// The known traits that are implemented as supertraits.
    impld_traits: Vec<TraitImplness<'a>>,
    /// The known traits that are not among the supertraits.
    unimpld_traits: Vec<&'a Ident>,
    /// The lifetime bounds among the supertraits.
    lifetime_bounds: Punctuated<&'a Lifetime, Comma>,
    /// The `Iterator::Item` type, if an Iterator supertrait specified one.
    iterator_item: Option<&'a syn::Type>,
    /// Present if a Deserialize bound was accepted
    /// (currently never set by `get_supertraits`, which rejects Deserialize).
    deserialize_bound: Option<DeserializeBound>,
    /// For each known trait: whether it is implemented.
    trait_flags: TraitStruct<bool>,
    /// For each known trait: the span used in diagnostics.
    trait_spans: TraitStruct<Span>,
    /// Errors accumulated while processing the supertraits.
    errors: LinearResult<()>,
}
/// Contains information about a supertrait, including whether it's implemented.
#[derive(Debug, Clone)]
pub(crate) struct TraitImplness<'a> {
    pub(crate) ident: Ident,
    pub(crate) bound: syn::TraitBound,
    pub(crate) is_implemented: bool,
    pub(crate) _marker: PhantomData<&'a ()>,
}
/// Processes the supertrait bounds of a trait definition.
///
/// Classifies every supertrait bound: known traits are marked as implemented
/// (together with the traits they imply), lifetime bounds are collected, and
/// anything unrecognized or unusable is reported through the returned
/// `errors` field rather than by returning early.
fn get_supertraits<'a, I>(
    supertraits: I,
    lifetime_params: &HashSet<&'a Lifetime>,
    which_object: WhichObject,
    arenas: &'a Arenas,
    _ctokens: &'a CommonTokens,
) -> GetSupertraits<'a>
where
    I: IntoIterator<Item = &'a TypeParamBound>,
{
    // Maps the identifier of each known trait to its `WhichTrait` discriminant.
    let trait_map = TRAIT_LIST
        .iter()
        .map(|t| (parse_str_as_ident(t.name), t.which_trait))
        .collect::<HashMap<Ident, WhichTrait>>();
    // A struct indexable by `WhichTrait`,
    // with information about all possible supertraits.
    let mut trait_struct = TraitStruct::TRAITS.map(|_, t| TraitImplness {
        ident: parse_str_as_ident(t.name),
        bound: parse_str_as_trait_bound(t.full_path).expect("BUG"),
        is_implemented: false,
        _marker: PhantomData,
    });
    let mut lifetime_bounds = Punctuated::<&'a Lifetime, Comma>::new();
    let mut iterator_item = None;
    let mut errors = LinearResult::ok(());
    // Deserialize bounds are rejected below, so this stays `None`.
    let deserialize_bound = None;
    for supertrait_bound in supertraits {
        match supertrait_bound {
            TypeParamBound::Trait(trait_bound) => {
                // Only the last path segment (e.g. `Clone` in `std::clone::Clone`)
                // is used to recognize the trait.
                let last_path_component = match trait_bound.path.segments.last() {
                    Some(x) => x,
                    None => continue,
                };
                let trait_ident = &last_path_component.ident;
                match trait_map.get(trait_ident) {
                    Some(&which_trait) => {
                        // Reject traits that this kind of trait object cannot use.
                        let usable_by = which_trait.usable_by();
                        match which_object {
                            WhichObject::DynTrait if !usable_by.dyn_trait() => {
                                errors.push_err(spanned_err!(
                                    trait_bound.path,
                                    "cannot use this trait with DynTrait",
                                ));
                            }
                            WhichObject::RObject if !usable_by.robject() => {
                                errors.push_err(spanned_err!(
                                    trait_bound.path,
                                    "cannot use this trait with RObject.
To make that trait usable you must use the \
#[sabi(use_dyntrait)] attribute,\
which changes the trait object implementation \
from using RObject to using DynTrait.\n\
",
                                ));
                            }
                            WhichObject::DynTrait | WhichObject::RObject => {}
                        }
                        // Marks a trait as implemented, respanning its tokens
                        // so diagnostics point at the user-written bound.
                        fn set_impld(wtrait: &mut TraitImplness<'_>, span: Span) {
                            wtrait.is_implemented = true;
                            wtrait.ident.set_span(span);
                            SetSpanVisitor::new(span).visit_trait_bound_mut(&mut wtrait.bound);
                        }
                        let span = trait_bound.span();
                        set_impld(&mut trait_struct[which_trait], span);
                        // Handle traits that imply other traits
                        // (e.g. Ord implies Eq/PartialOrd/PartialEq)
                        // or that carry extra payload (Iterator's Item type).
                        match which_trait {
                            WhichTrait::Iterator | WhichTrait::DoubleEndedIterator => {
                                set_impld(&mut trait_struct.iterator, span);
                                let iter_item = extract_iterator_item(last_path_component, arenas);
                                // The first explicitly-written Item type wins.
                                iterator_item = iterator_item.or(iter_item);
                            }
                            WhichTrait::Deserialize => {
                                errors.push_err(spanned_err!(
                                    trait_bound.path,
                                    "Deserialize is not currently supported."
                                ));
                            }
                            WhichTrait::Serialize => {
                                errors.push_err(spanned_err!(
                                    trait_bound.path,
                                    "Serialize is not currently supported."
                                ));
                            }
                            WhichTrait::Eq | WhichTrait::PartialOrd => {
                                set_impld(&mut trait_struct.partial_eq, span);
                            }
                            WhichTrait::Ord => {
                                set_impld(&mut trait_struct.partial_eq, span);
                                set_impld(&mut trait_struct.eq, span);
                                set_impld(&mut trait_struct.partial_ord, span);
                            }
                            WhichTrait::IoBufRead => {
                                set_impld(&mut trait_struct.io_read, span);
                            }
                            WhichTrait::Error => {
                                set_impld(&mut trait_struct.display, span);
                                set_impld(&mut trait_struct.debug, span);
                            }
                            _ => {}
                        }
                    }
                    None => {
                        // Unknown trait: report the accepted ones and stop.
                        let list = trait_map
                            .keys()
                            .map(|x| x.to_string())
                            .collect::<Vec<String>>();
                        errors.push_err(spanned_err!(
                            supertrait_bound,
                            "Unexpected supertrait bound.\nExpected one of:\n{}",
                            list.join("/"),
                        ));
                        break;
                    }
                }
            }
            TypeParamBound::Lifetime(lt) => {
                // Only lifetimes declared on the trait (or `'static`,
                // which `lifetime_params` is expected to contain) are allowed.
                if lifetime_params.contains(lt) {
                    lifetime_bounds.push(lt);
                } else {
                    errors.push_err(spanned_err!(
                        lt,
                        "Lifetimes is not from the trait or `'static`.",
                    ));
                    break;
                }
            }
        };
    }
    // If Iterator/DoubleEndedIterator are implemented, substitute the
    // user-specified `Item` type into their bounds, defaulting to `()`
    // (plus an error) when none was specified.
    let iter_trait = &mut trait_struct.iterator;
    let de_iter_trait = &mut trait_struct.double_ended_iterator;
    if iter_trait.is_implemented || de_iter_trait.is_implemented {
        let iter_item: syn::Type = iterator_item.cloned().unwrap_or_else(|| {
            let span = if de_iter_trait.is_implemented {
                de_iter_trait.ident.span()
            } else {
                iter_trait.ident.span()
            };
            errors.push_err(syn_err!(span, "You must specify the Iterator item type."));
            parse_str_as_type("()").expect("BUG")
        });
        let path_args = type_as_iter_path_arguments(iter_item);
        // Replaces the generic arguments of the bound's last path segment
        // with `<Item = ...>`.
        fn set_last_arguments(bounds: &mut syn::TraitBound, path_args: syn::PathArguments) {
            bounds.path.segments.last_mut().expect("BUG").arguments = path_args;
        }
        if de_iter_trait.is_implemented {
            set_last_arguments(&mut de_iter_trait.bound, path_args.clone());
        }
        set_last_arguments(&mut iter_trait.bound, path_args);
    }
    // Partition the known traits into implemented/unimplemented,
    // recording per-trait flags and spans first.
    let mut impld_traits = Vec::new();
    let mut unimpld_traits = Vec::new();
    let trait_flags = trait_struct.as_ref().map(|_, x| x.is_implemented);
    let trait_spans = trait_struct.as_ref().map(|_, x| x.ident.span());
    for trait_ in trait_struct.to_vec() {
        if trait_.is_implemented {
            impld_traits.push(trait_);
        } else {
            unimpld_traits.push(arenas.alloc(trait_.ident.clone()));
        }
    }
    GetSupertraits {
        impld_traits,
        unimpld_traits,
        lifetime_bounds,
        iterator_item,
        deserialize_bound,
        trait_flags,
        trait_spans,
        errors,
    }
}
////////////////////////////////////////////////////////////////////////////////
/// Extracts the Iterator::Item out of a path component.
///
/// Returns `None` when the segment has no angle-bracketed arguments,
/// or none of them is an `Item = Ty` binding.
fn extract_iterator_item<'a>(
    last_path_component: &syn::PathSegment,
    arenas: &'a Arenas,
) -> Option<&'a syn::Type> {
    use syn::{GenericArgument, PathArguments};

    let angle_brackets = match &last_path_component.arguments {
        PathArguments::AngleBracketed(x) => x,
        _ => return None,
    };
    angle_brackets.args.iter().find_map(|gen_arg| match gen_arg {
        // Arena-allocate a copy of the bound type so it outlives the input.
        GenericArgument::Binding(bind) if bind.ident == "Item" => {
            Some(arenas.alloc(bind.ty.clone()))
        }
        _ => None,
    })
}
/// Converts a type to `<Item= ty >`.
fn type_as_iter_path_arguments(ty: syn::Type) -> syn::PathArguments {
    // Wrap `ty` in an `Item = ty` associated-type binding.
    let binding = syn::GenericArgument::Binding(syn::Binding {
        ident: parse_str_as_ident("Item"),
        eq_token: Default::default(),
        ty,
    });
    // Then wrap that single binding in angle brackets.
    syn::PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments {
        colon2_token: None,
        lt_token: Default::default(),
        args: iter::once(binding).collect(),
        gt_token: Default::default(),
    })
}
/// Extracts the lifetime in `Deserialize<'lt>` out of a path component.
///
/// Errors (with the same message) both when the segment has no
/// angle-bracketed arguments and when no lifetime argument is present.
#[allow(dead_code)]
fn extract_deserialize_lifetime<'a>(
    last_path_component: &syn::PathSegment,
    arenas: &'a Arenas,
) -> Result<&'a syn::Lifetime, syn::Error> {
    use syn::{GenericArgument, PathArguments};

    let angle_brackets = match &last_path_component.arguments {
        PathArguments::AngleBracketed(x) => x,
        _ => return_spanned_err!(last_path_component, "Expected a lifetime parameter inside"),
    };
    angle_brackets
        .args
        .iter()
        .find_map(|gen_arg| match gen_arg {
            // Arena-allocate a copy so the lifetime outlives the input AST.
            GenericArgument::Lifetime(lt) => Some(arenas.alloc(lt.clone())),
            _ => None,
        })
        .ok_or_else(|| spanned_err!(last_path_component, "Expected a lifetime parameter inside"))
}
|
//! Commands traits and base CLI parsing
use crate::error::Error;
use crate::impls::OscarDoc;
use clap::ArgMatches;
/// A CLI command that can register itself on a clap app and run
/// from parsed arguments.
pub trait Command {
    /// Registers this command's subcommand onto an existing clap app.
    ///
    /// The default implementation simply appends [`Self::subcommand`].
    fn hook_to_clap(ctx: clap::App<'static>) -> clap::App<'static>
    where
        Self: Sized,
    {
        ctx.subcommand(Self::subcommand())
    }
    /// The clap subcommand definition (name, arguments, flags).
    fn subcommand() -> clap::App<'static>
    where
        Self: Sized;
    /// Executes the command with the already-parsed CLI arguments.
    fn run(matches: &ArgMatches) -> Result<(), Error>
    where
        Self: Sized;
}
/// Builds the top-level `oscar-tools` clap app, with one subcommand
/// per supported OSCAR version.
#[cfg(not(tarpaulin_include))]
pub(crate) fn build_app() -> clap::App<'static> {
    use crate::impls::OscarTxt;
    use clap::AppSettings;

    let app = clap::App::new("oscar-tools")
        // Print the help text instead of silently doing nothing.
        .global_setting(AppSettings::ArgRequiredElseHelp);
    app.subcommand(OscarDoc::subcommand())
        .subcommand(OscarTxt::subcommand())
}
#[cfg(not(tarpaulin_include))]
pub(crate) fn run(matches: ArgMatches) -> Result<(), Error> {
use crate::impls::OscarTxt;
let (version, subcommand) = matches
.subcommand()
.ok_or_else(|| Error::Custom("No version provided!".to_string()))?;
match version {
//TODO: this should be automatically done by calling a version resolver
// Some struct/enum that holds OSCAR versions, and implements a from string that
// buils something that implements run and runs the correct OSCAR version
"v2" => OscarDoc::run(subcommand),
"v1" => OscarTxt::run(subcommand),
x => Err(Error::Custom(format!("Unknown version {x}"))),
}
}
/// Runnable traits have to be implemented by commands
/// in order to be executed from CLI.
// TODO: Currently, run returns (), so if the command
// actually returns something usable, it cannot pass it on.
// shall we provide flexibility to the Runnable trait by using generics
// or provide another trait like Queryable to fetch results?
pub trait Runnable {
    /// Executes the command, returning only success/failure.
    fn run(&self) -> Result<(), Error>;
}
|
//! Server discovery endpoints.
pub mod discover_homeserver;
pub mod get_server_keys;
pub mod get_server_version;
|
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Display,
num::NonZeroUsize,
sync::{Arc, RwLock},
};
use async_trait::async_trait;
use backoff::BackoffConfig;
use object_store::DynObjectStore;
use observability_deps::tracing::warn;
use parquet_file::ParquetFilePath;
use uuid::Uuid;
use super::{
util::{copy_files, delete_files},
Scratchpad, ScratchpadGen,
};
/// Production [`ScratchpadGen`]: creates [`ProdScratchpad`]s backed by
/// three object stores (input, scratchpad, output).
#[derive(Debug)]
pub struct ProdScratchpadGen {
    /// Concurrency for object store copy/delete operations.
    concurrency: NonZeroUsize,
    /// When set, written files are kept in the scratchpad
    /// (the scratchpad is their only copy).
    shadow_mode: bool,
    /// Backoff/retry configuration for object store operations.
    backoff_config: BackoffConfig,
    /// Store that input files are copied from.
    store_input: Arc<DynObjectStore>,
    /// The intermediate scratchpad store.
    store_scratchpad: Arc<DynObjectStore>,
    /// Store that files are published to.
    store_output: Arc<DynObjectStore>,
}
impl ProdScratchpadGen {
    /// Creates a new generator from its configuration and the three stores.
    pub fn new(
        shadow_mode: bool,
        concurrency: NonZeroUsize,
        backoff_config: BackoffConfig,
        store_input: Arc<DynObjectStore>,
        store_scratchpad: Arc<DynObjectStore>,
        store_output: Arc<DynObjectStore>,
    ) -> Self {
        Self {
            shadow_mode,
            concurrency,
            backoff_config,
            store_input,
            store_scratchpad,
            store_output,
        }
    }
}
impl Display for ProdScratchpadGen {
    /// Identifies this implementation as the production one.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("prod")
    }
}
/// ScratchpadGen is the factory pattern; it creates Scratchpads
impl ScratchpadGen for ProdScratchpadGen {
    fn pad(&self) -> Arc<dyn Scratchpad> {
        Arc::new(ProdScratchpad {
            shadow_mode: self.shadow_mode,
            concurrency: self.concurrency,
            backoff_config: self.backoff_config.clone(),
            store_input: Arc::clone(&self.store_input),
            store_scratchpad: Arc::clone(&self.store_scratchpad),
            store_output: Arc::clone(&self.store_output),
            // Fresh random mask per pad, so several pads over the same
            // files produce distinct masked IDs (see test_collision).
            mask: Uuid::new_v4(),
            // Starts empty: no files known to this scratchpad yet.
            files_unmasked: RwLock::new(HashMap::default()),
        })
    }
}
/// Production scratchpad: stores files under UUIDs masked with a per-pad
/// random mask, and tracks which files are present / already published.
struct ProdScratchpad {
    /// When set, written files are never cleaned from the scratchpad
    /// (it holds their only copy).
    shadow_mode: bool,
    /// Concurrency for object store copy/delete operations.
    concurrency: NonZeroUsize,
    /// Backoff/retry configuration for object store operations.
    backoff_config: BackoffConfig,
    /// Store that input files are copied from.
    store_input: Arc<DynObjectStore>,
    /// The scratchpad store itself.
    store_scratchpad: Arc<DynObjectStore>,
    /// Store that `make_public` copies files to.
    store_output: Arc<DynObjectStore>,
    /// Random mask XORed into object store IDs (see `apply_mask`).
    mask: Uuid,
    /// Set of known, unmasked file.
    ///
    /// If the file is part of this map, it is in the scratchpad. If the boolean
    /// value is set, it was already copied to the output store.
    files_unmasked: RwLock<HashMap<ParquetFilePath, bool>>,
}
impl std::fmt::Debug for ProdScratchpad {
    /// Manual `Debug` impl because `files_unmasked` sits behind a lock and
    /// must be read before it can be formatted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let ref_files_unmasked = self.files_unmasked.read().unwrap();
        f.debug_struct("ProdScratchpad")
            // `shadow_mode` was previously missing from the output, making
            // the debug representation inconsistent with the struct fields.
            .field("shadow_mode", &self.shadow_mode)
            .field("concurrency", &self.concurrency)
            .field("backoff_config", &self.backoff_config)
            .field("store_input", &self.store_input)
            .field("store_scratchpad", &self.store_scratchpad)
            .field("store_output", &self.store_output)
            .field("mask", &self.mask)
            .field("files_unmasked", &ref_files_unmasked)
            .finish()
    }
}
impl ProdScratchpad {
    /// Masks/unmasks the given paths by XORing each object store ID with this
    /// pad's `mask`. XOR is its own inverse, so applying the mask twice
    /// restores the original IDs.
    ///
    /// Returns the converted paths together with the resulting UUIDs.
    fn apply_mask(&self, files: &[ParquetFilePath]) -> (Vec<ParquetFilePath>, Vec<Uuid>) {
        files
            .iter()
            .map(|f| {
                let uuid = Self::xor_uuids(f.objest_store_id(), self.mask);
                let f = (f.clone()).with_object_store_id(uuid);
                (f, uuid)
            })
            .unzip()
    }
    /// Bitwise XOR of two UUIDs, used as a reversible masking function.
    fn xor_uuids(a: Uuid, b: Uuid) -> Uuid {
        Uuid::from_u128(a.as_u128() ^ b.as_u128())
    }
    /// Records the state of the given (unmasked, masked) path pairs and keeps
    /// only the pairs that still need to be copied.
    ///
    /// `output` is `false` when loading into the scratchpad and `true` when
    /// copying to the output store. A pair is kept when it is new to the map,
    /// or when it transitions from "in scratchpad" to "copied to output" for
    /// the first time.
    fn check_known(
        &self,
        files_unmasked: &[ParquetFilePath],
        files_masked: &[ParquetFilePath],
        output: bool,
    ) -> (Vec<ParquetFilePath>, Vec<ParquetFilePath>) {
        let mut ref_files_unmasked = self.files_unmasked.write().unwrap();
        files_unmasked
            .iter()
            .zip(files_masked)
            .filter(|(f_unmasked, _f_masked)| {
                match ref_files_unmasked.entry((*f_unmasked).clone()) {
                    Entry::Occupied(mut o) => {
                        let old_var = *o.get();
                        // Never downgrades `true` (already in output) to `false`.
                        *o.get_mut() |= output;
                        // Keep only on the false -> true transition.
                        output && !old_var
                    }
                    Entry::Vacant(v) => {
                        v.insert(output);
                        true
                    }
                }
            })
            .map(|(un, masked)| (un.clone(), masked.clone()))
            .unzip()
    }
}
impl Drop for ProdScratchpad {
    /// Best-effort cleanup: the scratchpad user should have called `clean`;
    /// if files are still tracked, warn and delete them in a background task.
    fn drop(&mut self) {
        let mut ref_files_unmasked = self.files_unmasked.write().unwrap();
        if !ref_files_unmasked.is_empty() {
            warn!("scratchpad context not cleaned, may leak resources");
            // clean up eventually
            // Note: Use manual clean up code and do not create yet-another ProdScratchpad to avoid infinite recursions
            // during drop.
            let files = ref_files_unmasked
                .drain()
                .map(|(k, _in_out)| k)
                .collect::<Vec<_>>();
            let (files_masked, _uuids) = self.apply_mask(&files);
            // Clone everything the async task needs; `self` cannot be moved
            // into the spawned future.
            let store_scratchpad = Arc::clone(&self.store_scratchpad);
            let concurrency = self.concurrency;
            let backoff_config = self.backoff_config.clone();
            tokio::spawn(async move {
                delete_files(
                    &files_masked,
                    Arc::clone(&store_scratchpad),
                    &backoff_config,
                    concurrency,
                )
                .await;
            });
        }
    }
}
#[async_trait]
impl Scratchpad for ProdScratchpad {
    /// Returns the masked UUIDs the given files map to in the scratchpad,
    /// without copying anything.
    fn uuids(&self, files: &[ParquetFilePath]) -> Vec<Uuid> {
        let (_, uuids) = self.apply_mask(files);
        uuids
    }
    /// Copies the given (unmasked) input files into the scratchpad under
    /// masked IDs, skipping files the scratchpad already knows about.
    async fn load_to_scratchpad(&self, files: &[ParquetFilePath]) -> Vec<Uuid> {
        let (files_to, uuids) = self.apply_mask(files);
        let (files_from, files_to) = self.check_known(files, &files_to, false);
        copy_files(
            &files_from,
            &files_to,
            Arc::clone(&self.store_input),
            Arc::clone(&self.store_scratchpad),
            &self.backoff_config,
            self.concurrency,
        )
        .await;
        uuids
    }
    /// Copies the given (masked) scratchpad files to the output store under
    /// their unmasked IDs.
    async fn make_public(&self, files: &[ParquetFilePath]) -> Vec<Uuid> {
        // Unmasking masked inputs yields the public output paths (XOR is
        // self-inverse).
        let (files_to, uuids) = self.apply_mask(files);
        // only keep files that we did not know about, all others we've already synced it between the two stores
        let (files_to, files_from) = self.check_known(&files_to, files, true);
        copy_files(
            &files_from,
            &files_to,
            Arc::clone(&self.store_scratchpad),
            Arc::clone(&self.store_output),
            &self.backoff_config,
            self.concurrency,
        )
        .await;
        uuids
    }
    // clean_from_scratchpad selectively removes some files from the scratchpad.
    // This should be called after uploading files to objectstore.
    // Cleaning should be done regularly, so the scratchpad doesn't get too big.
    async fn clean_from_scratchpad(&self, files: &[ParquetFilePath]) {
        let files_masked: Vec<ParquetFilePath>;
        let _uuid: Vec<Uuid>;
        // scope the files_unmasked lock to protect manipulation of the scratchpad's state, but release it
        // before doing the async delete of files removed from the scratchpad.
        {
            let mut ref_files_unmasked = self.files_unmasked.write().unwrap();
            // Only files this pad actually tracks are deleted.
            let files = files
                .iter()
                .filter(|f| ref_files_unmasked.remove(f).is_some())
                .cloned()
                .collect::<Vec<_>>();
            (files_masked, _uuid) = self.apply_mask(&files);
        }
        delete_files(
            &files_masked,
            Arc::clone(&self.store_scratchpad),
            &self.backoff_config,
            self.concurrency,
        )
        .await;
    }
    // clean_written_from_scratchpad is the same as clean_from_scratchpad, but it does not remove files
    // when in shadow mode, since in shadow mode the scratchpad is the only copy of files.
    async fn clean_written_from_scratchpad(&self, files: &[ParquetFilePath]) {
        if !self.shadow_mode {
            self.clean_from_scratchpad(files).await;
        }
    }
    /// Removes every file currently tracked by this pad from the scratchpad.
    async fn clean(&self) {
        // clean will remove all files in the scratchpad as of the time files_unmasked is locked.
        let files: Vec<_> = self
            .files_unmasked
            .read()
            .unwrap()
            .keys()
            .cloned()
            .collect();
        // self.files_unmasked is locked again in clean_from_scratchpad. If another thread removes a file
        // between this relock, clean_from_scratchpad will skip it.
        self.clean_from_scratchpad(&files).await;
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use test_helpers::{maybe_start_logging, tracing::TracingCapture};
    use crate::components::scratchpad::test_util::{assert_content, file_path, stores};
    use compactor_test_utils::list_object_store;
    use super::*;

    /// `Display` should identify this implementation as "prod".
    #[test]
    fn test_display() {
        let (store_input, store_scratchpad, store_output) = stores();
        let gen = ProdScratchpadGen::new(
            true,
            NonZeroUsize::new(1).unwrap(),
            BackoffConfig::default(),
            store_input,
            store_scratchpad,
            store_output,
        );
        assert_eq!(gen.to_string(), "prod");
    }

    /// End-to-end walk through load -> make_public -> clean, in shadow mode.
    #[tokio::test]
    async fn test_staging() {
        maybe_start_logging();
        let (store_input, store_scratchpad, store_output) = stores();
        let gen = ProdScratchpadGen::new(
            true,
            NonZeroUsize::new(1).unwrap(),
            BackoffConfig::default(),
            Arc::clone(&store_input),
            Arc::clone(&store_scratchpad),
            Arc::clone(&store_output),
        );
        let pad = gen.pad();
        let f1 = file_path(1);
        let f2 = file_path(2);
        let f3 = file_path(3);
        let f4 = file_path(4);
        let f5_masked = file_path(5);
        let f6_masked = file_path(6);
        let f7_masked = file_path(7);
        for f in [&f1, &f2, &f3, &f4] {
            store_input
                .put(&f.object_store_path(), vec![].into())
                .await
                .unwrap();
        }
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(&store_scratchpad, []).await;
        assert_content(&store_output, []).await;
        // `uuids` must predict the IDs that `load_to_scratchpad` produces.
        let early_get_uuids = pad.uuids(&[f1.clone(), f2.clone()]);
        let uuids = pad.load_to_scratchpad(&[f1.clone(), f2.clone()]).await;
        assert_eq!(uuids.len(), 2);
        assert_eq!(early_get_uuids, uuids);
        let f1_masked = f1.clone().with_object_store_id(uuids[0]);
        let f2_masked = f2.clone().with_object_store_id(uuids[1]);
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(&store_scratchpad, [&f1_masked, &f2_masked]).await;
        assert_content(&store_output, []).await;
        // Loading f2 again must not duplicate it; only f3 is newly copied.
        let uuids = pad.load_to_scratchpad(&[f2.clone(), f3.clone()]).await;
        assert_eq!(uuids.len(), 2);
        assert_eq!(f2_masked.objest_store_id(), uuids[0]);
        let f3_masked = f3.clone().with_object_store_id(uuids[1]);
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(&store_scratchpad, [&f1_masked, &f2_masked, &f3_masked]).await;
        assert_content(&store_output, []).await;
        // Simulate files written directly into the scratchpad (e.g. compaction output).
        for f in [&f5_masked, &f6_masked, &f7_masked] {
            store_scratchpad
                .put(&f.object_store_path(), vec![].into())
                .await
                .unwrap();
        }
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(
            &store_scratchpad,
            [
                &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked,
            ],
        )
        .await;
        assert_content(&store_output, []).await;
        let uuids = pad
            .make_public(&[f5_masked.clone(), f6_masked.clone()])
            .await;
        assert_eq!(uuids.len(), 2);
        let f5 = f5_masked.clone().with_object_store_id(uuids[0]);
        let f6 = f6_masked.clone().with_object_store_id(uuids[1]);
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(
            &store_scratchpad,
            [
                &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked,
            ],
        )
        .await;
        assert_content(&store_output, [&f5, &f6]).await;
        // Publishing a previously-loaded file unmasks it back to its original ID.
        let uuids = pad.make_public(&[f1_masked.clone()]).await;
        assert_eq!(uuids.len(), 1);
        assert_eq!(f1.objest_store_id(), uuids[0]);
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(
            &store_scratchpad,
            [
                &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked,
            ],
        )
        .await;
        assert_content(&store_output, [&f1, &f5, &f6]).await;
        // we're in shadow mode, so written (compaction output) files must be be removed.
        pad.clean_written_from_scratchpad(&[f1.clone(), f5.clone()])
            .await;
        // they're still there
        assert_content(
            &store_scratchpad,
            [
                &f1_masked, &f2_masked, &f3_masked, &f5_masked, &f6_masked, &f7_masked,
            ],
        )
        .await;
        pad.clean_from_scratchpad(&[f1.clone(), f5.clone()]).await;
        assert_content(
            &store_scratchpad,
            [&f2_masked, &f3_masked, &f6_masked, &f7_masked],
        )
        .await;
        // Reload a cleaned file back into the scratchpad, simulating a backlogged partition that
        // requires several compaction loops (where the output of one compaction is later the input
        // to a subsequent compaction).
        let uuids = pad.load_to_scratchpad(&[f1.clone()]).await;
        assert_eq!(uuids.len(), 1);
        assert_eq!(f1_masked.objest_store_id(), uuids[0]);
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(
            &store_scratchpad,
            [&f1_masked, &f2_masked, &f3_masked, &f6_masked, &f7_masked],
        )
        .await;
        assert_content(&store_output, [&f1, &f5, &f6]).await;
        pad.clean().await;
        assert_content(&store_input, [&f1, &f2, &f3, &f4]).await;
        assert_content(&store_scratchpad, [&f7_masked]).await; // pad didn't know about these files
        assert_content(&store_output, [&f1, &f5, &f6]).await;
    }

    /// Two pads over the same file must use different masks and not collide.
    #[tokio::test]
    async fn test_collision() {
        let (store_input, store_scratchpad, store_output) = stores();
        let gen = ProdScratchpadGen::new(
            false,
            NonZeroUsize::new(1).unwrap(),
            BackoffConfig::default(),
            Arc::clone(&store_input),
            Arc::clone(&store_scratchpad),
            Arc::clone(&store_output),
        );
        let pad1 = gen.pad();
        let pad2 = gen.pad();
        let f = file_path(1);
        store_input
            .put(&f.object_store_path(), Default::default())
            .await
            .unwrap();
        let uuids = pad1.load_to_scratchpad(&[f.clone()]).await;
        assert_eq!(uuids.len(), 1);
        let f_masked1 = f.clone().with_object_store_id(uuids[0]);
        let uuids = pad2.load_to_scratchpad(&[f.clone()]).await;
        assert_eq!(uuids.len(), 1);
        let f_masked2 = f.with_object_store_id(uuids[0]);
        assert_content(&store_scratchpad, [&f_masked1, &f_masked2]).await;
        // Cleaning one pad must not touch the other pad's files.
        pad2.clean().await;
        assert_content(&store_scratchpad, [&f_masked1]).await;
    }

    /// Dropping an uncleaned pad warns and cleans up in the background.
    #[tokio::test]
    async fn test_clean_on_drop() {
        let (store_input, store_scratchpad, store_output) = stores();
        let gen = ProdScratchpadGen::new(
            false,
            NonZeroUsize::new(1).unwrap(),
            BackoffConfig::default(),
            Arc::clone(&store_input),
            Arc::clone(&store_scratchpad),
            Arc::clone(&store_output),
        );
        let pad = gen.pad();
        let f = file_path(1);
        store_input
            .put(&f.object_store_path(), Default::default())
            .await
            .unwrap();
        pad.load_to_scratchpad(&[f]).await;
        let capture = TracingCapture::new();
        drop(pad);
        // warning emitted
        assert_eq!(
            capture.to_string(),
            "level = WARN; message = scratchpad context not cleaned, may leak resources; "
        );
        // eventually cleaned up
        tokio::time::timeout(Duration::from_secs(5), async {
            loop {
                if list_object_store(&store_scratchpad).await.is_empty() {
                    return;
                }
                tokio::time::sleep(Duration::from_millis(10)).await;
            }
        })
        .await
        .expect("no timeout");
    }

    /// The Drop cleanup must not itself panic while the thread is unwinding.
    #[tokio::test]
    #[should_panic(expected = "foo")]
    async fn test_clean_does_not_crash_on_panic() {
        let (store_input, store_scratchpad, store_output) = stores();
        let gen = ProdScratchpadGen::new(
            false,
            NonZeroUsize::new(1).unwrap(),
            BackoffConfig::default(),
            Arc::clone(&store_input),
            Arc::clone(&store_scratchpad),
            Arc::clone(&store_output),
        );
        let pad = gen.pad();
        let f = file_path(1);
        store_input
            .put(&f.object_store_path(), Default::default())
            .await
            .unwrap();
        pad.load_to_scratchpad(&[f]).await;
        panic!("foo");
    }
}
|
use std::cmp::{max, min, Reverse};
use std::collections::BinaryHeap;
use std::collections::{HashMap, HashSet};
use std::vec;
use itertools::Itertools;
use whiteread::parse_line;
// 1_000_000_007, a common competitive-programming modulus; unused in this solution.
const ten97: usize = 1000000007;
// Keep a running difference relative to the stored data,
// and apply that value when popping an element.
/// Query processor over a min-heap:
///   `1 x` pushes x, `2 x` decreases all stored values by x (tracked lazily
///   via a global offset), `3` pops and prints the minimum.
fn main() {
    let q: usize = parse_line().unwrap();
    let mut heap = BinaryHeap::new();
    // Lazy global offset: instead of touching every stored value, shift
    // values on the way in (push) and on the way out (pop).
    let mut offset: isize = 0;
    for _ in 0..q {
        let query: Vec<isize> = parse_line().unwrap();
        match query[0] {
            1 => heap.push(Reverse(query[1] + offset)),
            2 => offset -= query[1],
            _ => {
                let Reverse(v) = heap.pop().unwrap();
                println!("{}", v - offset);
            }
        }
    }
}
|
use super::*;
/// Checks that `RCow` comparison impls are usable across *different*
/// lifetimes (variance) and across different-but-comparable inner types.
#[test]
fn cmp_and_variance() {
    // The helper functions pin down the signatures being tested:
    // each takes cows with distinct lifetimes 'a and 'b.
    fn eq_rcow<'a, 'b, T, U>(left: &RCowVal<'a, T>, right: &RCowVal<'b, U>) -> bool
    where
        T: Clone + PartialEq<U>,
        U: Clone,
    {
        RCow::eq(left, right)
    }
    fn cmp_rcow<'a, 'b, T, U>(left: &RCowVal<'a, T>, right: &RCowVal<'b, U>) -> Ordering
    where
        T: Clone + PartialOrd<U>,
        U: Clone,
    {
        RCow::partial_cmp(left, right).unwrap()
    }
    fn eq_rcow_str<'a, 'b>(left: &RCowStr<'a>, right: &RCowStr<'b>) -> bool {
        RCow::eq(left, right)
    }
    fn cmp_rcow_str<'a, 'b>(left: &RCowStr<'a>, right: &RCowStr<'b>) -> Ordering {
        RCow::cmp(left, right)
    }
    fn eq_rcow_slice<'a, 'b, T, U>(left: &RCowSlice<'a, T>, right: &RCowSlice<'b, U>) -> bool
    where
        T: Clone + PartialEq<U>,
        U: Clone,
    {
        RCow::eq(left, right)
    }
    // std doesn't have a `[T]: PartialCmp<[U]>` blanket impl.
    fn cmp_rcow_slice<'a, 'b, T>(left: &RCowSlice<'a, T>, right: &RCowSlice<'b, T>) -> Ordering
    where
        T: Clone + PartialOrd,
    {
        RCow::partial_cmp(left, right).unwrap()
    }
    // Values: Owned vs Borrowed variants must compare by contents.
    {
        let bb = 3u8;
        let left = RCow::Owned(2u8);
        let middle = RCow::Borrowed(&bb);
        let right = RCow::Owned(5u8);
        assert!(eq_rcow(&left, &left));
        assert!(!eq_rcow(&left, &middle));
        assert!(!eq_rcow(&right, &middle));
        assert_eq!(cmp_rcow(&left, &left), Ordering::Equal);
        assert_eq!(cmp_rcow(&left, &middle), Ordering::Less);
        assert_eq!(cmp_rcow(&right, &middle), Ordering::Greater);
    }
    // polymorphic comparison
    {
        let left = RCowVal::Owned(rvec![3]);
        let right = RCowVal::Owned(&[3][..]);
        assert!(eq_rcow(&left, &right));
        assert_eq!(cmp_rcow(&left, &right), Ordering::Equal);
    }
    // Strings.
    {
        let bb = "foo".to_string();
        let left = RCowStr::Borrowed(RStr::from_str("bar"));
        let middle = RCowStr::Borrowed(RStr::from_str(&bb));
        let right = RCowStr::Owned(RString::from("qux"));
        assert!(eq_rcow_str(&left, &left));
        assert!(!eq_rcow_str(&left, &middle));
        assert!(!eq_rcow_str(&right, &middle));
        assert_eq!(cmp_rcow_str(&left, &left), Ordering::Equal);
        assert_eq!(cmp_rcow_str(&left, &middle), Ordering::Less);
        assert_eq!(cmp_rcow_str(&right, &middle), Ordering::Greater);
    }
    // Slices.
    {
        let aa = [13, 21, 34];
        let bb = aa.iter().collect::<RVec<&u8>>();
        let left = RCowSlice::Borrowed(RSlice::from_slice(&[&3u8, &5, &9]));
        let middle = RCowSlice::Owned(bb);
        let right = RCowSlice::Borrowed(RSlice::from_slice(&[&55u8, &88, &144]));
        assert!(eq_rcow_slice(&left, &left));
        assert!(!eq_rcow_slice(&left, &middle));
        assert!(!eq_rcow_slice(&right, &middle));
        assert_eq!(cmp_rcow_slice(&left, &left), Ordering::Equal);
        assert_eq!(cmp_rcow_slice(&left, &middle), Ordering::Less);
        assert_eq!(cmp_rcow_slice(&right, &middle), Ordering::Greater);
    }
    // polymorphic comparison
    {
        let left = &[vec![3]];
        let left = RCowSlice::Borrowed(RSlice::<Vec<u8>>::from_slice(left));
        let right = &[[3]];
        let right = RCowSlice::Borrowed(RSlice::<[u8; 1]>::from_slice(right));
        assert!(eq_rcow_slice(&left, &right));
    }
}
/// Checks that `RCow::from_str` is usable in a `const` context.
#[test]
fn rcow_from_str() {
    const RCSTR: &RCowStr<'_> = &RCow::from_str("bar");
    assert_eq!(RCSTR.as_str(), "bar");
    // `as_str` is only const on Rust >= 1.64, hence the feature gate.
    #[cfg(feature = "rust_1_64")]
    {
        const STR: &str = RCSTR.as_str();
        assert_eq!(STR, "bar");
    }
}
/// Checks that `RCow::from_slice` is usable in a `const` context.
#[test]
fn rcow_from_slice() {
    const RCSLICE: &RCowSlice<'_, u8> = &RCow::from_slice(b"foo");
    assert_eq!(RCSLICE.as_slice(), b"foo");
    // `as_slice` is only const on Rust >= 1.64, hence the feature gate.
    #[cfg(feature = "rust_1_64")]
    {
        const SLICE: &[u8] = RCSLICE.as_slice();
        assert_eq!(SLICE, b"foo");
    }
}
/// Checks the `From`/`IntoReprC` conversions into `RCow`, for strings,
/// byte slices, and plain values: references convert to `Borrowed`,
/// owned values to `Owned`.
#[test]
fn rcow_from() {
    // Strings.
    {
        const S: &str = "what the heck";
        let ref_owned: &String = &S.to_string();
        let ref_rowned: &RString = &S.to_string().into_c();
        assert_matches!(RCow::from(S), RCow::Borrowed(x @ RStr{..}) if x == S);
        assert_matches!(RCow::from(ref_owned), RCow::Borrowed(x @ RStr{..}) if x == S);
        assert_matches!(RCow::from(ref_rowned), RCow::Borrowed(x @ RStr{..}) if x == S);
        assert_matches!(
            RCow::from(Cow::from(S)),
            RCow::Borrowed(x @ RStr{..}) if x == S
        );
        assert_matches!(
            RCow::from(Cow::from(S.to_string())),
            RCow::Owned(ref x @ RString{..}) if x == S
        );
        assert_matches!(
            Cow::from(S).into_c(),
            RCow::Borrowed(x @ RStr{..}) if x == S
        );
        assert_matches!(
            Cow::from(S.to_string()).into_c(),
            RCow::Owned(ref x @ RString{..}) if x == S
        );
        assert_matches!(
            RCow::from(RString::from(S)),
            RCow::Owned(ref x @ RString{..}) if x == S
        );
        assert_eq!(RCow::from(Cow::from(S)), S);
        assert_eq!(RCow::from(S.to_string()), S);
        assert_eq!(RCow::from(RStr::from(S)), S);
        assert_eq!(RCow::from(RString::from(S)), S);
    }
    // Byte slices.
    {
        const S: &[u8] = &[0, 1, 2, 3];
        let rref: RSlice<'_, u8> = S.into_c();
        let ref_owned: &Vec<u8> = &S.to_vec();
        let ref_rowned: &RVec<u8> = &S.to_vec().into_c();
        assert_matches!(RCow::from(S), RCow::Borrowed(x @ RSlice{..}) if x == S);
        assert_matches!(RCow::from(rref), RCow::Borrowed(x @ RSlice{..}) if x == S);
        assert_matches!(RCow::from(ref_owned), RCow::Borrowed(x @ RSlice{..}) if x == S);
        assert_matches!(RCow::from(ref_rowned), RCow::Borrowed(x @ RSlice{..}) if x == S);
        assert_matches!(
            RCow::from(Cow::from(S)),
            RCow::Borrowed(x @ RSlice{..}) if x == S
        );
        assert_matches!(
            RCow::from(Cow::from(S.to_vec())),
            RCow::Owned(ref x @ RVec{..}) if x == S
        );
        assert_matches!(
            Cow::from(S).into_c(),
            RCow::Borrowed(x @ RSlice{..}) if x == S
        );
        assert_matches!(
            Cow::from(S.to_vec()).into_c(),
            RCow::Owned(ref x @ RVec{..}) if x == S
        );
        assert_matches!(
            RCow::from(S.to_vec()),
            RCow::Owned(ref x @ RVec{..}) if x == S
        );
        assert_matches!(
            RCow::from(RVec::from(S)),
            RCow::Owned(ref x @ RVec{..}) if x == S
        );
    }
    // Plain values.
    {
        const S: &u32 = &1000u32;
        assert_eq!(*RCow::Borrowed(S), 1000);
        assert_eq!(*RCowVal::Owned(*S), 1000);
        assert_matches!(RCow::from(Cow::Borrowed(S)), RCow::Borrowed(&1000));
        assert_matches!(RCow::from(Cow::<u32>::Owned(*S)), RCow::Owned(1000));
        assert_matches!(Cow::Borrowed(S).into_c(), RCow::Borrowed(&1000));
        assert_matches!(Cow::<u32>::Owned(*S).into_c(), RCow::Owned(1000));
    }
}
/// Checks conversions from `RCow` back into std `Cow`, preserving the
/// Borrowed/Owned variant, for strings, byte slices, and plain values.
#[test]
fn rcow_into() {
    // Strings.
    {
        const S: &str = "what the heck";
        let bcow = || RCow::from(S);
        let ocow = || RCow::from(S.to_owned());
        assert_matches!(Cow::from(bcow()), Cow::Borrowed(S));
        assert_matches!(Cow::from(ocow()), Cow::Owned(ref x @ String{..}) if x == S);
        assert_matches!(bcow().into_rust(), Cow::Borrowed(S));
        assert_matches!(ocow().into_rust(), Cow::Owned(ref x @ String{..}) if x == S);
    }
    // Byte slices.
    {
        const S: &[u8] = &[0, 1, 2, 3];
        let bcow = || RCow::from(S);
        let ocow = || RCow::from(S.to_owned());
        assert_matches!(Cow::from(bcow()), Cow::Borrowed(S));
        assert_matches!(Cow::from(ocow()), Cow::Owned(ref x @ Vec{..}) if x == S);
        assert_matches!(bcow().into_rust(), Cow::Borrowed(S));
        assert_matches!(ocow().into_rust(), Cow::Owned(ref x @ Vec{..}) if x == S);
    }
    // Plain values.
    {
        const S: u32 = 1234;
        let bcow = || RCowVal::Borrowed(&S);
        let ocow = || RCowVal::Owned(S);
        assert_matches!(Cow::from(bcow()), Cow::Borrowed(&S));
        assert_matches!(Cow::from(ocow()), Cow::Owned(S));
        assert_matches!(bcow().into_rust(), Cow::Borrowed(&S));
        assert_matches!(ocow().into_rust(), Cow::Owned(S));
    }
}
/// Checks that `to_mut` converts a Borrowed cow into an Owned one whose
/// contents can then be mutated.
#[test]
fn to_mut() {
    // Values.
    {
        let mut value = RCow::<&u32, u32>::Borrowed(&100);
        assert_eq!(*value, 100);
        *value.to_mut() = 137;
        assert_eq!(*value, 137);
    }
    // Strings.
    {
        let mut value = RCow::<RStr<'_>, RString>::Borrowed("what".into_c());
        assert_eq!(&*value, "what");
        *value.to_mut() = "the".piped(RString::from);
        assert_eq!(&*value, "the");
    }
    // Slices.
    {
        let arr = [0, 1, 2, 3];
        let mut value = RCow::<RSlice<'_, u32>, RVec<u32>>::Borrowed((&arr[..]).into_c());
        assert_eq!(&*value, &arr[..]);
        *value.to_mut() = vec![99, 100, 101].into_c();
        assert_eq!(&*value, &[99, 100, 101][..]);
    }
}
/// Checks that `into_owned` on a Borrowed cow yields the owned counterpart
/// (u32, RString, RVec) with equal contents.
#[test]
fn into_owned() {
    // Values.
    {
        let value = RCowVal::<'_, u32>::Borrowed(&100);
        let value: u32 = value.into_owned();
        assert_eq!(value, 100);
    }
    // Strings.
    {
        let value = RCowStr::<'_>::Borrowed("what".into());
        let value: RString = value.into_owned();
        assert_eq!(&*value, "what");
    }
    // Slices.
    {
        let arr = [0, 1, 2, 3];
        let value = RCowSlice::<'_, u32>::Borrowed((&arr[..]).into());
        let value: RVec<u32> = value.into_owned();
        assert_eq!(&*value, &arr[..]);
    }
}
/// Checks serde deserialization into `RCow`: inputs that can alias the
/// source buffer deserialize as Borrowed, the rest as Owned.
#[test]
fn deserialize() {
    {
        // Borrowed string
        let json = r##" "what the hell" "##;
        let str_borr = "what the hell".piped(RStr::from);
        let what: BorrowingRCowStr<'_> = serde_json::from_str(json).unwrap();
        assert_eq!(what.cow.as_borrowed(), Some(str_borr),);
    }
    {
        // Owned string
        // The `\n` escape forces an allocation, so this cannot borrow.
        let json = r##" "what \nthe hell" "##;
        let str_owned = "what \nthe hell".piped(RString::from);
        let what: RCowStr<'_> = serde_json::from_str(json).unwrap();
        assert_eq!(what.as_owned(), Some(&str_owned),);
    }
    {
        // Owned list
        let json = r##" [0, 1, 2, 3] "##;
        let what: RCowSlice<'_, u8> = serde_json::from_str(json).unwrap();
        assert_eq!(what.as_owned(), Some(&vec![0, 1, 2, 3].into_c()),);
    }
    {
        // Borrowed list, using bincode.
        let list = [0u8, 1, 2, 3];
        let serialized = bincode::serialize(&list[..]).unwrap();
        let what: BorrowingRCowU8Slice<'_> = bincode::deserialize(&serialized[..]).unwrap();
        assert_eq!(what.cow.as_borrowed(), Some((&list[..]).into_c()),);
    }
    {
        // Owned value
        let json = r##" 1000 "##;
        let what: RCowVal<'_, u16> = serde_json::from_str(json).unwrap();
        assert_eq!(what.as_owned(), Some(&1000),);
    }
}
|
/// A prefix tree over the fixed alphabet `'a'..='z'` (LeetCode 208).
///
/// Note: all public methods map bytes with `b - b'a'`, so inputs must be
/// lowercase ASCII; anything else panics on the array index.
#[derive(Default)]
pub struct Trie {
    // True when a complete inserted word ends at this node.
    is_end: bool,
    // One slot per letter; `None` means no child for that letter.
    children: [Option<Box<Trie>>; 26],
}

impl Trie {
    /** Initialize your data structure here. */
    pub fn new() -> Self {
        Self::default()
    }

    /// Walk the trie along `word`, returning the node reached, or `None`
    /// if some letter has no child. Shared by `search` and `starts_with`,
    /// which previously duplicated this traversal.
    fn find(&self, word: &str) -> Option<&Trie> {
        let mut cur = self;
        for i in word.bytes().map(|b| (b - b'a') as usize) {
            cur = cur.children[i].as_deref()?;
        }
        Some(cur)
    }

    /** Inserts a word into the trie. */
    pub fn insert(&mut self, word: String) {
        let mut cur = self;
        for i in word.bytes().map(|b| (b - b'a') as usize) {
            // Create missing nodes on the way down.
            cur = cur.children[i].get_or_insert_with(|| Box::new(Trie::new()));
        }
        cur.is_end = true;
    }

    /** Returns if the word is in the trie. */
    pub fn search(&self, word: String) -> bool {
        // The full path must exist AND end on a word boundary.
        self.find(&word).map_or(false, |node| node.is_end)
    }

    /** Returns if there is any word in the trie that starts with the given prefix. */
    pub fn starts_with(&self, prefix: String) -> bool {
        // Any existing path suffices; it need not end a word.
        self.find(&prefix).is_some()
    }
}
#[test]
fn test0208() {
    // LeetCode 208 example: "apple" is a full word, "app" is only a
    // prefix until it is inserted explicitly.
    let mut trie = Trie::new();
    trie.insert("apple".to_string());
    assert!(trie.search("apple".to_string()));
    assert!(!trie.search("app".to_string()));
    assert!(trie.starts_with("app".to_string()));
    trie.insert("app".to_string());
    assert!(trie.search("app".to_string()));
}
|
//! Generating a set of field keys and values given a specification
use crate::{
now_ns, specification,
substitution::{self, pick_from_replacements},
};
use handlebars::Handlebars;
use rand::rngs::SmallRng;
use rand::Rng;
use rand::SeedableRng;
use serde_json::json;
use serde_json::Value;
use snafu::{ResultExt, Snafu};
use std::{ops::Range, time::Duration};
/// Field-specific Results
///
/// The error parameter defaults to this module's [`Error`], so most
/// signatures can just write `Result<T>`.
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// Errors that may happen while creating fields
#[derive(Snafu, Debug)]
#[allow(missing_docs)]
pub enum Error {
    /// Substituting into the field-name template failed.
    #[snafu(display("Could not create field name, caused by:\n{}", source))]
    CouldNotCreateFieldName { source: crate::substitution::Error },
    /// The string field's Handlebars template failed to compile.
    #[snafu(display("Could not compile string field template: {}", source))]
    CouldNotCompileStringTemplate {
        // Boxed — presumably to keep this enum (and every Result) small.
        #[snafu(source(from(handlebars::TemplateError, Box::new)))]
        source: Box<handlebars::TemplateError>,
    },
    /// Rendering a compiled string field template failed.
    #[snafu(display("Could not render string field template: {}", source))]
    CouldNotRenderStringTemplate {
        #[snafu(source(from(handlebars::RenderError, Box::new)))]
        source: Box<handlebars::RenderError>,
    },
}
/// Different field type generators
#[derive(Debug)]
pub enum FieldGeneratorImpl {
    /// Boolean field generator
    Bool(BooleanFieldGenerator),
    /// Integer field generator
    I64(I64FieldGenerator),
    /// Float field generator
    F64(F64FieldGenerator),
    /// String field generator (boxed: it is much larger than the others)
    String(Box<StringFieldGenerator>),
    /// Uptime field generator
    Uptime(UptimeFieldGenerator),
}

impl FieldGeneratorImpl {
    /// Create fields that will generate according to the spec
    ///
    /// One generator is created per `spec.count` (default 1). Each one sees
    /// the template `data` extended with its 1-based `field.id`, so field
    /// name templates can produce numbered names.
    ///
    /// # Errors
    /// Fails if the field name can't be rendered or a string field's
    /// template doesn't compile.
    pub fn from_spec(
        spec: &specification::FieldSpec,
        data: Value,
        execution_start_time: i64,
    ) -> Result<Vec<Self>> {
        use specification::FieldValueSpec::*;

        let field_count = spec.count.unwrap_or(1);
        let mut fields = Vec::with_capacity(field_count);

        // Field ids are 1-based; inclusive range is clearer than `1..n + 1`.
        for field_id in 1..=field_count {
            let mut data = data.clone();
            let d = data.as_object_mut().expect("data must be object");
            d.insert("field".to_string(), json!({ "id": field_id }));

            let field_name = substitution::render_once("field", &spec.name, &data)
                .context(CouldNotCreateFieldNameSnafu)?;

            // Fresh RNG per field so each generator owns its own stream.
            let rng =
                SmallRng::from_rng(&mut rand::thread_rng()).expect("SmallRng should always create");

            let field = match &spec.field_value_spec {
                Bool(true) => Self::Bool(BooleanFieldGenerator::new(&field_name, rng)),
                Bool(false) => unimplemented!("Not sure what false means for bool fields yet"),
                I64 {
                    range,
                    increment,
                    reset_after,
                } => Self::I64(I64FieldGenerator::new(
                    &field_name,
                    range,
                    *increment,
                    *reset_after,
                    rng,
                )),
                F64 { range } => Self::F64(F64FieldGenerator::new(&field_name, range, rng)),
                String {
                    pattern,
                    replacements,
                } => Self::String(Box::new(StringFieldGenerator::new(
                    &field_name,
                    pattern,
                    data,
                    replacements.to_vec(),
                    rng,
                )?)),
                Uptime { kind } => Self::Uptime(UptimeFieldGenerator::new(
                    &field_name,
                    kind,
                    execution_start_time,
                )),
            };
            fields.push(field);
        }

        Ok(fields)
    }

    /// Writes the field in line protocol to the passed writer
    /// (`name=value`; string values are double-quoted).
    pub fn write_to<W: std::io::Write>(&mut self, mut w: W, timestamp: i64) -> std::io::Result<()> {
        match self {
            Self::Bool(f) => {
                // Use the generator's own method instead of poking `f.rng`
                // directly, consistent with the other arms.
                let v = f.generate_value();
                write!(w, "{}={}", f.name, v)
            }
            Self::I64(f) => {
                let v = f.generate_value();
                write!(w, "{}={}", f.name, v)
            }
            Self::F64(f) => {
                let v = f.generate_value();
                write!(w, "{}={}", f.name, v)
            }
            Self::String(f) => {
                let v = f.generate_value(timestamp);
                write!(w, "{}=\"{}\"", f.name, v)
            }
            Self::Uptime(f) => match f.kind {
                specification::UptimeKind::I64 => {
                    let v = f.generate_value();
                    write!(w, "{}={}", f.name, v)
                }
                specification::UptimeKind::Telegraf => {
                    let v = f.generate_value_as_string();
                    write!(w, "{}=\"{}\"", f.name, v)
                }
            },
        }
    }
}
/// Generate boolean field names and values.
#[derive(Debug)]
pub struct BooleanFieldGenerator {
    /// The name (key) of the field
    pub name: String,
    // Private RNG driving the coin flips.
    rng: SmallRng,
}

impl BooleanFieldGenerator {
    /// Create a new boolean field generator that will always use the specified
    /// name.
    pub fn new(name: &str, rng: SmallRng) -> Self {
        Self {
            name: name.to_string(),
            rng,
        }
    }

    /// Generate a random value
    pub fn generate_value(&mut self) -> bool {
        self.rng.gen()
    }
}
/// Generate integer field names and values.
#[derive(Debug)]
pub struct I64FieldGenerator {
    /// The name (key) of the field
    pub name: String,
    // Half-open sampling range for each raw value.
    range: Range<i64>,
    // When set, values accumulate into a running total.
    increment: bool,
    rng: SmallRng,
    // Running total used in incrementing mode.
    previous_value: i64,
    // Restart the running total after this many calls, if set.
    reset_after: Option<usize>,
    // Calls since the last reset.
    current_tick: usize,
}

impl I64FieldGenerator {
    /// Create a new integer field generator that will always use the specified
    /// name.
    pub fn new(
        name: impl Into<String>,
        range: &Range<i64>,
        increment: bool,
        reset_after: Option<usize>,
        rng: SmallRng,
    ) -> Self {
        Self {
            name: name.into(),
            range: range.clone(),
            increment,
            rng,
            previous_value: 0,
            reset_after,
            current_tick: 0,
        }
    }

    /// Generate a random value
    pub fn generate_value(&mut self) -> i64 {
        // A degenerate range (start == end) always yields `start`;
        // `gen_range` would panic on an empty range.
        let sampled = if self.range.start == self.range.end {
            self.range.start
        } else {
            self.rng.gen_range(self.range.clone())
        };

        if !self.increment {
            return sampled;
        }

        // Incrementing mode: fold the sample into a wrapping running total.
        self.previous_value = self.previous_value.wrapping_add(sampled);
        let value = self.previous_value;

        // Optionally restart the total every `reset` calls (the value
        // returned this call is unaffected by the reset).
        if let Some(reset) = self.reset_after {
            self.current_tick += 1;
            if self.current_tick >= reset {
                self.previous_value = 0;
                self.current_tick = 0;
            }
        }

        value
    }
}
/// Generate floating point field names and values.
#[derive(Debug)]
pub struct F64FieldGenerator {
    /// The name (key) of the field
    pub name: String,
    // Half-open sampling range.
    range: Range<f64>,
    rng: SmallRng,
}

impl F64FieldGenerator {
    /// Create a new floating point field generator that will always use the
    /// specified name.
    pub fn new(name: impl Into<String>, range: &Range<f64>, rng: SmallRng) -> Self {
        Self {
            name: name.into(),
            range: range.clone(),
            rng,
        }
    }

    /// Generate a random value
    pub fn generate_value(&mut self) -> f64 {
        let (start, end) = (self.range.start, self.range.end);
        // A (nearly) empty range always yields `start`; sampling an empty
        // range would panic.
        if (start - end).abs() < f64::EPSILON {
            start
        } else {
            self.rng.gen_range(self.range.clone())
        }
    }
}
/// Generate string field names and values.
#[derive(Debug)]
pub struct StringFieldGenerator {
    /// The name (key) of the field
    pub name: String,
    rng: SmallRng,
    // Candidate replacement values substituted into the template.
    replacements: Vec<specification::Replacement>,
    // Registry holding the compiled template, registered under `name`.
    handlebars: Handlebars<'static>,
    // Base template data; mutated per call with replacements + timestamp.
    data: Value,
}
impl StringFieldGenerator {
    /// Create a new string field generator
    ///
    /// Compiles `template` into a Handlebars registry under the field's
    /// name. Fails with `CouldNotCompileStringTemplate` on a bad template.
    pub fn new(
        name: impl Into<String>,
        template: impl Into<String>,
        data: Value,
        replacements: Vec<specification::Replacement>,
        rng: SmallRng,
    ) -> Result<Self> {
        let name = name.into();
        let mut registry = substitution::new_handlebars_registry();
        registry
            .register_template_string(&name, template.into())
            .context(CouldNotCompileStringTemplateSnafu)?;
        Ok(Self {
            name,
            rng,
            replacements,
            handlebars: registry,
            data,
        })
    }
    /// Generate a random value
    ///
    /// Picks replacement values, injects them and the row's `timestamp`
    /// into the template data, then renders the compiled template.
    /// Panics if rendering fails at runtime.
    pub fn generate_value(&mut self, timestamp: i64) -> String {
        let replacements = pick_from_replacements(&mut self.rng, &self.replacements);
        let d = self.data.as_object_mut().expect("data must be object");
        if replacements.is_empty() {
            // Drop any stale replacements left from a previous call.
            d.remove("replacements");
        } else {
            d.insert("replacements".to_string(), json!(replacements));
        }
        d.insert("timestamp".to_string(), json!(timestamp));
        self.handlebars
            .render(&self.name, &self.data)
            .expect("Unable to substitute string field value")
    }
}
/// Generate an i64 field that has the name `uptime` and the value of the number
/// of seconds since the data generator started running
#[derive(Debug)]
pub struct UptimeFieldGenerator {
    /// The name (key) of the field
    pub name: String,
    // Start of the run, in nanoseconds (same clock as `now_ns`).
    execution_start_time: i64,
    /// The specification type of the uptime field. Either an int64 or a string
    pub kind: specification::UptimeKind,
}

impl UptimeFieldGenerator {
    fn new(
        name: impl Into<String>,
        kind: &specification::UptimeKind,
        execution_start_time: i64,
    ) -> Self {
        Self {
            execution_start_time,
            kind: *kind,
            name: name.into(),
        }
    }

    /// Generates the uptime as an i64
    pub fn generate_value(&mut self) -> i64 {
        // Nanoseconds elapsed since start, truncated to whole seconds.
        let nanos_elapsed = (now_ns() - self.execution_start_time) as u64;
        Duration::from_nanos(nanos_elapsed).as_secs() as i64
    }

    /// Generates the uptime as a string, which is what should be used if `self.kind == specification::UptimeKind::Telegraf`
    pub fn generate_value_as_string(&mut self) -> String {
        let elapsed_seconds = self.generate_value();
        let days = elapsed_seconds / (60 * 60 * 24);
        let days_plural = if days == 1 { "" } else { "s" };
        // Hours and minutes within the current day/hour respectively.
        let hours = (elapsed_seconds / 3600) % 24;
        let minutes = (elapsed_seconds / 60) % 60;
        format!("{days} day{days_plural}, {hours:02}:{minutes:02}")
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::specification::UptimeKind;
    use rand::SeedableRng;
    use test_helpers::approximately_equal;

    #[test]
    fn generate_i64_field_always_the_same() {
        // If the specification has the same number for the start and end of the
        // range...
        let mut i64fg =
            I64FieldGenerator::new("i64fg", &(3..3), false, None, SmallRng::from_entropy());
        let i64_fields: Vec<_> = (0..10).map(|_| i64fg.generate_value()).collect();
        let expected = i64_fields[0];
        // All the values generated will always be the same.
        assert!(i64_fields.iter().all(|f| *f == expected), "{i64_fields:?}");
        // If the specification has n for the start and n+1 for the end of the range...
        let mut i64fg =
            I64FieldGenerator::new("i64fg", &(4..5), false, None, SmallRng::from_entropy());
        let i64_fields: Vec<_> = (0..10).map(|_| i64fg.generate_value()).collect();
        // We know what the value will be even though we're using a real random number generator
        let expected = 4;
        // All the values generated will also always be the same, because the end of the
        // range is exclusive.
        assert!(i64_fields.iter().all(|f| *f == expected), "{i64_fields:?}");
    }

    #[test]
    fn generate_i64_field_within_a_range() {
        let range = 3..1000;
        let mut i64fg =
            I64FieldGenerator::new("i64fg", &range, false, None, SmallRng::from_entropy());
        let val = i64fg.generate_value();
        assert!(range.contains(&val), "`{val}` was not in the range");
    }

    #[test]
    fn generate_incrementing_i64_field() {
        // With increment on and a strictly positive range, each value must be
        // strictly greater than the previous one.
        let mut i64fg =
            I64FieldGenerator::new("i64fg", &(3..10), true, None, SmallRng::from_entropy());
        let val1 = i64fg.generate_value();
        let val2 = i64fg.generate_value();
        let val3 = i64fg.generate_value();
        let val4 = i64fg.generate_value();
        assert!(val1 < val2, "`{val1}` < `{val2}` was false");
        assert!(val2 < val3, "`{val2}` < `{val3}` was false");
        assert!(val3 < val4, "`{val3}` < `{val4}` was false");
    }

    #[test]
    fn incrementing_i64_wraps() {
        let rng = SmallRng::from_entropy();
        let range = 3..10;
        let previous_value = i64::MAX;
        // Construct by hand to set the previous value at the end of i64's range
        let mut i64fg = I64FieldGenerator {
            name: "i64fg".into(),
            range: range.clone(),
            increment: true,
            reset_after: None,
            rng,
            previous_value,
            current_tick: 0,
        };
        // Adding to i64::MAX must wrap around rather than panic.
        let resulting_range =
            range.start.wrapping_add(previous_value)..range.end.wrapping_add(previous_value);
        let val = i64fg.generate_value();
        assert!(
            resulting_range.contains(&val),
            "`{val}` was not in the range"
        );
    }

    #[test]
    fn incrementing_i64_that_resets() {
        // After `reset_after` calls the running total restarts, so the 4th
        // value drops back below the 3rd.
        let reset_after = Some(3);
        let mut i64fg = I64FieldGenerator::new(
            "i64fg",
            &(3..8),
            true,
            reset_after,
            SmallRng::from_entropy(),
        );
        let val1 = i64fg.generate_value();
        let val2 = i64fg.generate_value();
        let val3 = i64fg.generate_value();
        let val4 = i64fg.generate_value();
        assert!(val1 < val2, "`{val1}` < `{val2}` was false");
        assert!(val2 < val3, "`{val2}` < `{val3}` was false");
        assert!(val4 < val3, "`{val4}` < `{val3}` was false");
    }

    #[test]
    fn generate_f64_field_always_the_same() {
        // If the specification has the same number for the start and end of the
        // range...
        let start_and_end = 3.0;
        let range = start_and_end..start_and_end;
        let mut f64fg = F64FieldGenerator::new("f64fg", &range, SmallRng::from_entropy());
        let f64_fields: Vec<_> = (0..10).map(|_| f64fg.generate_value()).collect();
        // All the values generated will always be the same known value.
        assert!(
            f64_fields
                .iter()
                .all(|f| approximately_equal(*f, start_and_end)),
            "{f64_fields:?}"
        );
    }

    #[test]
    fn generate_f64_field_within_a_range() {
        let range = 3.0..1000.0;
        let mut f64fg = F64FieldGenerator::new("f64fg", &range, SmallRng::from_entropy());
        let val = f64fg.generate_value();
        assert!(range.contains(&val), "`{val}` was not in the range");
    }

    #[test]
    fn generate_string_field_with_data() {
        // 2021-10-07 in nanoseconds; exercises the `format-time` helper and
        // measurement-name substitution.
        let fake_now = 1633595510000000000;
        let mut stringfg = StringFieldGenerator::new(
            "str",
            r#"my value {{measurement.name}} {{format-time "%Y-%m-%d"}}"#,
            json!({"measurement": {"name": "foo"}}),
            vec![],
            SmallRng::from_entropy(),
        )
        .unwrap();
        assert_eq!("my value foo 2021-10-07", stringfg.generate_value(fake_now));
    }

    #[test]
    fn uptime_i64() {
        // Pretend data generator started running 10 seconds ago
        let seconds_ago = 10;
        let execution_start_time = now_ns() - seconds_ago * 1_000_000_000;
        let mut uptimefg = UptimeFieldGenerator::new("foo", &UptimeKind::I64, execution_start_time);
        assert_eq!(seconds_ago, uptimefg.generate_value());
    }

    #[test]
    fn uptime_telegraf() {
        // NOTE(review): kind is I64 here, but generate_value_as_string is
        // called directly, so the kind field doesn't affect this test.
        // Pretend data generator started running 10 days, 2 hours, and 33 minutes ago
        let seconds_ago = 10 * 24 * 60 * 60 + 2 * 60 * 60 + 33 * 60;
        let execution_start_time = now_ns() - seconds_ago * 1_000_000_000;
        let mut uptimefg = UptimeFieldGenerator::new("foo", &UptimeKind::I64, execution_start_time);
        assert_eq!("10 days, 02:33", uptimefg.generate_value_as_string());
        // Pretend data generator started running 1 day, 14 hours, and 5 minutes ago
        // to exercise different formatting
        let seconds_in_1_day = 24 * 60 * 60;
        let seconds_in_14_hours = 14 * 60 * 60;
        let seconds_in_5_minutes = 5 * 60;
        let seconds_ago = seconds_in_1_day + seconds_in_14_hours + seconds_in_5_minutes;
        let execution_start_time = now_ns() - seconds_ago * 1_000_000_000;
        let mut uptimefg = UptimeFieldGenerator::new("foo", &UptimeKind::I64, execution_start_time);
        assert_eq!("1 day, 14:05", uptimefg.generate_value_as_string());
    }
}
|
//! The parser: turns a series of tokens into an AST
use arrayvec::ArrayVec;
use crate::{
tokenizer::{Interpol as TokenInterpol, Meta, Span, Token, TokenKind},
value::Value
};
use std::fmt;
pub use arenatree::NodeId;
use arenatree::*;
pub mod types;
use self::types::{Error as ErrorNode, TypedNode};
/// The special identifier `"or"` (presumably the keyword-like token used by
/// `or`-default expressions; see `ASTKind::OrDefault`).
// `'static` is implied for consts; the explicit lifetime was redundant
// (clippy::redundant_static_lifetimes).
const OR: &str = "or";
/// Convenience alias: an arenatree arena specialized to this parser's nodes.
pub type Arena<'a> = arenatree::Arena<'a, ASTNode>;
/// An error that occurred during parsing
#[derive(Clone, Debug, Fail, PartialEq)]
pub enum ParseError {
    /// A pattern tried to add a second `@` binding (see `parse_pattern`).
    #[fail(display = "can't bind pattern here, already bound before")]
    AlreadyBound,
    /// A specific token kind was required but something else (or, with
    /// `None`, end of input) was found.
    #[fail(display = "expected {:?}, found {:?}", _0, _1)]
    Expected(TokenKind, Option<TokenKind>),
    /// A node had an unexpected type; carries the expected type's name.
    #[fail(display = "invalid type! expected {}", _0)]
    InvalidType(&'static str),
    /// The token stream ended in the middle of a construct.
    #[fail(display = "unexpected eof")]
    UnexpectedEOF,
    /// A token that cannot appear in the current context.
    #[fail(display = "unexpected token {:?} not applicable in this context", _0)]
    Unexpected(TokenKind)
}
/// An AST with the arena and node
pub struct AST<'a> {
    /// The arena all of this tree's nodes are allocated in.
    pub arena: Arena<'a>,
    /// The id of the root node within `arena`.
    pub root: NodeId
}
impl<'a> AST<'a> {
    /// Iterate over all error nodes stored anywhere in the arena.
    pub fn errors<'b>(&'b self) -> impl Iterator<Item = ErrorNode> + 'b {
        self.arena.iter().filter_map(ErrorNode::cast)
    }
    /// Recursively pretty-print `node` and its subtree, two spaces of
    /// indentation per level; drives the `Debug` impl below.
    fn fmt_node(&self, f: &mut fmt::Formatter, node: NodeId, indent: usize) -> fmt::Result {
        let node = &self.arena[node];
        write!(f, "{fill:indent$}{:?}", node.kind, fill = "", indent = indent)?;
        // Append a one-line summary of the node's payload, if any.
        match &node.data {
            Data::None => writeln!(f),
            Data::Error(err) => writeln!(f, " = ERROR: {}", err.1),
            Data::Ident(_meta, name) => writeln!(f, " = {}", name),
            Data::Interpol { meta: _, multiline } => writeln!(f, " {{ multiline: {} }}", multiline),
            Data::InterpolLiteral { original, content: _ } => writeln!(f, " = \"{}\"", original),
            Data::Token(_meta, token) => writeln!(f, " = {:?}", token),
            Data::Value(_meta, value) => writeln!(f, " = {}", value)
        }?;
        for child in node.children(&self.arena) {
            self.fmt_node(f, child, indent+2)?;
        }
        Ok(())
    }
}
// Debug output is the whole tree, printed recursively from the root.
impl<'a> fmt::Debug for AST<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.fmt_node(f, self.root, 0)
    }
}
/// The payload attached to an `ASTNode`, varying by node kind.
#[derive(Clone, Debug)]
pub enum Data {
    /// No payload (purely structural nodes).
    None,
    /// A parse error recorded in the tree.
    Error(Error),
    /// An identifier and its metadata.
    Ident(Meta, String),
    /// An interpolated string; `multiline` presumably marks `''`-style
    /// strings — confirm against the tokenizer.
    Interpol {
        meta: Meta,
        multiline: bool
    },
    /// A literal chunk of an interpolated string: `original` is the source
    /// text, `content` presumably the unescaped value.
    InterpolLiteral {
        original: String,
        content: String
    },
    /// A raw token, stored by kind only.
    Token(Meta, TokenKind),
    /// A literal value.
    Value(Meta, Value)
}
/// Discriminant for every syntactic construct an `ASTNode` can represent.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ASTKind {
    Apply,
    Assert,
    Attribute,
    Dynamic,
    Error,
    Ident,
    IfElse,
    Import,
    IndexSet,
    Inherit,
    InheritFrom,
    Interpol,
    InterpolAst,
    InterpolLiteral,
    Lambda,
    Let,
    LetIn,
    List,
    ListItem,
    Operation,
    OrDefault,
    Paren,
    PatBind,
    PatEntry,
    Pattern,
    Set,
    SetEntry,
    Token,
    Unary,
    Value,
    With
}
/// A single node of the AST: its kind, source span, payload, and the
/// arenatree links tying it into the tree.
#[derive(Clone, Debug)]
pub struct ASTNode {
    /// What construct this node represents.
    pub kind: ASTKind,
    /// The source region this node covers.
    pub span: Span,
    /// Kind-specific payload.
    pub data: Data,
    /// Intrusive child/sibling links used by the arena.
    pub node: Node
}
impl ASTNode {
    /// Wrap a bare token kind (plus its metadata) in a `Token` AST node.
    fn from_token_kind(meta: Meta, token: TokenKind) -> Self {
        // Copy the span out before `meta` is moved into the payload.
        let span = meta.span;
        Self {
            kind: ASTKind::Token,
            span,
            data: Data::Token(meta, token),
            node: Node::default()
        }
    }
    /// Wrap a `(Meta, Token)` pair, keeping only the token's kind.
    fn from_token((meta, token): (Meta, Token)) -> Self {
        Self::from_token_kind(meta, token.kind())
    }
    /// Build an identifier node.
    fn from_ident(meta: Meta, ident: String) -> Self {
        let span = meta.span;
        Self {
            kind: ASTKind::Ident,
            span,
            data: Data::Ident(meta, ident),
            node: Node::default()
        }
    }
    /// Build a literal value node.
    fn from_value(meta: Meta, value: Value) -> Self {
        let span = meta.span;
        Self {
            kind: ASTKind::Value,
            span,
            data: Data::Value(meta, value),
            node: Node::default()
        }
    }
    /// Iterate over this node's children and their siblings; the iterator
    /// is empty when the node has no child.
    pub fn children<'a>(&self, arena: &'a Arena) -> NodeIter<'a, ASTNode> {
        NodeIter { arena, cursor: self.node.child }
    }
}
// Give the arenatree machinery access to the embedded `Node` links.
impl AsRef<Node> for ASTNode {
    fn as_ref(&self) -> &Node {
        &self.node
    }
}
impl AsMut<Node> for ASTNode {
    fn as_mut(&mut self) -> &mut Node {
        &mut self.node
    }
}
/// A parse error, paired with the span it occurred at when known.
pub(crate) type Error = (Option<Span>, ParseError);
/// Parser-internal result type.
pub(crate) type Result<T> = std::result::Result<T, Error>;
// Parses a left-associative run of binary operations: parse one operand
// with `$next`, then while the lookahead is one of `$token`, consume the
// operator, parse the right operand, and fold operand/operator/operand
// into an `Operation` node spanning both sides.
// NOTE(review): `$only_once` is captured but never referenced in this
// expansion — presumably a leftover or forward-compat flag; confirm
// against the macro's call sites.
macro_rules! math {
    (only_once: $only_once:expr, $self:expr, $next:block, $($token:pat),*) => {{
        let mut val = $next;
        loop {
            match $self.peek_kind() {
                $(Some($token) => {
                    let val_span = val.span;
                    let val_id = $self.insert(val);
                    let operator = ASTNode::from_token($self.next().unwrap());
                    let operator = $self.insert(operator);
                    let expr = $next;
                    let expr_span = expr.span;
                    let expr = $self.insert(expr);
                    let children = $self.chain(&[val_id, operator, expr]);
                    // Fold into a single Operation node and keep looping.
                    val = ASTNode {
                        kind: ASTKind::Operation,
                        span: val_span.until(expr_span),
                        data: Data::None,
                        node: Node::with_child(children)
                    };
                },)*
                _ => break
            }
        }
        Ok(val)
    }};
    ($self:expr, $next:block, $($token:pat),*) => {{
        math!(only_once: false, $self, $next, $($token),*)
    }};
}
/// The parser. You may want to use the `parse` convenience function from this module instead.
pub struct Parser<'a, I>
    where I: Iterator<Item = (Meta, Token)>
{
    iter: I,
    // Up to two tokens of lookahead; tokens can also be pushed back here
    // after speculative reads.
    buffer: ArrayVec<[I::Item; 2]>,
    // Arena all produced AST nodes are allocated in.
    arena: Arena<'a>
}
impl<'a, I> Parser<'a, I>
where I: Iterator<Item = (Meta, Token)>
{
/// Create a new instance
pub fn new(iter: I) -> Self {
Self::with_arena(Arena::new(), iter)
}
/// Create a new instance with a specified arena
pub fn with_arena(arena: Arena<'a>, iter: I) -> Self {
Self {
iter,
buffer: ArrayVec::new(),
arena
}
}
/// Return a reference to the inner arena
pub fn arena(&self) -> &Arena<'a> {
&self.arena
}
/// Return the owned inner arena
pub fn into_arena(self) -> Arena<'a> {
self.arena
}
fn parse_branch<T>(&mut self, iter: T) -> Result<ASTNode>
where T: IntoIterator<Item = (Meta, Token)>
{
Parser::with_arena(self.arena.reference(), iter.into_iter())
.parse_expr()
}
fn insert(&mut self, node: ASTNode) -> NodeId {
self.arena.insert(node)
}
fn chain(&mut self, nodes: &[NodeId]) -> NodeId {
let mut list = NodeList::new();
list.push_all(nodes, &mut self.arena);
list.node().expect("chain called on empty list")
}
fn peek_meta(&mut self) -> Option<&(Meta, Token)> {
if self.buffer.is_empty() {
if let Some(token) = self.iter.next() {
self.buffer.push(token);
}
}
self.buffer.last()
}
fn peek(&mut self) -> Option<&Token> {
self.peek_meta().map(|(_, token)| token)
}
fn peek_kind(&mut self) -> Option<TokenKind> {
self.peek().map(Token::kind)
}
fn next_raw(&mut self) -> Result<I::Item> {
self.buffer.pop()
.or_else(|| self.iter.next())
.ok_or((None, ParseError::UnexpectedEOF))
}
fn next(&mut self) -> Result<I::Item> {
let mut next = self.next_raw()?;
if let Some(TokenKind::EOF) = self.peek_kind() {
let (mut meta, _) = self.next_raw()?;
next.0.trailing.append(&mut meta.leading);
}
Ok(next)
}
fn expect(&mut self, expected: TokenKind) -> Result<(Meta, Token)> {
if let Some((meta, actual)) = self.peek_meta() {
if actual.kind() == expected {
Ok(self.next().unwrap())
} else {
Err((Some(meta.span), ParseError::Expected(expected, Some(actual.kind()))))
}
} else {
Err((None, ParseError::Expected(expected, None)))
}
}
fn recover(&mut self, recover: &[TokenKind]) -> bool {
loop {
match self.peek_kind() {
Some(kind) if recover.contains(&kind) => return true,
None => return false,
_ => { self.next().unwrap(); }
}
}
}
    /// Build an `Interpol` node from tokenized interpolation parts: literal
    /// chunks become `InterpolLiteral` children, and each `${ ... }` token
    /// group is parsed as a sub-expression wrapped in `InterpolAst`.
    fn parse_interpol(&mut self, meta: Meta, multiline: bool, values: Vec<TokenInterpol>) -> Result<ASTNode> {
        let mut parsed = NodeList::new();
        for value in values {
            parsed.push(match value {
                TokenInterpol::Literal { span, original, content } => self.insert(ASTNode {
                    kind: ASTKind::InterpolLiteral,
                    span,
                    data: Data::InterpolLiteral { original, content },
                    node: Node::default()
                }),
                TokenInterpol::Tokens(tokens, close) => {
                    // Parse the interpolated tokens as a detached expression.
                    let parsed = self.parse_branch(tokens)?;
                    let parsed_span = parsed.span;
                    let parsed = self.insert(parsed);
                    let close_span = close.span;
                    let close = self.insert(ASTNode::from_token_kind(close, TokenKind::CurlyBClose));
                    let children = self.chain(&[parsed, close]);
                    self.insert(ASTNode {
                        kind: ASTKind::InterpolAst,
                        span: parsed_span.until(close_span),
                        data: Data::None,
                        node: Node::with_child(children)
                    })
                }
            }, &mut self.arena);
        }
        Ok(ASTNode {
            kind: ASTKind::Interpol,
            span: meta.span,
            data: Data::Interpol { meta, multiline },
            node: Node::with_child(parsed.node())
        })
    }
    /// Parse a single attribute-path component: an identifier, a literal
    /// value, a `${ ... }` dynamic attribute, or an interpolated string.
    /// Anything else is an `Expected(Ident, ...)` error.
    fn next_attr(&mut self) -> Result<ASTNode> {
        match self.next()? {
            (meta, Token::Ident(ident)) => Ok(ASTNode::from_ident(meta, ident)),
            (meta, Token::Value(value)) => Ok(ASTNode::from_value(meta, value)),
            (meta, Token::Dynamic(tokens, close)) => {
                // `${ tokens }`: parse the inner tokens as an expression.
                let open = ASTNode::from_token_kind(meta, TokenKind::Dynamic);
                let open_span = open.span;
                let open = self.insert(open);
                let parsed = self.parse_branch(tokens)?;
                let parsed = self.insert(parsed);
                let close_span = close.span;
                let close = self.insert(ASTNode::from_token_kind(close, TokenKind::CurlyBClose));
                let children = self.chain(&[open, parsed, close]);
                Ok(ASTNode {
                    kind: ASTKind::Dynamic,
                    span: open_span.until(close_span),
                    data: Data::None,
                    node: Node::with_child(children)
                })
            },
            (meta, Token::Interpol { multiline, parts }) => self.parse_interpol(meta, multiline, parts),
            (meta, token) => Err((Some(meta.span), ParseError::Expected(TokenKind::Ident, Some(token.kind()))))
        }
    }
    /// Parse a dot-separated attribute path (e.g. `a.b."c".${d}`), keeping
    /// the dot tokens as children of the resulting `Attribute` node.
    fn parse_attr(&mut self) -> Result<ASTNode> {
        let mut path = NodeList::new();
        let mut start = None;
        let mut end;
        loop {
            let attr = self.next_attr()?;
            // Remember the span of the first component.
            if start.is_none() {
                start = Some(attr.span);
            }
            end = attr.span;
            path.push(self.insert(attr), &mut self.arena);
            if self.peek_kind() == Some(TokenKind::Dot) {
                let dot = ASTNode::from_token(self.next().unwrap());
                path.push(self.insert(dot), &mut self.arena);
            } else {
                break;
            }
        }
        Ok(ASTNode {
            data: Data::None,
            kind: ASTKind::Attribute,
            // `start` is always set: the loop body runs at least once.
            span: start.unwrap().until(end),
            node: Node::with_child(path.node())
        })
    }
    /// Consume the next token, requiring it to be an identifier.
    fn next_ident(&mut self) -> Result<ASTNode> {
        match self.next()? {
            (meta, Token::Ident(name)) => Ok(ASTNode::from_ident(meta, name)),
            (meta, token) => Err((Some(meta.span), ParseError::Expected(TokenKind::Ident, Some(token.kind()))))
        }
    }
    /// Parse a function pattern such as `{ a, b ? default, ... } @ bind: body`,
    /// starting just after the opening `{` (`open`). `bind` carries an
    /// already-parsed leading `ident @` binding, if any; a trailing `@ ident`
    /// is parsed here, and having both is an `AlreadyBound` error. Returns
    /// the whole `Lambda` node (pattern, colon, body).
    fn parse_pattern(&mut self, open: Meta, bind: Option<(ASTNode, ASTNode)>) -> Result<ASTNode> {
        // The pattern's span starts at the leading binding when present.
        let start = bind.as_ref().map(|(ident, _)| ident.span).unwrap_or(open.span);
        let mut pattern = NodeList::new();
        let bind = if let Some((ident, at)) = bind {
            let ident_span = ident.span;
            let ident = self.insert(ident);
            let at_span = at.span;
            let at = self.insert(at);
            let children = self.chain(&[ident, at]);
            pattern.push(self.insert(ASTNode {
                kind: ASTKind::PatBind,
                span: ident_span.until(at_span),
                data: Data::None,
                node: Node::with_child(children)
            }), &mut self.arena);
            true
        } else {
            false
        };
        let open = ASTNode::from_token_kind(open, TokenKind::CurlyBOpen);
        let open = self.insert(open);
        pattern.push(open, &mut self.arena);
        // Parse the comma-separated entries until `...` or `}`.
        loop {
            let mut entry = NodeList::new();
            let ident = match self.peek_kind() {
                Some(TokenKind::Ellipsis) => {
                    let ellipsis = ASTNode::from_token(self.next().unwrap());
                    pattern.push(self.insert(ellipsis), &mut self.arena);
                    break;
                },
                Some(TokenKind::CurlyBClose) => break,
                _ => self.next_ident()?,
            };
            let ident_span = ident.span;
            let mut end_span = ident_span;
            entry.push(self.insert(ident), &mut self.arena);
            // Optional default value: `ident ? expr`.
            if self.peek_kind() == Some(TokenKind::Question) {
                let question = ASTNode::from_token(self.next().unwrap());
                let expr = self.parse_expr()?;
                end_span = expr.span;
                entry.push_all(&[
                    self.insert(question),
                    self.insert(expr),
                ], &mut self.arena);
            }
            let comma = match self.peek_kind() {
                Some(TokenKind::Comma) => {
                    let comma = ASTNode::from_token(self.next().unwrap());
                    end_span = comma.span;
                    entry.push(self.insert(comma), &mut self.arena);
                    true
                },
                _ => false
            };
            pattern.push(self.insert(ASTNode {
                kind: ASTKind::PatEntry,
                span: ident_span.until(end_span),
                data: Data::None,
                node: Node::with_child(entry.node())
            }), &mut self.arena);
            // No trailing comma means this was the last entry.
            if !comma {
                break;
            }
        }
        let close = ASTNode::from_token(self.expect(TokenKind::CurlyBClose)?);
        let mut end_span = close.span;
        pattern.push(self.insert(close), &mut self.arena);
        // Optional trailing binding: `} @ ident`.
        if let Some(TokenKind::At) = self.peek_kind() {
            let at = ASTNode::from_token(self.next().unwrap());
            if bind {
                // A leading `ident @` was already consumed: double binding.
                return Err((Some(at.span), ParseError::AlreadyBound));
            }
            let at_span = at.span;
            let at = self.insert(at);
            let ident = self.next_ident()?;
            end_span = ident.span;
            let ident = self.insert(ident);
            let children = self.chain(&[at, ident]);
            pattern.push(self.insert(ASTNode {
                kind: ASTKind::PatBind,
                span: at_span.until(end_span),
                data: Data::None,
                node: Node::with_child(children)
            }), &mut self.arena);
        }
        let pattern = self.insert(ASTNode {
            kind: ASTKind::Pattern,
            span: start.until(end_span),
            data: Data::None,
            node: Node::with_child(pattern.node())
        });
        // The pattern is followed by `: body`, forming the lambda itself.
        let colon = ASTNode::from_token(self.expect(TokenKind::Colon)?);
        let colon = self.insert(colon);
        let expr = self.parse_expr()?;
        let expr_span = expr.span;
        let expr = self.insert(expr);
        let children = self.chain(&[pattern, colon, expr]);
        Ok(ASTNode {
            kind: ASTKind::Lambda,
            span: start.until(expr_span),
            data: Data::None,
            node: Node::with_child(children)
        })
    }
    /// Parse the entries of a set or let-binding body up to (and consuming)
    /// the `until` token (`}` or `in`). Returns the chained entry nodes, if
    /// any, plus the terminator's node. On a malformed entry an `Error` node
    /// is recorded and parsing resynchronizes at the next plausible entry
    /// start; if no sync point is found, the error propagates.
    fn parse_set(&mut self, until: TokenKind) -> Result<(Option<NodeId>, ASTNode)> {
        let mut values = NodeList::new();
        loop {
            // NOTE: uses the (unstable) `try` block so `?` aborts only this
            // entry and the recovery code below can handle the error.
            let result = try {
                match self.peek_kind() {
                    token if token == Some(until) => break,
                    Some(TokenKind::Inherit) => {
                        let inherit = ASTNode::from_token(self.next().unwrap());
                        let inherit_span = inherit.span;
                        let inherit = self.insert(inherit);
                        let mut vars = NodeList::new();
                        vars.push(inherit, &mut self.arena);
                        // Optional `inherit (expr) ...` source expression.
                        if self.peek_kind() == Some(TokenKind::ParenOpen) {
                            let open = ASTNode::from_token(self.next().unwrap());
                            let open_span = open.span;
                            let open = self.insert(open);
                            let from = self.parse_expr()?;
                            let from = self.insert(from);
                            let close = ASTNode::from_token(self.expect(TokenKind::ParenClose)?);
                            let close_span = close.span;
                            let close = self.insert(close);
                            let children = self.chain(&[open, from, close]);
                            vars.push(self.insert(ASTNode {
                                kind: ASTKind::InheritFrom,
                                span: open_span.until(close_span),
                                data: Data::None,
                                node: Node::with_child(children)
                            }), &mut self.arena)
                        }
                        // The inherited identifiers themselves.
                        while let Some(Token::Ident(_)) = self.peek() {
                            let ident = self.next_ident().unwrap();
                            vars.push(self.insert(ident), &mut self.arena);
                        }
                        let semi = ASTNode::from_token(self.expect(TokenKind::Semicolon)?);
                        let semi_span = semi.span;
                        let semi = self.insert(semi);
                        vars.push(semi, &mut self.arena);
                        values.push(self.insert(ASTNode {
                            kind: ASTKind::Inherit,
                            span: inherit_span.until(semi_span),
                            data: Data::None,
                            node: Node::with_child(vars.node())
                        }), &mut self.arena);
                    },
                    _ => {
                        // Ordinary `attr.path = value;` entry.
                        let key = self.parse_attr()?;
                        let key_span = key.span;
                        let key = self.insert(key);
                        let assign = ASTNode::from_token(self.expect(TokenKind::Assign)?);
                        let assign = self.insert(assign);
                        let value = self.parse_expr()?;
                        let value = self.insert(value);
                        let semi = ASTNode::from_token(self.expect(TokenKind::Semicolon)?);
                        let semi_span = semi.span;
                        let semi = self.insert(semi);
                        let entry = self.chain(&[
                            key,
                            assign,
                            value,
                            semi,
                        ]);
                        values.push(self.insert(ASTNode {
                            kind: ASTKind::SetEntry,
                            span: key_span.until(semi_span),
                            data: Data::None,
                            node: Node::with_child(entry)
                        }), &mut self.arena)
                    }
                }
            };
            // Error recovery: skip ahead to the next entry start or the
            // terminator; give up and propagate if neither appears.
            if let Err(err) = result {
                if self.recover(&[TokenKind::Ident, TokenKind::Inherit, until]) {
                    values.push(self.insert(ASTNode {
                        kind: ASTKind::Error,
                        span: Span::default(),
                        data: Data::Error(err),
                        node: Node::default()
                    }), &mut self.arena);
                } else {
                    return Err(err);
                }
            }
        }
        let end = ASTNode::from_token(self.next().unwrap()); // Won't break until reached
        Ok((values.node(), end))
    }
/// Parse a single value expression.
///
/// Dispatches on the next token: parenthesised expressions, `import`,
/// `rec { … }` sets, `{ … }` (disambiguated between a plain set and a
/// function pattern via one token of lookahead), `[ … ]` lists,
/// dynamic attributes, literal values, identifiers (including
/// `ident @ { … }` patterns) and string interpolation. Afterwards any
/// number of `.attr` index suffixes — each optionally followed by
/// `or default` — are folded onto the parsed value.
fn parse_val(&mut self) -> Result<ASTNode> {
    let (meta, token) = self.next()?;
    let kind = token.kind();
    let mut val = match (kind, token) {
        // `( expr )`
        (TokenKind::ParenOpen, _) => {
            let open = ASTNode::from_token_kind(meta, TokenKind::ParenOpen);
            let open_span = open.span;
            let open = self.insert(open);
            let expr = self.parse_expr()?;
            let expr = self.insert(expr);
            let close = ASTNode::from_token(self.expect(TokenKind::ParenClose)?);
            let close_span = close.span;
            let close = self.insert(close);
            let children = self.chain(&[open, expr, close]);
            ASTNode {
                kind: ASTKind::Paren,
                span: open_span.until(close_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // `import <value>` — the argument is itself a value expression
        (TokenKind::Import, _) => {
            let import = ASTNode::from_token_kind(meta, kind);
            let import_span = import.span;
            let import = self.insert(import);
            let value = self.parse_val()?;
            let value_span = value.span;
            let value = self.insert(value);
            let children = self.chain(&[import, value]);
            ASTNode {
                kind: ASTKind::Import,
                span: import_span.until(value_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // `rec { … }` — recursive attribute set
        (TokenKind::Rec, _) => {
            let rec = ASTNode::from_token_kind(meta, kind);
            let rec_span = rec.span;
            let rec = self.insert(rec);
            let open = ASTNode::from_token(self.expect(TokenKind::CurlyBOpen)?);
            let open = self.insert(open);
            let (values, close) = self.parse_set(TokenKind::CurlyBClose)?;
            let close_span = close.span;
            let close = self.insert(close);
            // `values` is None for an empty set body
            let children = if let Some(values) = values {
                self.chain(&[rec, open, values, close])
            } else {
                self.chain(&[rec, open, close])
            };
            ASTNode {
                kind: ASTKind::Set,
                span: rec_span.until(close_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // `{ … }` — either a plain set or a lambda pattern; decide with
        // one token of lookahead
        (TokenKind::CurlyBOpen, _) => {
            let temporary = self.next()?;
            match (temporary.1.kind(), self.peek_kind()) {
                // These two-token prefixes can only start a pattern,
                // e.g. `{ a,`, `{ a ?`, `{ ... }`, `{ a }`, `{} :`, `{} @`
                (TokenKind::Ident, Some(TokenKind::Comma))
                | (TokenKind::Ident, Some(TokenKind::Question))
                | (TokenKind::Ellipsis, Some(TokenKind::CurlyBClose))
                | (TokenKind::Ident, Some(TokenKind::CurlyBClose))
                | (TokenKind::CurlyBClose, Some(TokenKind::Colon))
                | (TokenKind::CurlyBClose, Some(TokenKind::At)) => {
                    // We did a lookahead, put it back
                    self.buffer.push(temporary);
                    self.parse_pattern(meta, None)?
                },
                _ => {
                    // We did a lookahead, put it back
                    self.buffer.push(temporary);
                    let open_span = meta.span;
                    let open = self.insert(ASTNode::from_token_kind(meta, kind));
                    let (values, close) = self.parse_set(TokenKind::CurlyBClose)?;
                    let close_span = close.span;
                    let close = self.insert(close);
                    let children = if let Some(values) = values {
                        self.chain(&[open, values, close])
                    } else {
                        self.chain(&[open, close])
                    };
                    ASTNode {
                        kind: ASTKind::Set,
                        span: open_span.until(close_span),
                        data: Data::None,
                        node: Node::with_child(children)
                    }
                }
            }
        },
        // `[ … ]` — list; items are value expressions, no separators
        (TokenKind::SquareBOpen, _) => {
            let mut values = NodeList::new();
            let open = ASTNode::from_token_kind(meta, kind);
            let open_span = open.span;
            values.push(self.insert(open), &mut self.arena);
            loop {
                match self.peek_kind() {
                    // Stop on `]` or end of input; `expect` below
                    // reports the error for a missing `]`
                    None | Some(TokenKind::SquareBClose) => break,
                    _ => {
                        let val = self.parse_val()?;
                        let val_span = val.span;
                        let val = self.insert(val);
                        values.push(self.insert(ASTNode {
                            kind: ASTKind::ListItem,
                            span: val_span,
                            data: Data::None,
                            node: Node::with_child(val)
                        }), &mut self.arena);
                    }
                }
            }
            let close = ASTNode::from_token(self.expect(TokenKind::SquareBClose)?);
            let close_span = close.span;
            values.push(self.insert(close), &mut self.arena);
            ASTNode {
                kind: ASTKind::List,
                span: open_span.until(close_span),
                data: Data::None,
                node: Node::with_child(values.node())
            }
        },
        // Dynamic attribute: the tokenizer delivers the inner tokens
        // plus the metadata of the closing brace
        (_, Token::Dynamic(tokens, close)) => {
            let open = ASTNode::from_token_kind(meta, kind);
            let open_span = open.span;
            let open = self.insert(open);
            let parsed = self.parse_branch(tokens)?;
            let parsed = self.insert(parsed);
            let close_span = close.span;
            let close = self.insert(ASTNode::from_token_kind(close, TokenKind::CurlyBClose));
            let children = self.chain(&[open, parsed, close]);
            ASTNode {
                kind: ASTKind::Dynamic,
                span: open_span.until(close_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // Literal value (number, string, path, …)
        (_, Token::Value(val)) => ASTNode::from_value(meta, val),
        // Identifier — possibly the `outer @ { … }` form of a pattern
        (_, Token::Ident(name)) => if self.peek_kind() == Some(TokenKind::At) {
            let ident = ASTNode::from_ident(meta, name);
            let at = ASTNode::from_token(self.next().unwrap());
            let (open, _) = self.expect(TokenKind::CurlyBOpen)?;
            self.parse_pattern(open, Some((ident, at)))?
        } else {
            ASTNode::from_ident(meta, name)
        },
        // String interpolation
        (_, Token::Interpol { multiline, parts }) => self.parse_interpol(meta, multiline, parts)?,
        // Anything else cannot start a value
        (kind, _) => return Err((Some(meta.span), ParseError::Unexpected(kind)))
    };
    // Fold `.attr` index suffixes onto the value: `a.b.c`, optionally
    // with a default via the special `or` identifier: `a.b or c`
    while self.peek_kind() == Some(TokenKind::Dot) {
        let val_span = val.span;
        let val_id = self.insert(val);
        let dot = ASTNode::from_token(self.next().unwrap());
        let dot = self.insert(dot);
        let attr = self.next_attr()?;
        let attr_span = attr.span;
        let attr = self.insert(attr);
        match self.peek() {
            // `… or default`
            Some(Token::Ident(s)) if s == OR => {
                let or = self.next_ident().unwrap();
                let or = self.insert(or);
                let default = self.parse_val()?;
                let default_span = default.span;
                let default = self.insert(default);
                let children = self.chain(&[val_id, dot, attr, or, default]);
                val = ASTNode {
                    kind: ASTKind::OrDefault,
                    span: val_span.until(default_span),
                    data: Data::None,
                    node: Node::with_child(children)
                };
            },
            _ => {
                let children = self.chain(&[val_id, dot, attr]);
                val = ASTNode {
                    kind: ASTKind::IndexSet,
                    span: val_span.until(attr_span),
                    data: Data::None,
                    node: Node::with_child(children)
                };
            }
        }
    }
    Ok(val)
}
/// Parse function application: a value followed by zero or more
/// argument values, folded left-associatively into `Apply` nodes.
fn parse_fn(&mut self) -> Result<ASTNode> {
    let mut func = self.parse_val()?;
    // Keep consuming arguments as long as the next token can begin one.
    while self.peek_kind().map_or(false, |t| t.is_fn_arg()) {
        let start = func.span;
        let func_id = self.insert(func);
        let argument = self.parse_val()?;
        let end = argument.span;
        let argument_id = self.insert(argument);
        let children = self.chain(&[func_id, argument_id]);
        func = ASTNode {
            kind: ASTKind::Apply,
            span: start.until(end),
            data: Data::None,
            node: Node::with_child(children)
        };
    }
    Ok(func)
}
/// Parse unary arithmetic negation (`-expr`). Negation nests, so the
/// operand is parsed recursively; otherwise fall through to function
/// application.
fn parse_negate(&mut self) -> Result<ASTNode> {
    if self.peek_kind() != Some(TokenKind::Sub) {
        return self.parse_fn();
    }
    let minus = ASTNode::from_token(self.next().unwrap());
    let start = minus.span;
    let minus_id = self.insert(minus);
    let operand = self.parse_negate()?;
    let end = operand.span;
    let operand_id = self.insert(operand);
    let children = self.chain(&[minus_id, operand_id]);
    Ok(ASTNode {
        kind: ASTKind::Unary,
        span: start.until(end),
        data: Data::None,
        node: Node::with_child(children)
    })
}
/// Precedence level: `?` (attribute-existence test).
fn parse_isset(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_negate()? }, TokenKind::Question)
}
/// Precedence level: `++` (list concatenation).
fn parse_concat(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_isset()? }, TokenKind::Concat)
}
/// Precedence level: `*` and `/`.
fn parse_mul(&mut self) -> Result<ASTNode> {
    math!(
        self, { self.parse_concat()? },
        TokenKind::Mul,
        TokenKind::Div
    )
}
/// Precedence level: `+` and binary `-`.
fn parse_add(&mut self) -> Result<ASTNode> {
    math!(
        self, { self.parse_mul()? },
        TokenKind::Add,
        TokenKind::Sub
    )
}
/// Parse boolean inversion (`!expr`). Inversion nests, so the operand
/// is parsed recursively; otherwise fall through to the additive level.
fn parse_invert(&mut self) -> Result<ASTNode> {
    if self.peek_kind() != Some(TokenKind::Invert) {
        return self.parse_add();
    }
    let bang = ASTNode::from_token(self.next().unwrap());
    let start = bang.span;
    let bang_id = self.insert(bang);
    let operand = self.parse_invert()?;
    let end = operand.span;
    let operand_id = self.insert(operand);
    let children = self.chain(&[bang_id, operand_id]);
    Ok(ASTNode {
        kind: ASTKind::Unary,
        span: start.until(end),
        data: Data::None,
        node: Node::with_child(children)
    })
}
/// Precedence level: `//` (attribute-set merge).
fn parse_merge(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_invert()? }, TokenKind::Merge)
}
/// Precedence level: `<`, `<=`, `>`, `>=`.
/// `only_once` makes the level non-associative (e.g. `a < b < c` is
/// not chained).
fn parse_compare(&mut self) -> Result<ASTNode> {
    math!(
        only_once: true, self, { self.parse_merge()? },
        TokenKind::Less,
        TokenKind::LessOrEq,
        TokenKind::More,
        TokenKind::MoreOrEq
    )
}
/// Precedence level: `==` and `!=` (non-associative via `only_once`).
fn parse_equal(&mut self) -> Result<ASTNode> {
    math!(
        only_once: true, self, { self.parse_compare()? },
        TokenKind::Equal,
        TokenKind::NotEqual
    )
}
/// Precedence level: `&&`.
fn parse_and(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_equal()? }, TokenKind::And)
}
/// Precedence level: `||`.
fn parse_or(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_and()? }, TokenKind::Or)
}
/// Precedence level: `->` (implication) — the lowest-binding operator.
fn parse_implication(&mut self) -> Result<ASTNode> {
    math!(self, { self.parse_or()? }, TokenKind::Implication)
}
/// Entry point into the operator-precedence chain.
#[inline(always)]
fn parse_math(&mut self) -> Result<ASTNode> {
    // Always point this to the lowest-level math function there is
    self.parse_implication()
}
/// Parse Nix code into an AST.
///
/// Handles the statement-like forms (`let`/`let … in`, `with`, `if`,
/// `assert`) and otherwise parses an operator expression, promoting it
/// to a `Lambda` when an identifier is directly followed by `:`.
pub fn parse_expr(&mut self) -> Result<ASTNode> {
    Ok(match self.peek_kind() {
        Some(TokenKind::Let) => {
            let let_ = ASTNode::from_token(self.next().unwrap());
            let let_span = let_.span;
            let let_ = self.insert(let_);
            if self.peek_kind() == Some(TokenKind::CurlyBOpen) {
                // Legacy `let { … }` syntax
                let open = ASTNode::from_token(self.next().unwrap());
                let open = self.insert(open);
                let (values, close) = self.parse_set(TokenKind::CurlyBClose)?;
                let close_span = close.span;
                let close = self.insert(close);
                let children = if let Some(values) = values {
                    self.chain(&[let_, open, values, close])
                } else {
                    self.chain(&[let_, open, close])
                };
                ASTNode {
                    kind: ASTKind::Let,
                    // Fix: the span previously started at the `{`
                    // (open_span), excluding the `let` keyword even
                    // though `let_` is a child of this node. Start at
                    // `let` for consistency with the LetIn branch below.
                    span: let_span.until(close_span),
                    data: Data::None,
                    node: Node::with_child(children)
                }
            } else {
                // `let … in expr`
                let (values, in_) = self.parse_set(TokenKind::In)?;
                let in_ = self.insert(in_);
                let expr = self.parse_expr()?;
                let expr_span = expr.span;
                let expr = self.insert(expr);
                let children = if let Some(values) = values {
                    self.chain(&[let_, values, in_, expr])
                } else {
                    self.chain(&[let_, in_, expr])
                };
                ASTNode {
                    kind: ASTKind::LetIn,
                    span: let_span.until(expr_span),
                    data: Data::None,
                    node: Node::with_child(children)
                }
            }
        },
        // `with namespace; body`
        Some(TokenKind::With) => {
            let with = ASTNode::from_token(self.next().unwrap());
            let with_span = with.span;
            let with = self.insert(with);
            let namespace = self.parse_expr()?;
            let namespace = self.insert(namespace);
            let semi = ASTNode::from_token(self.expect(TokenKind::Semicolon)?);
            let semi = self.insert(semi);
            let body = self.parse_expr()?;
            let body_span = body.span;
            let body = self.insert(body);
            let children = self.chain(&[with, namespace, semi, body]);
            ASTNode {
                kind: ASTKind::With,
                span: with_span.until(body_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // `if cond then body else else_body`
        Some(TokenKind::If) => {
            let if_ = ASTNode::from_token(self.next().unwrap());
            let if_span = if_.span;
            let if_ = self.insert(if_);
            let condition = self.parse_expr()?;
            let condition = self.insert(condition);
            let then = ASTNode::from_token(self.expect(TokenKind::Then)?);
            let then = self.insert(then);
            let body = self.parse_expr()?;
            let body = self.insert(body);
            let else_ = ASTNode::from_token(self.expect(TokenKind::Else)?);
            let else_ = self.insert(else_);
            let else_body = self.parse_expr()?;
            let else_body_span = else_body.span;
            let else_body = self.insert(else_body);
            let children = self.chain(&[if_, condition, then, body, else_, else_body]);
            ASTNode {
                kind: ASTKind::IfElse,
                span: if_span.until(else_body_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        // `assert cond; body`
        Some(TokenKind::Assert) => {
            let assert = ASTNode::from_token(self.next().unwrap());
            let assert_span = assert.span;
            let assert = self.insert(assert);
            let condition = self.parse_expr()?;
            let condition = self.insert(condition);
            let semi = ASTNode::from_token(self.expect(TokenKind::Semicolon)?);
            let semi = self.insert(semi);
            let body = self.parse_expr()?;
            let body_span = body.span;
            let body = self.insert(body);
            let children = self.chain(&[assert, condition, semi, body]);
            ASTNode {
                kind: ASTKind::Assert,
                span: assert_span.until(body_span),
                data: Data::None,
                node: Node::with_child(children)
            }
        },
        _ => {
            let val = self.parse_math()?;
            // A bare identifier followed by `:` is a single-argument
            // lambda: `arg: body`
            if val.kind == ASTKind::Ident && self.peek_kind() == Some(TokenKind::Colon) {
                let val_span = val.span;
                let val = self.insert(val);
                let colon = ASTNode::from_token(self.next().unwrap());
                let colon = self.insert(colon);
                let expr = self.parse_expr()?;
                let expr_span = expr.span;
                let expr = self.insert(expr);
                let children = self.chain(&[val, colon, expr]);
                ASTNode {
                    kind: ASTKind::Lambda,
                    span: val_span.until(expr_span),
                    data: Data::None,
                    node: Node::with_child(children)
                }
            } else {
                val
            }
        }
    })
}
}
/// Convenience function for turning an iterator of tokens into an AST.
pub fn parse<I>(iter: I) -> Result<AST<'static>>
    where I: IntoIterator<Item = (Meta, Token)>
{
    let mut parser = Parser::new(iter.into_iter());
    let expr = parser.parse_expr()?;
    let mut arena = parser.into_arena();
    // The root expression is inserted before the arena is moved into
    // the returned AST.
    Ok(AST { root: arena.insert(expr), arena })
}
#[cfg(test)]
mod tests {
    use crate::{
        tokenizer::{Interpol as TokenInterpol, Meta, Span, Token, TokenKind},
        value::{Anchor, Value}
    };
    use super::*;
    // Shadows `std::assert_eq!` within this module: parses the given
    // token stream and compares the AST's Debug output against an
    // expected string dump, printing both sides on mismatch.
    macro_rules! assert_eq {
        ([$($token:expr),*], $expected:expr) => {
            let actual = format!("{:?}", parse(vec![$((Meta::default(), $token.into())),*])
                .expect("failed to parse test"));
            if actual != $expected {
                eprintln!("--- Actual ---\n{}--- End ---", actual);
                eprintln!("--- Expected ---\n{}--- End ---", $expected);
                panic!("Tests did not match");
            }
        };
    }
    // Plain, recursive, empty and dotted/interpolated/dynamic-key sets.
    #[test]
    fn set() {
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("meaning_of_life".into()), TokenKind::Assign, Token::Value(42.into()), TokenKind::Semicolon,
                Token::Ident("H4X0RNUM83R".into()), TokenKind::Assign, Token::Value(1.337.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = meaning_of_life
Token = Assign
Value = 42
Token = Semicolon
SetEntry
Attribute
Ident = H4X0RNUM83R
Token = Assign
Value = 1.337
Token = Semicolon
Token = CurlyBClose
"
        );
        assert_eq!(
            [
                TokenKind::Rec, TokenKind::CurlyBOpen,
                Token::Ident("test".into()), TokenKind::Assign, Token::Value(1.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Set
Token = Rec
Token = CurlyBOpen
SetEntry
Attribute
Ident = test
Token = Assign
Value = 1
Token = Semicolon
Token = CurlyBClose
"
        );
        assert_eq!(
            [TokenKind::CurlyBOpen, TokenKind::CurlyBClose],
            "\
Set
Token = CurlyBOpen
Token = CurlyBClose
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()),
                TokenKind::Dot, Token::Value("b".into()),
                TokenKind::Assign, Token::Value(1.into()), TokenKind::Semicolon,
                Token::Interpol {
                    multiline: false,
                    parts: vec![
                        TokenInterpol::Literal {
                            span: Span::default(),
                            original: "c".into(),
                            content: "c".into()
                        }
                    ]
                },
                TokenKind::Dot, Token::Dynamic(vec![(Meta::default(), Token::Ident("d".into()))], Meta::default()),
                TokenKind::Assign, Token::Value(2.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = a
Token = Dot
Value = \"b\"
Token = Assign
Value = 1
Token = Semicolon
SetEntry
Attribute
Interpol { multiline: false }
InterpolLiteral = \"c\"
Token = Dot
Dynamic
Token = Dynamic
Ident = d
Token = CurlyBClose
Token = Assign
Value = 2
Token = Semicolon
Token = CurlyBClose
"
        );
    }
    // Operator precedence (* binds tighter than +) and unary negation.
    #[test]
    fn math() {
        assert_eq!(
            [
                Token::Value(1.into()), TokenKind::Add, Token::Value(2.into()), TokenKind::Mul, Token::Value(3.into())
            ],
            "\
Operation
Value = 1
Token = Add
Operation
Value = 2
Token = Mul
Value = 3
"
        );
        assert_eq!(
            [
                Token::Value(5.into()), TokenKind::Mul,
                TokenKind::Sub, TokenKind::ParenOpen,
                Token::Value(3.into()), TokenKind::Sub, Token::Value(2.into()),
                TokenKind::ParenClose
            ],
            "\
Operation
Value = 5
Token = Mul
Unary
Token = Sub
Paren
Token = ParenOpen
Operation
Value = 3
Token = Sub
Value = 2
Token = ParenClose
"
        );
    }
    // `let … in expr`.
    #[test]
    fn let_in() {
        assert_eq!(
            [
                TokenKind::Let,
                Token::Ident("a".into()), TokenKind::Assign, Token::Value(42.into()), TokenKind::Semicolon,
                TokenKind::In,
                Token::Ident("a".into())
            ],
            "\
LetIn
Token = Let
SetEntry
Attribute
Ident = a
Token = Assign
Value = 42
Token = Semicolon
Token = In
Ident = a
"
        );
    }
    // Legacy `let { … }` (no `in`) syntax.
    #[test]
    fn let_legacy_syntax() {
        assert_eq!(
            [
                TokenKind::Let, TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Assign, Token::Value(42.into()), TokenKind::Semicolon,
                Token::Ident("body".into()), TokenKind::Assign, Token::Ident("a".into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Let
Token = Let
Token = CurlyBOpen
SetEntry
Attribute
Ident = a
Token = Assign
Value = 42
Token = Semicolon
SetEntry
Attribute
Ident = body
Token = Assign
Ident = a
Token = Semicolon
Token = CurlyBClose
"
        );
    }
    // String interpolation with an embedded expression between literals.
    #[test]
    fn interpolation() {
        assert_eq!(
            [
                Token::Interpol {
                    multiline: false,
                    parts: vec![
                        TokenInterpol::Literal {
                            span: Span::default(),
                            original: "Hello, ".into(),
                            content: "Hello, ".into()
                        },
                        TokenInterpol::Tokens(
                            vec![
                                (Meta::default(), TokenKind::CurlyBOpen.into()),
                                (Meta::default(), Token::Ident("world".into())),
                                (Meta::default(), TokenKind::Assign.into()),
                                (Meta::default(), Token::Value("World".into())),
                                (Meta::default(), TokenKind::Semicolon.into()),
                                (Meta::default(), TokenKind::CurlyBClose.into()),
                                (Meta::default(), TokenKind::Dot.into()),
                                (Meta::default(), Token::Ident("world".into()))
                            ],
                            Meta::default()
                        ),
                        TokenInterpol::Literal {
                            span: Span::default(),
                            original: "!".into(),
                            content: "!".into()
                        }
                    ]
                }
            ],
            "\
Interpol { multiline: false }
InterpolLiteral = \"Hello, \"
InterpolAst
IndexSet
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = world
Token = Assign
Value = \"World\"
Token = Semicolon
Token = CurlyBClose
Token = Dot
Ident = world
Token = CurlyBClose
InterpolLiteral = \"!\"
"
        );
    }
    // `.` indexing: chained, as a set key, and with non-ident attrs.
    #[test]
    fn index_set() {
        assert_eq!(
            [
                Token::Ident("a".into()),
                TokenKind::Dot, Token::Ident("b".into()),
                TokenKind::Dot, Token::Ident("c".into())
            ],
            "\
IndexSet
IndexSet
Ident = a
Token = Dot
Ident = b
Token = Dot
Ident = c
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()),
                TokenKind::Dot, Token::Ident("b".into()),
                TokenKind::Dot, Token::Ident("c".into()),
                TokenKind::Assign, Token::Value(1.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = a
Token = Dot
Ident = b
Token = Dot
Ident = c
Token = Assign
Value = 1
Token = Semicolon
Token = CurlyBClose
"
        );
        assert_eq!(
            [
                Token::Ident("test".into()),
                TokenKind::Dot, Token::Value("invalid ident".into()),
                TokenKind::Dot, Token::Interpol {
                    multiline: false,
                    parts: vec![
                        TokenInterpol::Literal {
                            span: Span::default(),
                            original: "hi".into(),
                            content: "hi".into()
                        }
                    ]
                },
                TokenKind::Dot, Token::Dynamic(
                    vec![(Meta::default(), Token::Ident("a".into()))],
                    Meta::default()
                )
            ],
            "\
IndexSet
IndexSet
IndexSet
Ident = test
Token = Dot
Value = \"invalid ident\"
Token = Dot
Interpol { multiline: false }
InterpolLiteral = \"hi\"
Token = Dot
Dynamic
Token = Dynamic
Ident = a
Token = CurlyBClose
"
        );
    }
    // `?` existence test and the `or` default fallback.
    #[test]
    fn isset() {
        assert_eq!(
            [
                Token::Ident("a".into()), TokenKind::Question, Token::Value("b".into()),
                TokenKind::And, Token::Value(true.into())
            ],
            "\
Operation
Operation
Ident = a
Token = Question
Value = \"b\"
Token = And
Value = true
"
        );
        assert_eq!(
            [
                Token::Ident("a".into()),
                TokenKind::Dot, Token::Ident("b".into()),
                TokenKind::Dot, Token::Ident("c".into()),
                Token::Ident(OR.into()), Token::Value(1.into()),
                TokenKind::Add, Token::Value(1.into())
            ],
            "\
Operation
OrDefault
IndexSet
Ident = a
Token = Dot
Ident = b
Token = Dot
Ident = c
Ident = or
Value = 1
Token = Add
Value = 1
"
        );
    }
    // `//` attribute-set merge.
    #[test]
    fn merge() {
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Assign, Token::Value(1.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose,
                TokenKind::Merge,
                TokenKind::CurlyBOpen,
                Token::Ident("b".into()), TokenKind::Assign, Token::Value(2.into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Operation
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = a
Token = Assign
Value = 1
Token = Semicolon
Token = CurlyBClose
Token = Merge
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = b
Token = Assign
Value = 2
Token = Semicolon
Token = CurlyBClose
"
        );
    }
    // `with namespace; expr`.
    #[test]
    fn with() {
        assert_eq!(
            [
                TokenKind::With, Token::Ident("namespace".into()), TokenKind::Semicolon,
                Token::Ident("expr".into())
            ],
            "\
With
Token = With
Ident = namespace
Token = Semicolon
Ident = expr
"
        );
    }
    // `import <nixpkgs> { }` — import applied to an argument.
    #[test]
    fn import() {
        assert_eq!(
            [
                TokenKind::Import,
                Token::Value(Value::Path(Anchor::Store, "nixpkgs".into())),
                TokenKind::CurlyBOpen, TokenKind::CurlyBClose
            ],
            "\
Apply
Import
Token = Import
Value = <nixpkgs>
Set
Token = CurlyBOpen
Token = CurlyBClose
"
        );
    }
    // `assert cond; body`.
    #[test]
    fn assert() {
        assert_eq!(
            [
                TokenKind::Assert, Token::Ident("a".into()), TokenKind::Equal, Token::Ident("b".into()), TokenKind::Semicolon,
                Token::Value("a == b".into())
            ],
            "\
Assert
Token = Assert
Operation
Ident = a
Token = Equal
Ident = b
Token = Semicolon
Value = \"a == b\"
"
        );
    }
    // `inherit a b;` and `inherit (set) c;`.
    #[test]
    fn inherit() {
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Assign, Token::Value(1.into()), TokenKind::Semicolon,
                TokenKind::Inherit, Token::Ident("b".into()), Token::Ident("c".into()), TokenKind::Semicolon,
                TokenKind::Inherit, TokenKind::ParenOpen, Token::Ident("set".into()), TokenKind::ParenClose,
                Token::Ident("c".into()), TokenKind::Semicolon,
                TokenKind::CurlyBClose
            ],
            "\
Set
Token = CurlyBOpen
SetEntry
Attribute
Ident = a
Token = Assign
Value = 1
Token = Semicolon
Inherit
Token = Inherit
Ident = b
Ident = c
Token = Semicolon
Inherit
Token = Inherit
InheritFrom
Token = ParenOpen
Ident = set
Token = ParenClose
Ident = c
Token = Semicolon
Token = CurlyBClose
"
        );
    }
    // Boolean operators, comparisons and if/else chains.
    #[test]
    fn ifs() {
        assert_eq!(
            [
                Token::Value(false.into()), TokenKind::Implication,
                TokenKind::Invert, Token::Value(false.into()),
                TokenKind::And,
                Token::Value(false.into()), TokenKind::Equal, Token::Value(true.into()),
                TokenKind::Or,
                Token::Value(true.into())
            ],
            "\
Operation
Value = false
Token = Implication
Operation
Operation
Unary
Token = Invert
Value = false
Token = And
Operation
Value = false
Token = Equal
Value = true
Token = Or
Value = true
"
        );
        assert_eq!(
            [
                Token::Value(1.into()), TokenKind::Less, Token::Value(2.into()),
                TokenKind::Or,
                Token::Value(2.into()), TokenKind::LessOrEq, Token::Value(2.into()),
                TokenKind::And,
                Token::Value(2.into()), TokenKind::More, Token::Value(1.into()),
                TokenKind::And,
                Token::Value(2.into()), TokenKind::MoreOrEq, Token::Value(2.into())
            ],
            "\
Operation
Operation
Value = 1
Token = Less
Value = 2
Token = Or
Operation
Operation
Operation
Value = 2
Token = LessOrEq
Value = 2
Token = And
Operation
Value = 2
Token = More
Value = 1
Token = And
Operation
Value = 2
Token = MoreOrEq
Value = 2
"
        );
        assert_eq!(
            [
                Token::Value(1.into()), TokenKind::Equal, Token::Value(1.into()),
                TokenKind::And,
                Token::Value(2.into()), TokenKind::NotEqual, Token::Value(3.into())
            ],
            "\
Operation
Operation
Value = 1
Token = Equal
Value = 1
Token = And
Operation
Value = 2
Token = NotEqual
Value = 3
"
        );
        assert_eq!(
            [
                TokenKind::If, Token::Value(false.into()), TokenKind::Then,
                Token::Value(1.into()),
                TokenKind::Else,
                TokenKind::If, Token::Value(true.into()), TokenKind::Then,
                Token::Value(2.into()),
                TokenKind::Else,
                Token::Value(3.into())
            ],
            "\
IfElse
Token = If
Value = false
Token = Then
Value = 1
Token = Else
IfElse
Token = If
Value = true
Token = Then
Value = 2
Token = Else
Value = 3
"
        );
    }
    // Lists and `++` concatenation.
    #[test]
    fn list() {
        assert_eq!(
            [
                TokenKind::SquareBOpen,
                Token::Ident("a".into()), Token::Value(2.into()), Token::Value(3.into()),
                Token::Value("lol".into()),
                TokenKind::SquareBClose
            ],
            "\
List
Token = SquareBOpen
ListItem
Ident = a
ListItem
Value = 2
ListItem
Value = 3
ListItem
Value = \"lol\"
Token = SquareBClose
"
        );
        assert_eq!(
            [
                TokenKind::SquareBOpen, Token::Value(1.into()), TokenKind::SquareBClose, TokenKind::Concat,
                TokenKind::SquareBOpen, Token::Value(2.into()), TokenKind::SquareBClose, TokenKind::Concat,
                TokenKind::SquareBOpen, Token::Value(3.into()), TokenKind::SquareBClose
            ],
            "\
Operation
Operation
List
Token = SquareBOpen
ListItem
Value = 1
Token = SquareBClose
Token = Concat
List
Token = SquareBOpen
ListItem
Value = 2
Token = SquareBClose
Token = Concat
List
Token = SquareBOpen
ListItem
Value = 3
Token = SquareBClose
"
        );
    }
    // Curried lambdas and left-associative application.
    #[test]
    fn functions() {
        assert_eq!(
            [
                Token::Ident("a".into()), TokenKind::Colon, Token::Ident("b".into()), TokenKind::Colon,
                Token::Ident("a".into()), TokenKind::Add, Token::Ident("b".into())
            ],
            "\
Lambda
Ident = a
Token = Colon
Lambda
Ident = b
Token = Colon
Operation
Ident = a
Token = Add
Ident = b
"
        );
        assert_eq!(
            [
                Token::Ident("a".into()), Token::Value(1.into()), Token::Value(2.into()),
                TokenKind::Add,
                Token::Value(3.into())
            ],
            "\
Operation
Apply
Apply
Ident = a
Value = 1
Value = 2
Token = Add
Value = 3
"
        );
    }
    // Lambda patterns: `...`, `@` binds (both sides), defaults.
    #[test]
    fn patterns() {
        assert_eq!(
            [TokenKind::CurlyBOpen, TokenKind::Ellipsis, TokenKind::CurlyBClose, TokenKind::Colon, Token::Value(1.into())],
            "\
Lambda
Pattern
Token = CurlyBOpen
Token = Ellipsis
Token = CurlyBClose
Token = Colon
Value = 1
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen, TokenKind::CurlyBClose, TokenKind::At, Token::Ident("outer".into()),
                TokenKind::Colon, Token::Value(1.into())
            ],
            "\
Lambda
Pattern
Token = CurlyBOpen
Token = CurlyBClose
PatBind
Token = At
Ident = outer
Token = Colon
Value = 1
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Comma,
                Token::Ident("b".into()), TokenKind::Question, Token::Value("default".into()),
                TokenKind::CurlyBClose,
                TokenKind::Colon,
                Token::Ident("a".into())
            ],
            "\
Lambda
Pattern
Token = CurlyBOpen
PatEntry
Ident = a
Token = Comma
PatEntry
Ident = b
Token = Question
Value = \"default\"
Token = CurlyBClose
Token = Colon
Ident = a
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Comma,
                Token::Ident("b".into()), TokenKind::Question, Token::Value("default".into()), TokenKind::Comma,
                TokenKind::Ellipsis,
                TokenKind::CurlyBClose,
                TokenKind::At,
                Token::Ident("outer".into()),
                TokenKind::Colon,
                Token::Ident("outer".into())
            ],
            "\
Lambda
Pattern
Token = CurlyBOpen
PatEntry
Ident = a
Token = Comma
PatEntry
Ident = b
Token = Question
Value = \"default\"
Token = Comma
Token = Ellipsis
Token = CurlyBClose
PatBind
Token = At
Ident = outer
Token = Colon
Ident = outer
"
        );
        assert_eq!(
            [
                Token::Ident("outer".into()), TokenKind::At,
                TokenKind::CurlyBOpen, Token::Ident("a".into()), TokenKind::CurlyBClose,
                TokenKind::Colon,
                Token::Ident("outer".into())
            ],
            "\
Lambda
Pattern
PatBind
Ident = outer
Token = At
Token = CurlyBOpen
PatEntry
Ident = a
Token = CurlyBClose
Token = Colon
Ident = outer
"
        );
        assert_eq!(
            [
                TokenKind::CurlyBOpen,
                Token::Ident("a".into()), TokenKind::Question, TokenKind::CurlyBOpen, TokenKind::CurlyBClose,
                TokenKind::CurlyBClose, TokenKind::Colon, Token::Ident("a".into())
            ],
            "\
Lambda
Pattern
Token = CurlyBOpen
PatEntry
Ident = a
Token = Question
Set
Token = CurlyBOpen
Token = CurlyBClose
Token = CurlyBClose
Token = Colon
Ident = a
"
        );
    }
    // A dynamic attribute (`${ … }`) as a standalone value.
    #[test]
    fn dynamic() {
        assert_eq!(
            [Token::Dynamic(vec![(Meta::default(), Token::Ident("a".into()))], Meta::default())],
            "\
Dynamic
Token = Dynamic
Ident = a
Token = CurlyBClose
"
        );
    }
}
|
use bigneon_db::models::Roles;
use functional::base::organization_invites;
// Per-role checks of organization-invite creation. The second argument
// to each helper appears to be the expected-authorization flag for that
// role — NOTE(review): confirm against functional::base::organization_invites.
#[cfg(test)]
mod create_tests {
    use super::*;
    #[test]
    fn create_org_member() {
        organization_invites::create(Roles::OrgMember, false);
    }
    #[test]
    fn create_admin() {
        organization_invites::create(Roles::Admin, true);
    }
    #[test]
    fn create_user() {
        organization_invites::create(Roles::User, false);
    }
    #[test]
    fn create_org_owner() {
        organization_invites::create(Roles::OrgOwner, true);
    }
}
// Per-role checks of invite creation with missing required parameters.
// Second argument appears to be the expected-authorization flag —
// NOTE(review): confirm against functional::base::organization_invites.
#[cfg(test)]
mod create_failure_missing_required_parameters_tests {
    use super::*;
    #[test]
    fn create_failure_missing_required_parameters_org_member() {
        organization_invites::create_failure_missing_required_parameters(Roles::OrgMember, false);
    }
    #[test]
    fn create_failure_missing_required_parameters_admin() {
        organization_invites::create_failure_missing_required_parameters(Roles::Admin, true);
    }
    #[test]
    fn create_failure_missing_required_parameters_user() {
        organization_invites::create_failure_missing_required_parameters(Roles::User, false);
    }
    #[test]
    fn create_failure_missing_required_parameters_org_owner() {
        organization_invites::create_failure_missing_required_parameters(Roles::OrgOwner, true);
    }
}
// Per-role checks of inviting an existing user by user id. Second
// argument appears to be the expected-authorization flag —
// NOTE(review): confirm against functional::base::organization_invites.
#[cfg(test)]
mod create_for_existing_user_via_user_id_tests {
    use super::*;
    #[test]
    fn create_for_existing_user_via_user_id_org_member() {
        organization_invites::create_for_existing_user_via_user_id(Roles::OrgMember, false);
    }
    #[test]
    fn create_for_existing_user_via_user_id_admin() {
        organization_invites::create_for_existing_user_via_user_id(Roles::Admin, true);
    }
    #[test]
    fn create_for_existing_user_via_user_id_user() {
        organization_invites::create_for_existing_user_via_user_id(Roles::User, false);
    }
    #[test]
    fn create_for_existing_user_via_user_id_org_owner() {
        organization_invites::create_for_existing_user_via_user_id(Roles::OrgOwner, true);
    }
}
// Per-role checks of inviting a user who does not yet have an account.
// Second argument appears to be the expected-authorization flag —
// NOTE(review): confirm against functional::base::organization_invites.
#[cfg(test)]
mod create_for_new_user_tests {
    use super::*;
    #[test]
    fn create_for_new_user_org_member() {
        organization_invites::create_for_new_user(Roles::OrgMember, false);
    }
    #[test]
    fn create_for_new_user_admin() {
        organization_invites::create_for_new_user(Roles::Admin, true);
    }
    #[test]
    fn create_for_new_user_user() {
        organization_invites::create_for_new_user(Roles::User, false);
    }
    #[test]
    fn create_for_new_user_org_owner() {
        organization_invites::create_for_new_user(Roles::OrgOwner, true);
    }
}
// Per-role checks of accepting an invite; all roles pass `true` here —
// NOTE(review): second argument's meaning inferred, confirm against
// functional::base::organization_invites.
#[cfg(test)]
mod accept_tests {
    use super::*;
    #[test]
    fn accept_org_member() {
        organization_invites::accept_invite_status_of_invite(Roles::OrgMember, true);
    }
    #[test]
    fn accept_admin() {
        organization_invites::accept_invite_status_of_invite(Roles::Admin, true);
    }
    #[test]
    fn accept_user() {
        organization_invites::accept_invite_status_of_invite(Roles::User, true);
    }
    #[test]
    fn accept_org_owner() {
        organization_invites::accept_invite_status_of_invite(Roles::OrgOwner, true);
    }
}
// Per-role checks of declining an invite; all roles pass `true` here —
// NOTE(review): second argument's meaning inferred, confirm against
// functional::base::organization_invites.
#[cfg(test)]
mod decline_tests {
    use super::*;
    #[test]
    fn decline_org_member() {
        organization_invites::decline_invite_status_of_invite(Roles::OrgMember, true);
    }
    #[test]
    fn decline_admin() {
        organization_invites::decline_invite_status_of_invite(Roles::Admin, true);
    }
    #[test]
    fn decline_user() {
        organization_invites::decline_invite_status_of_invite(Roles::User, true);
    }
    #[test]
    fn decline_org_owner() {
        organization_invites::decline_invite_status_of_invite(Roles::OrgOwner, true);
    }
}
|
// Copyright 2021 Vladimir Komendantskiy
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::config::{ExporterConfig, Whitelist, CONFIG_FILE_NAME};
use crate::gauges::PrometheusGauges;
use crate::geolocation::api::MaxMindAPIKey;
use crate::geolocation::caching::{GeolocationCache, GEO_DB_CACHE_TREE_NAME};
use crate::persistent_database::{PersistentDatabase, DATABASE_FILE_NAME};
use crate::rewards::caching::{
RewardsCache, APY_TREE_NAME, EPOCH_LENGTH_TREE_NAME, EPOCH_REWARDS_TREE_NAME,
EPOCH_VOTER_APY_TREE_NAME,
};
use crate::rewards::RewardsMonitor;
use crate::slots::SkippedSlotsMonitor;
use anyhow::Context;
use clap::{load_yaml, App};
use log::{debug, warn};
use solana_client::rpc_client::RpcClient;
use std::fs::{create_dir_all, File};
use std::io::Write;
use std::net::SocketAddr;
use std::path::Path;
use std::{fs, time::Duration};
pub mod config;
pub mod gauges;
pub mod geolocation;
pub mod persistent_database;
pub mod rewards;
pub mod rpc_extra;
pub mod slots;
/// Name of directory where solana-exporter will store information
/// (joined onto the user's home directory when resolving the default
/// config and database paths in `main`)
pub const EXPORTER_DATA_DIR: &str = ".solana-exporter";
/// Current version of `solana-exporter`, taken from Cargo package
/// metadata at compile time
pub const SOLANA_EXPORTER_VERSION: &str = env!("CARGO_PKG_VERSION");
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::init();
// Read from CLI arguments
let yaml = load_yaml!("cli.yml");
let cli_configs = App::from_yaml(yaml).get_matches();
// Subcommands
match cli_configs.subcommand() {
("generate", Some(sc)) => {
let template_config = ExporterConfig {
rpc: "http://localhost:8899".to_string(),
target: SocketAddr::new("0.0.0.0".parse()?, 9179),
maxmind: Some(MaxMindAPIKey::new("username", "password")),
vote_account_whitelist: Some(Whitelist::default()),
staking_account_whitelist: Some(Whitelist::default()),
};
let location = sc
.value_of("output")
.map(|s| Path::new(s).to_path_buf())
.unwrap_or_else(|| {
dirs::home_dir()
.unwrap()
.join(EXPORTER_DATA_DIR)
.join(CONFIG_FILE_NAME)
});
// Only attempt to create .solana-exporter, if user specified location then don't try
// to create directories
if sc.value_of("output").is_none() {
create_dir_all(&location.parent().unwrap())?;
}
let mut file = File::create(location)?;
file.write_all(toml::to_string_pretty(&template_config)?.as_ref())?;
std::process::exit(0);
}
(_, _) => {}
}
let persistent_database = {
// Use override from CLI or default.
let location = cli_configs
.value_of("database")
.map(|s| Path::new(s).to_path_buf())
.unwrap_or_else(|| {
dirs::home_dir()
.unwrap()
.join(EXPORTER_DATA_DIR)
.join(DATABASE_FILE_NAME)
});
// Show warning if database not found, since sled will make a new file?
if !location.exists() {
warn!("Database could not found at specified location. A new one will be generated!")
}
PersistentDatabase::new(&location)
}?;
let config = {
// Use override from CLI or default.
let location = cli_configs
.value_of("config")
.map(|s| Path::new(s).to_path_buf())
.unwrap_or_else(|| {
dirs::home_dir()
.unwrap()
.join(EXPORTER_DATA_DIR)
.join(CONFIG_FILE_NAME)
});
let file_contents = fs::read_to_string(location).context(
"Could not find config file in specified location. \
If running for the first time, run `solana-exporter generate` to initialise the config file \
and then put real values there.",
)?;
toml::from_str::<ExporterConfig>(&file_contents)
}?;
let exporter = prometheus_exporter::start(config.target)?;
let duration = Duration::from_secs(1);
let client = RpcClient::new(config.rpc.clone());
let geolocation_cache =
GeolocationCache::new(persistent_database.tree(GEO_DB_CACHE_TREE_NAME)?);
let rewards_cache = RewardsCache::new(
persistent_database.tree(EPOCH_REWARDS_TREE_NAME)?,
persistent_database.tree(APY_TREE_NAME)?,
persistent_database.tree(EPOCH_LENGTH_TREE_NAME)?,
persistent_database.tree(EPOCH_VOTER_APY_TREE_NAME)?,
);
let vote_accounts_whitelist = config.vote_account_whitelist.unwrap_or_default();
let staking_account_whitelist = config.staking_account_whitelist.unwrap_or_default();
let gauges = PrometheusGauges::new(vote_accounts_whitelist.clone());
let mut skipped_slots_monitor =
SkippedSlotsMonitor::new(&client, &gauges.leader_slots, &gauges.skipped_slot_percent);
let mut rewards_monitor = RewardsMonitor::new(
&client,
&gauges.current_staking_apy,
&gauges.average_staking_apy,
&gauges.validator_rewards,
&rewards_cache,
&staking_account_whitelist,
&vote_accounts_whitelist,
);
loop {
let _guard = exporter.wait_duration(duration);
debug!("Updating metrics");
// Get metrics we need
let epoch_info = client.get_epoch_info()?;
let nodes = client.get_cluster_nodes()?;
let vote_accounts = client.get_vote_accounts()?;
let node_whitelist = rpc_extra::node_pubkeys(&vote_accounts_whitelist, &vote_accounts);
gauges
.export_vote_accounts(&vote_accounts)
.context("Failed to export vote account metrics")?;
gauges
.export_epoch_info(&epoch_info, &client)
.context("Failed to export epoch info metrics")?;
gauges.export_nodes_info(&nodes, &client, &node_whitelist)?;
if let Some(maxmind) = config.maxmind.clone() {
// If the MaxMind API is configured, submit queries for any uncached IPs.
gauges
.export_ip_addresses(
&nodes,
&vote_accounts,
&geolocation_cache,
&maxmind,
&node_whitelist,
)
.await
.context("Failed to export IP address info metrics")?;
}
skipped_slots_monitor
.export_skipped_slots(&epoch_info, &node_whitelist)
.context("Failed to export skipped slots")?;
rewards_monitor
.export_rewards(&epoch_info)
.context("Failed to export rewards")?;
}
}
|
// Type aliases tying the generic register reader/writer to the CALM0
// (calendar alarm 0) register; generated svd2rust-style accessors.
#[doc = "Reader of register CALM0"]
pub type R = crate::R<u32, super::CALM0>;
#[doc = "Writer for register CALM0"]
pub type W = crate::W<u32, super::CALM0>;
#[doc = "Register CALM0 `reset()`'s with value 0"]
impl crate::ResetValue for super::CALM0 {
    type Type = u32;
    // The hardware reset value of CALM0 is all-zeros.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Field read/write proxies. Each *_W proxy borrows the register writer and
// masks its value into the field's bit span; `bits` is unsafe because the
// caller must keep `value` within the field width (excess bits are masked).
#[doc = "Reader of field `SEC`"]
pub type SEC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `SEC`"]
pub struct SEC_W<'a> {
    w: &'a mut W,
}
impl<'a> SEC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // SEC occupies bits 0:5 (mask 0x3f, no shift).
        self.w.bits = (self.w.bits & !0x3f) | ((value as u32) & 0x3f);
        self.w
    }
}
#[doc = "Reader of field `MIN`"]
pub type MIN_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MIN`"]
pub struct MIN_W<'a> {
    w: &'a mut W,
}
impl<'a> MIN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // MIN occupies bits 8:13 (6-bit field shifted left by 8).
        self.w.bits = (self.w.bits & !(0x3f << 8)) | (((value as u32) & 0x3f) << 8);
        self.w
    }
}
#[doc = "Reader of field `HR`"]
pub type HR_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `HR`"]
pub struct HR_W<'a> {
    w: &'a mut W,
}
impl<'a> HR_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // HR occupies bits 16:20 (5-bit field shifted left by 16).
        self.w.bits = (self.w.bits & !(0x1f << 16)) | (((value as u32) & 0x1f) << 16);
        self.w
    }
}
#[doc = "Reader of field `AMPM`"]
pub type AMPM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `AMPM`"]
pub struct AMPM_W<'a> {
    w: &'a mut W,
}
impl<'a> AMPM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // AMPM is the single bit 22; safe because a bool cannot exceed it.
        self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
        self.w
    }
}
// Field readers: each extracts its bit span from the cached register value.
impl R {
    #[doc = "Bits 0:5 - Seconds"]
    #[inline(always)]
    pub fn sec(&self) -> SEC_R {
        SEC_R::new((self.bits & 0x3f) as u8)
    }
    #[doc = "Bits 8:13 - Minutes"]
    #[inline(always)]
    pub fn min(&self) -> MIN_R {
        MIN_R::new(((self.bits >> 8) & 0x3f) as u8)
    }
    #[doc = "Bits 16:20 - Hours"]
    #[inline(always)]
    pub fn hr(&self) -> HR_R {
        HR_R::new(((self.bits >> 16) & 0x1f) as u8)
    }
    #[doc = "Bit 22 - AM/PM Designation"]
    #[inline(always)]
    pub fn ampm(&self) -> AMPM_R {
        AMPM_R::new(((self.bits >> 22) & 0x01) != 0)
    }
}
// Field writer accessors: each returns a write proxy borrowing this writer,
// so calls can be chained (e.g. `w.sec().bits(..).min().bits(..)`).
impl W {
    #[doc = "Bits 0:5 - Seconds"]
    #[inline(always)]
    pub fn sec(&mut self) -> SEC_W {
        SEC_W { w: self }
    }
    #[doc = "Bits 8:13 - Minutes"]
    #[inline(always)]
    pub fn min(&mut self) -> MIN_W {
        MIN_W { w: self }
    }
    #[doc = "Bits 16:20 - Hours"]
    #[inline(always)]
    pub fn hr(&mut self) -> HR_W {
        HR_W { w: self }
    }
    #[doc = "Bit 22 - AM/PM Designation"]
    #[inline(always)]
    pub fn ampm(&mut self) -> AMPM_W {
        AMPM_W { w: self }
    }
}
|
use utilities::prelude::*;
use crate::impl_vk_handle;
use crate::prelude::*;
use std::slice;
use std::sync::Arc;
/// One pending descriptor update: the target binding slot, the Vulkan
/// descriptor type, and the payload that will back the `VkWriteDescriptorSet`.
#[derive(Debug)]
pub struct DescriptorWrite {
    binding: u32,
    descriptor_type: VkDescriptorType,
    inner: InnerWrite,
}
/// Payload variants for a descriptor write. The AS variant keeps both the
/// NV acceleration-structure write struct and the handle vector it points
/// into, so the handles stay alive alongside the struct.
#[derive(Debug)]
enum InnerWrite {
    Buffers(Vec<VkDescriptorBufferInfo>),
    Images(Vec<VkDescriptorImageInfo>),
    AS(
        (
            VkWriteDescriptorSetAccelerationStructureNV,
            Vec<VkAccelerationStructureNV>,
        ),
    ),
}
impl DescriptorWrite {
    /// Builds a uniform-buffer write for `binding`, covering each buffer
    /// from offset 0 to its full byte size.
    pub fn uniform_buffers<T>(binding: u32, buffers: &[&Arc<Buffer<T>>]) -> Self {
        DescriptorWrite {
            binding,
            descriptor_type: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            inner: InnerWrite::Buffers(
                buffers
                    .iter()
                    .map(|buffer| VkDescriptorBufferInfo {
                        buffer: buffer.vk_handle(),
                        offset: 0,
                        range: buffer.byte_size(),
                    })
                    .collect(),
            ),
        }
    }
    /// Builds a storage-buffer write for `binding`; identical to
    /// `uniform_buffers` except for the descriptor type.
    pub fn storage_buffers<T>(binding: u32, buffers: &[&Arc<Buffer<T>>]) -> Self {
        DescriptorWrite {
            binding,
            descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
            inner: InnerWrite::Buffers(
                buffers
                    .iter()
                    .map(|buffer| VkDescriptorBufferInfo {
                        buffer: buffer.vk_handle(),
                        offset: 0,
                        range: buffer.byte_size(),
                    })
                    .collect(),
            ),
        }
    }
    /// Builds a combined image-sampler write for `binding`.
    ///
    /// Panics if any image has no sampler attached or its layout lock is
    /// poisoned (both `expect`s below).
    pub fn combined_samplers(binding: u32, images: &[&Arc<Image>]) -> Self {
        DescriptorWrite {
            binding,
            descriptor_type: VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            inner: InnerWrite::Images(
                images
                    .iter()
                    .map(|image| VkDescriptorImageInfo {
                        sampler: image
                            .sampler()
                            .as_ref()
                            .expect("image has no sampler attached")
                            .vk_handle(),
                        imageView: image.vk_handle(),
                        imageLayout: image.image_layout().expect("image layout lock error"),
                    })
                    .collect(),
            ),
        }
    }
    /// Builds a storage-image write for `binding`; storage images take no
    /// sampler, so the sampler handle is null.
    pub fn storage_images(binding: u32, images: &[&Arc<Image>]) -> Self {
        DescriptorWrite {
            binding,
            descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
            inner: InnerWrite::Images(
                images
                    .iter()
                    .map(|image| VkDescriptorImageInfo {
                        sampler: VkSampler::NULL_HANDLE,
                        imageView: image.vk_handle(),
                        imageLayout: image.image_layout().expect("image layout lock error"),
                    })
                    .collect(),
            ),
        }
    }
    /// Builds an NV acceleration-structure write for `binding`.
    pub fn acceleration_structures(
        binding: u32,
        acceleration_structures: &[&Arc<AccelerationStructure>],
    ) -> Self {
        let vk_as: Vec<VkAccelerationStructureNV> = acceleration_structures
            .iter()
            .map(|a| a.vk_handle())
            .collect();
        let mut write = DescriptorWrite {
            binding,
            descriptor_type: VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV,
            inner: InnerWrite::AS((
                VkWriteDescriptorSetAccelerationStructureNV::default(),
                vk_as,
            )),
        };
        // NOTE(review): `set_acceleration_structures` presumably stores a
        // pointer into `vk_as`, which lives inside `write` itself; moving
        // `write` out below would then leave that pointer dangling unless
        // the binding crate copies the handles. TODO confirm against the
        // vulkan binding implementation.
        if let InnerWrite::AS((vk_write_as, vk_as)) = &mut write.inner {
            vk_write_as.set_acceleration_structures(&vk_as);
        }
        write
    }
    /// Overrides the image layout on every image info in this write;
    /// no-op for buffer and acceleration-structure writes.
    pub fn change_image_layout(mut self, image_layout: VkImageLayout) -> Self {
        if let InnerWrite::Images(ref mut infos) = self.inner {
            for info in infos {
                info.imageLayout = image_layout;
            }
        }
        self
    }
    /// Attaches this write's payload to a `VkWriteDescriptorSet`: buffer or
    /// image infos directly, or (for AS) the descriptor count plus the NV
    /// extension struct chained into pNext.
    fn vk_write(&self, write: &mut VkWriteDescriptorSet) {
        match &self.inner {
            InnerWrite::Buffers(buffer_infos) => {
                write.set_buffer_infos(buffer_infos);
            }
            InnerWrite::Images(image_infos) => {
                write.set_image_infos(image_infos);
            }
            InnerWrite::AS((as_write, _)) => {
                write.descriptorCount = as_write.accelerationStructureCount;
                write.chain(as_write);
            }
        }
    }
}
/// Builder for allocating a `DescriptorSet` from a pool, optionally with
/// variable descriptor counts (EXT descriptor indexing).
pub struct DescriptorSetBuilder {
    device: Arc<Device>,
    descriptor_pool: Arc<DescriptorPool>,
    // Counts to pass via the EXT struct below; empty means "not used".
    variable_desc_counts: Vec<u32>,
    variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT,
}
impl DescriptorSetBuilder {
    /// Sets the variable descriptor counts used at allocation time.
    pub fn set_variable_descriptor_counts(mut self, descriptor_counts: &[u32]) -> Self {
        self.variable_desc_counts = descriptor_counts.to_vec();
        self
    }
    /// Allocates one descriptor set from the pool and wraps it.
    ///
    /// If variable counts were set, the EXT count struct is filled from
    /// `variable_desc_counts` (both fields live on `self`, so the pointer
    /// the chain stores stays valid until the allocate call) and chained
    /// into the allocate info.
    pub fn allocate(mut self) -> VerboseResult<Arc<DescriptorSet>> {
        // NOTE(review): the pool handle is used both as pool and as layout
        // here — the pool type appears to double as the set-layout holder
        // (see the VkHandle<VkDescriptorSetLayout> impls below).
        let layout = self.descriptor_pool.vk_handle();
        let mut descriptor_set_ci = VkDescriptorSetAllocateInfo::new(
            self.descriptor_pool.vk_handle(),
            slice::from_ref(&layout),
        );
        if !self.variable_desc_counts.is_empty() {
            self.variable_descriptor_count
                .set_descriptor_counts(&self.variable_desc_counts);
            descriptor_set_ci.chain(&self.variable_descriptor_count);
        }
        // Exactly one layout was passed, so exactly one set comes back.
        let descriptor_set = self.device.allocate_descriptor_sets(&descriptor_set_ci)?[0];
        Ok(Arc::new(DescriptorSet {
            device: self.device,
            pool: self.descriptor_pool,
            descriptor_set,
        }))
    }
}
/// An allocated Vulkan descriptor set; keeps its device and pool alive and
/// frees itself back to the pool on drop.
#[derive(Debug)]
pub struct DescriptorSet {
    device: Arc<Device>,
    pool: Arc<DescriptorPool>,
    descriptor_set: VkDescriptorSet,
}
impl DescriptorSet {
    /// Starts a builder for allocating a set from `descriptor_pool`.
    pub(crate) fn builder(
        device: Arc<Device>,
        descriptor_pool: Arc<DescriptorPool>,
    ) -> DescriptorSetBuilder {
        DescriptorSetBuilder {
            device,
            descriptor_pool,
            variable_desc_counts: Vec::new(),
            variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::new(
                &[],
            ),
        }
    }
    // TODO: add update function for VkCopyDescriptorSet
    /// Applies the given writes to this set via `vkUpdateDescriptorSets`
    /// (no copies). Each `DescriptorWrite` contributes one
    /// `VkWriteDescriptorSet` whose payload pointers reference data owned
    /// by the borrowed `writes` slice, which outlives the call.
    pub fn update(&self, writes: &[DescriptorWrite]) {
        debug_assert!(!writes.is_empty());
        let mut vk_writes = Vec::new();
        for write in writes {
            let mut write_desc = VkWriteDescriptorSet::new(
                self.descriptor_set,
                write.binding,
                0,
                write.descriptor_type,
            );
            write.vk_write(&mut write_desc);
            vk_writes.push(write_desc);
        }
        self.device
            .update_descriptor_sets(vk_writes.as_slice(), &[]);
    }
}
impl VulkanDevice for DescriptorSet {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// Generates VkHandle<VkDescriptorSet> impls returning `descriptor_set`.
impl_vk_handle!(DescriptorSet, VkDescriptorSet, descriptor_set);
// The layout handle is delegated to the pool in all four receiver forms
// (value, reference, Arc, reference-to-Arc), mirroring impl_vk_handle!.
impl VkHandle<VkDescriptorSetLayout> for DescriptorSet {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}
impl<'a> VkHandle<VkDescriptorSetLayout> for &'a DescriptorSet {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}
impl VkHandle<VkDescriptorSetLayout> for Arc<DescriptorSet> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}
impl<'a> VkHandle<VkDescriptorSetLayout> for &'a Arc<DescriptorSet> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}
impl Drop for DescriptorSet {
    /// Returns this set's handle to its pool when dropped. `drop` cannot
    /// propagate errors, so a failed free is only reported on stdout.
    fn drop(&mut self) {
        let sets = [self.descriptor_set];
        match self.device.free_descriptor_sets(self.pool.vk_handle(), &sets) {
            Ok(_) => {}
            Err(error) => println!("{}", error),
        }
    }
}
|
use pulse_sys as pulse;
use std::fmt;
// Declares a field-less enum whose discriminants come from `pulse_sys`
// constants, plus a fallible `from_value` parser and a `Display` impl that
// prints the variant name. The enum is #[non_exhaustive] so new protocol
// states can be added without a breaking change.
macro_rules! decl_enum {
    (
        $(#[doc = $doc:literal])*
        #[repr($ty:ident)]
        $vis:vis enum $name:ident {
            $(
                $(#[$m:meta])*
                $a:ident = $b:ident
            ),* $(,)?
        }
    ) => {
        $(#[doc = $doc])*
        #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
        #[non_exhaustive]
        #[repr($ty)]
        $vis enum $name {
            $(
                $(#[$m])*
                #[allow(missing_docs)]
                $a = pulse::$b,
            )*
        }
        impl $name {
            /// Parse the given enum from a value.
            // Returns None for values that match no declared constant.
            $vis fn from_value(value: $ty) -> Option<Self> {
                Some(match value {
                    $(pulse::$b => Self::$a,)*
                    _ => return None,
                })
            }
        }
        impl fmt::Display for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Display shows the Rust variant name, not the C constant.
                let id = match self {
                    $(Self::$a => stringify!($a),)*
                };
                f.write_str(id)
            }
        }
    }
}
// ContextState mirrors the PA_CONTEXT_* lifecycle constants from pulse_sys.
decl_enum! {
    /// The state of a connection context.
    #[repr(u32)]
    pub enum ContextState {
        /// The context hasn't been connected yet.
        Unconnected = PA_CONTEXT_UNCONNECTED,
        /// A connection is being established.
        Connecting = PA_CONTEXT_CONNECTING,
        /// The client is authorizing itself to the daemon.
        Authorizing = PA_CONTEXT_AUTHORIZING,
        /// The client is passing its application name to the daemon.
        SettingName = PA_CONTEXT_SETTING_NAME,
        /// The connection is established, the context is ready to execute operations.
        Ready = PA_CONTEXT_READY,
        /// The connection failed or was disconnected.
        Failed = PA_CONTEXT_FAILED,
        /// The connection was terminated cleanly.
        Terminated = PA_CONTEXT_TERMINATED,
    }
}
|
#[macro_use]
extern crate criterion;
#[macro_use]
extern crate bbs;
extern crate amcl_wrapper;
use amcl_wrapper::field_elem::FieldElementVector;
use bbs::prelude::*;
use std::collections::BTreeMap;
//use zmix::signatures::SignatureMessageVector;
use criterion::{Criterion, BenchmarkId};
/// Benchmarks `Issuer::new_keys` across a range of attribute counts.
fn keygen_benchmark(c: &mut Criterion) {
    let attribute_counts = [1, 2, 5, 10, 20, 50, 100, 200, 1000];
    let mut bench_group = c.benchmark_group("BBS+ Key Generation");
    for &count in attribute_counts.iter() {
        let id = BenchmarkId::new("Attributes", count);
        bench_group.bench_function(id, move |b| b.iter(|| Issuer::new_keys(count)));
    }
    bench_group.finish()
}
/// Benchmarks expanding a deterministic short public key into a full
/// public key for a range of attribute counts.
fn deterministic_keygen_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("BBS+ Deterministic Key Generation");
    // Only the deterministic public key is expanded here; the secret key is
    // unused, so it is bound as `_sk` to silence the warning.
    let (dpk, _sk) = Issuer::new_short_keys(None);
    for atts in vec![1, 2, 5, 10, 20, 50, 100, 200, 1000] {
        group.bench_function(BenchmarkId::new("Attributes", atts), move |b| {
            b.iter(|| dpk.to_public_key(atts).unwrap())
        });
    }
    // Fix: unlike every other benchmark in this file, this group was never
    // finished; call `finish` so the group summary is emitted consistently.
    group.finish()
}
/// Benchmarks the full blind-issuance round trip (prover blinds a link
/// secret, issuer blind-signs, prover completes the signature) for a range
/// of attribute counts. Key generation happens outside the measured closure.
fn full_issuance_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("BBS+ Issue Credential");
    for atts in vec![1, 2, 5, 10, 20, 50, 100] {
        let (pk, sk) = Issuer::new_keys(atts).unwrap();
        let value = "0".repeat(32);
        group.bench_function(BenchmarkId::new("Attributes", atts), |b| {
            b.iter(|| {
                let signing_nonce = Issuer::generate_signing_nonce();
                // TODO Need to add communication time and space complexity
                // Index 0 carries the blinded link secret; indices 1..atts
                // are plain hashed attribute messages.
                let link_secret = Prover::new_link_secret();
                let mut blind_messages = BTreeMap::new();
                blind_messages.insert(0, link_secret.clone());
                let (ctx, signature_blinding) =
                    Prover::new_blind_signature_context(&pk, &blind_messages, &signing_nonce).unwrap();
                let mut messages = BTreeMap::new();
                for x in 1..atts {
                    messages.insert(x,SignatureMessage::hash(value.as_bytes()));
                }
                // Send `ctx` to signer
                // let attributes = FieldElementVector::random(atts);
                let blind_signature = Issuer::blind_sign(&ctx, &messages, &sk, &pk, &signing_nonce).unwrap();
                // Re-insert the link secret at index 0 so the completed
                // signature covers all `atts` messages in order.
                let mut msgs = messages
                    .iter()
                    .map(|(_, m)| m.clone())
                    .collect::<Vec<SignatureMessage>>();
                msgs.insert(0, link_secret.clone());
                Prover::complete_signature(&pk, msgs.as_slice(), &blind_signature, &signature_blinding).unwrap();
            })
        });
    }
    group.finish()
}
/// Benchmarks blind issuance with a fixed two-message credential while the
/// byte size of the single hashed attribute grows; isolates hashing cost
/// from attribute-count cost.
fn issuance_attribute_size(c: &mut Criterion) {
    let mut group = c.benchmark_group("BBS+ Issue Credential");
    for size in vec![128, 512, 1024, 16*1024, 64*1024, 128*1024, 256*1024] {
        let (pk, sk) = Issuer::new_keys(2).unwrap();
        let value = "0".repeat(size);
        group.bench_function(BenchmarkId::new("Attribute Size", size), |b| {
            b.iter(|| {
                let signing_nonce = Issuer::generate_signing_nonce();
                // TODO Need to add communication time and space complexity
                // Index 0 is the blinded link secret; index 1 the payload.
                let link_secret = Prover::new_link_secret();
                let mut blind_messages = BTreeMap::new();
                blind_messages.insert(0, link_secret.clone());
                let (ctx, signature_blinding) =
                    Prover::new_blind_signature_context(&pk, &blind_messages, &signing_nonce).unwrap();
                let mut messages = BTreeMap::new();
                messages.insert(1,SignatureMessage::hash(value.as_bytes()));
                // Send `ctx` to signer
                // let attributes = FieldElementVector::random(atts);
                let blind_signature = Issuer::blind_sign(&ctx, &messages, &sk, &pk, &signing_nonce).unwrap();
                let mut msgs = messages
                    .iter()
                    .map(|(_, m)| m.clone())
                    .collect::<Vec<SignatureMessage>>();
                msgs.insert(0, link_secret.clone());
                Prover::complete_signature(&pk, msgs.as_slice(), &blind_signature, &signature_blinding).unwrap();
            })
        });
    }
    group.finish()
}
/// Benchmarks creating a BBS+ selective-disclosure proof where all
/// attributes except the link secret are revealed, for a range of
/// attribute counts. Issuance happens once per count, outside the
/// measured closure; proof creation and verification are measured.
fn bbs_prove_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("BBS+ Present Proof");
    let value = "0".repeat(32);
    for atts in vec![1, 2, 5, 10, 20, 50, 100] {
        ////////////////////////// BBS+ Signatures
        let (pk, sk) = Issuer::new_keys(atts).unwrap();
        let signing_nonce = Issuer::generate_signing_nonce();
        // TODO Need to add communication time and space complexity
        // Index 0 carries the blinded link secret; 1..atts are hashed
        // attribute messages.
        let link_secret = Prover::new_link_secret();
        let mut blind_messages = BTreeMap::new();
        blind_messages.insert(0, link_secret.clone());
        let (ctx, signature_blinding) =
            Prover::new_blind_signature_context(&pk, &blind_messages, &signing_nonce).unwrap();
        let mut messages = BTreeMap::new();
        for x in 1..atts {
            messages.insert(x, SignatureMessage::hash(value.as_bytes()));
        }
        // Send `ctx` to signer
        let blind_signature = Issuer::blind_sign(&ctx, &messages, &sk, &pk, &signing_nonce).unwrap();
        let mut msgs = messages
            .iter()
            .map(|(_, m)| m.clone())
            .collect::<Vec<SignatureMessage>>();
        msgs.insert(0, link_secret.clone());
        let res = Prover::complete_signature(&pk, msgs.as_slice(), &blind_signature, &signature_blinding);
        let signature = res.unwrap();
        group.bench_function(BenchmarkId::new("Attributes", atts), |b| {
            b.iter(|| {
                // Fix: `(1..atts).map(|v| v)` was a no-op map.
                let revealed_indicies: Vec<usize> = (1..atts).collect();
                let nonce = Verifier::generate_proof_nonce();
                let proof_request = Verifier::new_proof_request(&revealed_indicies, &pk).unwrap();
                // The link secret stays hidden, blinded with the proof nonce.
                let link_hidden = ProofMessage::Hidden(HiddenMessage::ExternalBlinding(
                    link_secret.clone(),
                    nonce.clone(),
                ));
                let mut proof_messages = vec![link_hidden];
                for _ in 1..atts {
                    proof_messages.push(pm_revealed!(value.as_bytes()));
                }
                let pok = Prover::commit_signature_pok(&proof_request, proof_messages.as_slice(), &signature)
                    .unwrap();
                let challenge = Prover::create_challenge_hash(&[pok.clone()], None, &nonce).unwrap();
                let proof = Prover::generate_signature_pok(pok, &challenge).unwrap();
                // Verification cost is deliberately included in the measured
                // closure; the result is not asserted (underscored to avoid
                // an unused-variable warning without changing the timing).
                let _verification = Verifier::verify_signature_pok(&proof_request, &proof, &nonce);
            })
        });
    }
    group.finish();
}
/// Benchmarks selective-disclosure proof creation with a fixed two-message
/// credential while the single revealed attribute's byte size grows.
fn bbs_prove__size_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("BBS+ Present Proof");
    let value = "0".repeat(32);
    for size in vec![128, 512, 1024, 16*1024, 64*1024, 128*1024, 256*1024] {
        let (pk, sk) = Issuer::new_keys(2).unwrap();
        let signing_nonce = Issuer::generate_signing_nonce();
        // TODO Need to add communication time and space complexity
        // Index 0 is the blinded link secret; index 1 the payload message.
        let link_secret = Prover::new_link_secret();
        let mut blind_messages = BTreeMap::new();
        blind_messages.insert(0, link_secret.clone());
        let (ctx, signature_blinding) =
            Prover::new_blind_signature_context(&pk, &blind_messages, &signing_nonce).unwrap();
        let mut messages = BTreeMap::new();
        messages.insert(1, SignatureMessage::hash(value.as_bytes()));
        // Send `ctx` to signer
        let blind_signature = Issuer::blind_sign(&ctx, &messages, &sk, &pk, &signing_nonce).unwrap();
        let mut msgs = messages
            .iter()
            .map(|(_, m)| m.clone())
            .collect::<Vec<SignatureMessage>>();
        msgs.insert(0, link_secret.clone());
        let res = Prover::complete_signature(&pk, msgs.as_slice(), &blind_signature, &signature_blinding);
        let signature = res.unwrap();
        group.bench_function(BenchmarkId::new("Attribute Size", size), |b| {
            b.iter(|| {
                // Only message 1 (the payload) is revealed.
                let revealed_indicies: Vec<usize> = vec![1];
                let nonce = Verifier::generate_proof_nonce();
                let proof_request = Verifier::new_proof_request(&revealed_indicies, &pk).unwrap();
                let link_hidden = ProofMessage::Hidden(HiddenMessage::ExternalBlinding(
                    link_secret.clone(),
                    nonce.clone(),
                ));
                let mut proof_messages = vec![link_hidden];
                proof_messages.push(pm_revealed!(value.as_bytes()));
                let pok = Prover::commit_signature_pok(&proof_request, proof_messages.as_slice(), &signature)
                    .unwrap();
                let challenge = Prover::create_challenge_hash(&[pok.clone()], None, &nonce).unwrap();
                let proof = Prover::generate_signature_pok(pok, &challenge).unwrap();
                // Fix: the unused `verification` binding is underscored; the
                // verify call stays inside the measured closure on purpose.
                let _verification = Verifier::verify_signature_pok(&proof_request, &proof, &nonce);
            })
        });
    }
    group.finish();
}
// Registers every BBS+ benchmark under one Criterion group and generates
// the harness entry point (`main`) for `cargo bench`.
criterion_group!(
    name = bench_bbs;
    config = Criterion::default();
    targets = keygen_benchmark, deterministic_keygen_benchmark, full_issuance_benchmark, issuance_attribute_size, bbs_prove_benchmark, bbs_prove__size_benchmark
);
criterion_main!(bench_bbs);
|
// Generated read-only accessor for the RX_LPI_TRAN_CNTR register; the
// RXLPITRC field occupies the entire 32-bit register.
#[doc = "Reader of register RX_LPI_TRAN_CNTR"]
pub type R = crate::R<u32, super::RX_LPI_TRAN_CNTR>;
#[doc = "Reader of field `RXLPITRC`"]
pub type RXLPITRC_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:31 - Rx LPI Transition counter"]
    #[inline(always)]
    pub fn rxlpitrc(&self) -> RXLPITRC_R {
        // The mask is a no-op on u32 (field spans all 32 bits); kept as
        // emitted by the code generator.
        RXLPITRC_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
|
use crate::cli_state::CliState;
use crate::StarcoinOpt;
use anyhow::{bail, Result};
use scmd::{CommandAction, ExecContext};
use starcoin_crypto::hash::{CryptoHash, HashValue};
use starcoin_rpc_client::RemoteStateReader;
use starcoin_state_api::AccountStateReader;
use starcoin_types::account_address::AccountAddress;
use starcoin_types::account_config;
use starcoin_types::language_storage::TypeTag;
use starcoin_types::transaction::{
parse_as_transaction_argument, RawUserTransaction, Script, TransactionArgument,
};
use starcoin_vm_runtime::type_tag_parser::parse_type_tags;
use std::fs::OpenOptions;
use std::io::Read;
use std::time::Duration;
use structopt::StructOpt;
use vm as move_vm;
/// CLI options for the `execute` subcommand: run a compiled Move script
/// as a transaction from the given account.
#[derive(Debug, StructOpt)]
#[structopt(name = "execute")]
pub struct ExecuteOpt {
    // Sender account; must already exist on chain.
    #[structopt(name = "account_address", short = "a", long = "address")]
    account_address: AccountAddress,
    #[structopt(
        short = "f",
        name = "bytecode_file",
        help = "script bytecode file path"
    )]
    bytecode_file: String,
    // Each entry is parsed with `parse_type_tags`, so one entry may itself
    // expand to several type tags.
    #[structopt(
        short = "t",
        long = "type_tag",
        name = "type-tag",
        help = "can specify multi type_tag"
    )]
    type_tags: Vec<String>,
    #[structopt(long="arg", name="transaction-arg", help ="can specify multi arg", parse(try_from_str = parse_as_transaction_argument))]
    args: Vec<TransactionArgument>,
    #[structopt(
        short = "g",
        name = "max-gas-amount",
        help = "max gas used to deploy the module"
    )]
    max_gas_amount: u64,
}
/// Command handler that validates, signs and submits a Move script
/// transaction, returning its hash.
pub struct ExecuteCommand;
impl CommandAction for ExecuteCommand {
    type State = CliState;
    type GlobalOpt = StarcoinOpt;
    type Opt = ExecuteOpt;
    type ReturnItem = HashValue;
    /// Reads the script bytecode from disk, sanity-checks that it
    /// deserializes as a compiled script, builds a raw transaction at the
    /// sender's current sequence number, signs it with the node wallet and
    /// submits it. Returns the signed transaction's hash on acceptance.
    fn run(
        &self,
        ctx: &ExecContext<Self::State, Self::GlobalOpt, Self::Opt>,
    ) -> Result<Self::ReturnItem> {
        let opt = ctx.opt();
        let bytecode_path = ctx.opt().bytecode_file.clone();
        let mut file = OpenOptions::new()
            .read(true)
            .write(false)
            .open(bytecode_path)?;
        let mut bytecode = vec![];
        file.read_to_end(&mut bytecode)?;
        // Deserialization is only a validity check; the raw bytes are what
        // gets submitted.
        let _compiled_script =
            match move_vm::file_format::CompiledScript::deserialize(bytecode.as_slice()) {
                Err(e) => {
                    bail!("invalid bytecode file, cannot deserialize as script, {}", e);
                }
                Ok(s) => s,
            };
        let mut type_tags = vec![];
        for type_tag in &opt.type_tags {
            type_tags.extend(
                parse_type_tags(type_tag.as_ref())?
                    .into_iter()
                    .map(|t| TypeTag::from(t)),
            );
        }
        let args = opt.args.clone();
        let txn_address = opt.account_address;
        // Read the sender's on-chain state to get the next sequence number.
        let client = ctx.state().client();
        let chain_state_reader = RemoteStateReader::new(client);
        let account_state_reader = AccountStateReader::new(&chain_state_reader);
        let account_resource = account_state_reader.get_account_resource(&txn_address)?;
        if account_resource.is_none() {
            bail!("address {} not exists on chain", &txn_address);
        }
        let account_resource = account_resource.unwrap();
        // NOTE(review): gas price is hard-coded to 1 and the expiration to
        // 5 minutes; presumably acceptable defaults for this CLI — confirm.
        let script_txn = RawUserTransaction::new_script(
            txn_address,
            account_resource.sequence_number(),
            Script::new(bytecode, type_tags, args),
            opt.max_gas_amount,
            1,
            account_config::starcoin_type_tag(),
            Duration::from_secs(60 * 5),
        );
        let signed_txn = client.wallet_sign_txn(script_txn)?;
        let txn_hash = CryptoHash::crypto_hash(&signed_txn);
        let succ = client.submit_transaction(signed_txn)?;
        if succ {
            Ok(txn_hash)
        } else {
            bail!("execute-txn is reject by node")
        }
    }
}
|
#![feature(box_syntax, box_patterns)]
extern crate image;
extern crate num;
use num::complex::{Complex, Complex64};
use std::f64::consts::PI;
use std::fs::File;
use std::path::Path;
const EPSILON: f64 = 1e-10;
const MAX_ITERS: usize = 100;
/// Outcome of a converged Newton iteration: the root found and how many
/// iterations it took to get within tolerance.
#[derive(Copy,Clone,Debug)]
struct NewtonsMethodResult {
    root: Complex64,
    iterations: usize,
}
/// Returns `true` when `x` and `y` differ by strictly less than `eps`.
fn equal_within(x: f64, y: f64, eps: f64) -> bool {
    let difference = x - y;
    difference.abs() < eps
}
/// Iterates z <- z - f(z)/f'(z) from `z0` until successive iterates are
/// within EPSILON/2 of each other, or `None` after MAX_ITERS iterations
/// without convergence. No special handling for f'(z) == 0 (the division
/// then yields non-finite components, which simply never converge).
fn newtons_method<F1, F2>(f: F1, f_prime: F2, z0: Complex64) -> Option<NewtonsMethodResult>
    where F1: Fn(Complex64) -> Complex64,
          F2: Fn(Complex64) -> Complex64 {
    let func_to_iterate = |z| z - f(z)/f_prime(z);
    let mut z_prev;
    let mut z = z0;
    // iters counts completed steps, 1-based.
    for iters in 1..MAX_ITERS+1 {
        z_prev = z;
        z = func_to_iterate(z);
        // Convergence test: |z - z_prev| < EPSILON/2 (Euclidean norm).
        if equal_within((z-z_prev).norm_sqr().sqrt(), 0.0, EPSILON/2.0) {
            return Some(NewtonsMethodResult {
                root: z,
                iterations: iters,
            });
        }
    }
    None
}
/// Maps a pixel coordinate to the complex number it samples, using the same
/// convention as `complex_grid`: column 0 is the left edge (smallest real
/// part), row 0 the top edge (largest imaginary part), and the last
/// column/row land exactly on the opposite edge.
///
/// Fix: this was an unimplemented stub that always returned 0+0i regardless
/// of its arguments; it now mirrors the formulas used by `complex_grid`.
fn image_coords_to_complex(image_x: usize, image_y: usize,
                           view_center: Complex64, view_width: f64, view_height: f64,
                           img_width: usize, img_height: usize) -> Complex64 {
    let re = view_center.re - view_width/2.0
        + view_width*(image_x as f64)/((img_width - 1) as f64);
    let im = view_center.im + view_height/2.0
        - view_height*(image_y as f64)/((img_height - 1) as f64);
    Complex64::new(re, im)
}
/// Builds a `height_res` x `width_res` grid of complex samples covering a
/// `width` x `height` rectangle centred on `center`. Row 0 is the top edge
/// (largest imaginary part); column 0 is the left edge (smallest real part).
fn complex_grid(center: Complex64,
                width: f64, height: f64,
                width_res: usize, height_res: usize) -> Vec<Vec<Complex64>> {
    (0..height_res)
        .map(|y| {
            (0..width_res)
                .map(|x| {
                    let re_part = center.re - width/2.0 + width*(x as f64)/((width_res - 1) as f64);
                    let im_part = center.im + height/2.0 - height*(y as f64)/((height_res - 1) as f64);
                    Complex64::new(re_part, im_part)
                })
                .collect()
        })
        .collect()
}
/// Returns the n complex n-th roots of unity, e^(2*pi*i*k/n) for k = 0..n;
/// an empty vector when n == 0.
fn roots_of_unity(n: usize) -> Vec<Complex64> {
    if n == 0 {
        return Vec::new();
    }
    (0..n)
        .map(|i| Complex64::from_polar(&1.0, &(2.0*PI*(i as f64)/(n as f64))))
        .collect()
}
/// Builds closures evaluating the monic polynomial with the given `roots`
/// and its derivative (via the product rule), returned as boxed trait
/// objects so both can be passed to `newtons_method`.
///
/// Fix: replaces the nightly-only `box` expression with stable `Box::new`
/// and the deprecated bare-trait return type `Box<Fn...>` with
/// `Box<dyn Fn...>` — same types, stable/modern syntax.
fn polynomial_from_roots(roots: &Vec<Complex64>) -> (Box<dyn Fn(Complex64) -> Complex64>, Box<dyn Fn(Complex64) -> Complex64>) {
    // f(z) = prod_i (z - r_i)
    let roots_clone = roots.clone();
    let f = move |z: Complex64| -> Complex64 {
        let mut res = Complex64::new(1.0, 0.0);
        for root in &roots_clone {
            res = res * (z - root);
        }
        res
    };
    // f'(z) = sum_j prod_{i != j} (z - r_i)   (product rule)
    // NOTE(review): roots are excluded by value comparison, so duplicated
    // roots would each exclude all copies — fine for distinct roots of
    // unity, worth confirming if reused with repeated roots.
    let roots_clone_2 = roots.clone();
    let f_prime = move |z: Complex64| -> Complex64 {
        let mut res = Complex64::new(0.0, 0.0);
        for excluded_root in &roots_clone_2 {
            let mut partial_sum = Complex64::new(1.0, 0.0);
            for root in &roots_clone_2 {
                if root != excluded_root {
                    partial_sum = partial_sum * (z - root);
                }
            }
            res = res + partial_sum;
        }
        res
    };
    (Box::new(f), Box::new(f_prime))
}
/// Renders a Newton-fractal shading for z^3 - 1 over a 4x4 window centred
/// at the origin and writes it to `fractal.png`: each pixel's red channel
/// encodes how many iterations its starting point needed to converge.
fn main() {
    let img_width = 3000;
    let img_height = 3000;
    let roots = roots_of_unity(3);
    let (f, f_prime) = polynomial_from_roots(&roots);
    let grid = complex_grid(Complex64::new(0.0, 0.0), 4.0, 4.0, img_width, img_height);
    let mut imgbuf = image::ImageBuffer::new(img_width as u32, img_height as u32);
    // Iterate over the coordinates and pixels of the image
    for (x, y, pixel) in imgbuf.enumerate_pixels_mut() {
        let result = newtons_method(&*f, &*f_prime, grid[y as usize][x as usize]);
        if result.is_none() {
            println!("({:?}) did not converge", grid[y as usize][x as usize]);
            continue;
        }
        // Fix: the previous `(iterations as u8) * 5` overflowed u8 for more
        // than 51 iterations (panic in debug builds, silent wrap in release);
        // saturate the red channel at 255 instead.
        let shade = (result.unwrap().iterations * 5).min(255) as u8;
        *pixel = image::Rgb([shade, 0, 0]);
    }
    // Save the image as “fractal.png”
    let fout = &mut File::create(&Path::new("fractal.png")).unwrap();
    // We must indicate the image’s color type and what format to save as
    image::ImageRgb8(imgbuf).save(fout, image::PNG);
    println!("{:?}\n{}", roots, roots.len());
    println!("done!");
}
|
use super::minimax::*;
use super::board::*;
// get_score: 0 for no winner, +10 when X (token 1) has a winning line,
// -10 when O (token 2) has one.
#[test]
fn get_score_gets_scores() {
    let empty_board = Board::new();
    assert_eq!(get_score(&empty_board), 0);
    // X completes the top row (spaces 0,1,2).
    let mut x_won_board = Board::new();
    x_won_board.set_spaces(vec![0, 1, 2], 1);
    assert_eq!(get_score(&x_won_board), 10);
    // O completes the right column (spaces 2,5,8).
    let mut o_won_board = Board::new();
    o_won_board.set_spaces(vec![2, 5, 8], 2);
    assert_eq!(get_score(&o_won_board), -10);
}
// On an empty board it is X's (token 1's) turn.
#[test]
fn get_token_for_empty_board_returns_X() {
    let board = Board::new();
    assert_eq!(get_token(&board), 1); // X goes first
}
// With three X moves and two O moves played, O (token 2) moves next.
#[test]
fn get_token_returns_O_when_its_Os_turn() {
    let mut board = Board::new();
    board.set_spaces(vec![0, 1, 5], 1);
    board.set_spaces(vec![2, 3], 2);
    assert_eq!(get_token(&board), 2); // O's turn
}
// find_max_index/find_min_index return positions, not values: 134 sits at
// index 3 and -25 at index 1.
#[test]
fn find_max_and_find_min() {
    let scores = vec![1, -25, 3, 134, 0];
    assert_eq!(find_max_index(&scores), 3);
    assert_eq!(find_min_index(&scores), 1);
}
// minimax on a mid-game board returns (Ok(best_space), score); here the
// best move for the side to play is space 7 with score 9.
#[test]
fn minimax_returns_tuple_of_space_to_score() {
    let mut board = Board::new();
    board.set_spaces(vec![6, 8], 1);
    board.set_spaces(vec![0, 4], 2);
    // 2 | 0 | 0
    // ---+---+---
    // 0 | 2 | 0
    // ---+---+---
    // 1 | 0 | 1
    let space_to_score = minimax(board);
    let space = match space_to_score.0 {
        Ok(x) => x,
        // Fix: `panic!(e)` with a non-string payload is deprecated and a
        // hard error in the 2021 edition; format the error instead.
        Err(e) => panic!("{:?}", e),
    };
    let score = space_to_score.1;
    assert_eq!(space, 7);
    assert_eq!(score, 9);
}
// When the game is already won, minimax returns Err for the space while
// still reporting the terminal score (X win => 10). The sentinel 999
// stands in for the Err case.
#[test]
fn minimax_returns_Err_to_score_if_game_is_over() {
    let mut board = Board::new();
    board.set_spaces(vec![0, 1, 2], 1);
    let space_to_score = minimax(board);
    // Fix: the match bound an unused `e`; `unwrap_or` expresses the same
    // Ok-or-sentinel logic without the warning.
    let space = space_to_score.0.unwrap_or(999);
    let score = space_to_score.1;
    assert_eq!(space, 999);
    assert_eq!(score, 10);
}
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
// Generated windows-rs style bindings: WinRT enums are modeled as
// transparent newtypes over i32 with associated constants, and WinRT class
// types as opaque COM interface pointers.
#[repr(transparent)]
pub struct AccountPictureKind(pub i32);
impl AccountPictureKind {
    pub const SmallImage: Self = Self(0i32);
    pub const LargeImage: Self = Self(1i32);
    pub const Video: Self = Self(2i32);
}
impl ::core::marker::Copy for AccountPictureKind {}
impl ::core::clone::Clone for AccountPictureKind {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT runtime-class handles (COM interface pointers).
pub type AdvertisingManagerForUser = *mut ::core::ffi::c_void;
pub type AssignedAccessSettings = *mut ::core::ffi::c_void;
pub type DiagnosticsSettings = *mut ::core::ffi::c_void;
pub type FirstSignInSettings = *mut ::core::ffi::c_void;
pub type GlobalizationPreferencesForUser = *mut ::core::ffi::c_void;
// Result codes returned when setting the account picture.
#[repr(transparent)]
pub struct SetAccountPictureResult(pub i32);
impl SetAccountPictureResult {
    pub const Success: Self = Self(0i32);
    pub const ChangeDisabled: Self = Self(1i32);
    pub const LargeOrDynamicError: Self = Self(2i32);
    pub const VideoFrameSizeError: Self = Self(3i32);
    pub const FileSizeError: Self = Self(4i32);
    pub const Failure: Self = Self(5i32);
}
impl ::core::marker::Copy for SetAccountPictureResult {}
impl ::core::clone::Clone for SetAccountPictureResult {
    fn clone(&self) -> Self {
        *self
    }
}
// Result codes returned when setting the lock-screen image feed.
#[repr(transparent)]
pub struct SetImageFeedResult(pub i32);
impl SetImageFeedResult {
    pub const Success: Self = Self(0i32);
    pub const ChangeDisabled: Self = Self(1i32);
    pub const UserCanceled: Self = Self(2i32);
}
impl ::core::marker::Copy for SetImageFeedResult {}
impl ::core::clone::Clone for SetImageFeedResult {
    fn clone(&self) -> Self {
        *self
    }
}
pub type UserProfilePersonalizationSettings = *mut ::core::ffi::c_void;
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
mod abomonation_cache;
mod errors;
mod lrucache;
pub use crate::abomonation_cache::*;
pub use crate::errors::*;
pub use crate::lrucache::*;
// export Abomonation so that users of this crate don't need to add abomination as dependency
pub use abomonation::Abomonation;
// cxx-generated FFI bridge to the C++ cachelib API. Every declaration
// below must mirror cachelib/rust/src/cachelib.h exactly; keep both
// sides in sync when changing a signature.
#[cxx::bridge(namespace = "facebook::rust::cachelib")]
mod ffi {
    #[namespace = "std::chrono"]
    extern "C++" {
        type milliseconds = stdchrono::milliseconds;
    }
    #[namespace = "folly"]
    extern "C++" {
        type StringPiece<'a> = folly::StringPiece<'a>;
    }
    unsafe extern "C++" {
        include!("cachelib/rust/src/cachelib.h");
        #[namespace = "facebook::cachelib"]
        type CacheAdmin;
        fn make_cacheadmin(
            cache: Pin<&mut LruAllocator>,
            oncall: &CxxString,
        ) -> Result<UniquePtr<CacheAdmin>>;
        #[namespace = "facebook::cachelib"]
        type LruAllocator;
        // Allocator constructors: in-process and shared-memory backed.
        fn make_lru_allocator(
            config: UniquePtr<LruAllocatorConfig>,
        ) -> Result<UniquePtr<LruAllocator>>;
        fn make_shm_lru_allocator(
            config: UniquePtr<LruAllocatorConfig>,
        ) -> Result<UniquePtr<LruAllocator>>;
        type LruAllocatorConfig;
        fn make_lru_allocator_config() -> Result<UniquePtr<LruAllocatorConfig>>;
        fn setCacheSize(
            self: Pin<&mut LruAllocatorConfig>,
            size: usize,
        ) -> Pin<&mut LruAllocatorConfig>;
        fn enable_container_memory_monitor(config: Pin<&mut LruAllocatorConfig>) -> Result<bool>;
        #[namespace = "facebook::cachelib"]
        type RebalanceStrategy;
        fn make_hits_per_slab_rebalancer(
            diff_ratio: f64,
            min_retained_slabs: u32,
            min_tail_age: u32,
        ) -> Result<SharedPtr<RebalanceStrategy>>;
        fn make_lru_tail_age_rebalancer(
            age_difference_ratio: f64,
            min_retained_slabs: u32,
        ) -> Result<SharedPtr<RebalanceStrategy>>;
        // Memory monitors — parameters presumably mirror cachelib's
        // MemoryMonitor configuration; see the C++ header for semantics.
        fn enable_free_memory_monitor(
            config: Pin<&mut LruAllocatorConfig>,
            interval: milliseconds,
            advisePercentPerIteration: u32,
            maxAdvisePercentage: u32,
            lowerLimit: u32,
            upperLimit: u32,
            adviseStrategy: SharedPtr<RebalanceStrategy>,
        ) -> Result<()>;
        fn enable_resident_memory_monitor(
            config: Pin<&mut LruAllocatorConfig>,
            interval: milliseconds,
            advisePercentPerIteration: u32,
            maxAdvisePercentage: u32,
            lowerLimit: u32,
            upperLimit: u32,
            adviseStrategy: SharedPtr<RebalanceStrategy>,
        ) -> Result<()>;
        fn enable_pool_rebalancing(
            config: Pin<&mut LruAllocatorConfig>,
            strategy: SharedPtr<RebalanceStrategy>,
            interval: milliseconds,
        ) -> Result<()>;
        fn enable_pool_resizing(
            config: Pin<&mut LruAllocatorConfig>,
            strategy: SharedPtr<RebalanceStrategy>,
            interval: milliseconds,
            slabs_per_iteration: u32,
        ) -> Result<()>;
        fn set_access_config(
            config: Pin<&mut LruAllocatorConfig>,
            bucketsPower: u32,
            locksPower: u32,
        ) -> Result<()>;
        fn set_base_address(config: Pin<&mut LruAllocatorConfig>, addr: usize) -> Result<()>;
        fn setCacheName<'a>(
            self: Pin<&'a mut LruAllocatorConfig>,
            name: &CxxString,
        ) -> Pin<&'a mut LruAllocatorConfig>;
        fn enable_cache_persistence(
            config: Pin<&mut LruAllocatorConfig>,
            directory: Pin<&mut CxxString>,
        );
        // Pool management and item-level operations on a live allocator.
        fn add_pool(cache: &LruAllocator, name: StringPiece<'_>, size: usize) -> Result<i8>;
        fn get_unreserved_size(cache: &LruAllocator) -> usize;
        type LruItemHandle;
        fn get_size(handle: &LruItemHandle) -> usize;
        fn get_memory(handle: &LruItemHandle) -> *const u8;
        fn get_writable_memory(handle: Pin<&mut LruItemHandle>) -> Result<*mut u8>;
        unsafe fn get_item_ptr_as_offset(cache: &LruAllocator, ptr: *const u8) -> Result<usize>;
        fn allocate_item(
            cache: &LruAllocator,
            id: i8,
            key: StringPiece<'_>,
            size: usize,
        ) -> Result<UniquePtr<LruItemHandle>>;
        fn insert_handle(cache: &LruAllocator, handle: Pin<&mut LruItemHandle>) -> Result<bool>;
        fn insert_or_replace_handle(
            cache: &LruAllocator,
            handle: Pin<&mut LruItemHandle>,
        ) -> Result<()>;
        fn remove_item(cache: &LruAllocator, key: StringPiece<'_>) -> Result<()>;
        fn find_item(
            cache: &LruAllocator,
            key: StringPiece<'_>,
        ) -> Result<UniquePtr<LruItemHandle>>;
        fn get_pool_size(cache: &LruAllocator, pool: i8) -> Result<usize>;
        fn grow_pool(cache: &LruAllocator, pool: i8, size: usize) -> Result<bool>;
        fn shrink_pool(cache: &LruAllocator, pool: i8, size: usize) -> Result<bool>;
        fn resize_pools(cache: &LruAllocator, src: i8, dst: i8, size: usize) -> Result<bool>;
    }
}
/// The C++ implementation of LruAllocator is thread safe.
// SAFETY: asserted on the strength of the C++ implementation's
// documented thread-safety guarantee — not verifiable from this file.
unsafe impl Send for ffi::LruAllocator {}
unsafe impl Sync for ffi::LruAllocator {}
/// The C++ implementation of CacheAdmin is thread safe.
// SAFETY: same reasoning as for LruAllocator above.
unsafe impl Send for ffi::CacheAdmin {}
unsafe impl Sync for ffi::CacheAdmin {}
|
pub mod page_state;
pub mod panel;
pub mod view;
pub use self::page_state::LllPageState;
pub use self::panel::LllPanel;
pub use self::view::LllView;
|
extern crate git2;
extern crate chrono;
use git2::{Commit, Direction, ObjectType, Oid, Repository, Signature};
use std::fs::{canonicalize, File};
use std::io::Write;
use std::path::Path;
use chrono::prelude::*;
use chrono::offset::LocalResult;
/// Resolve `HEAD` to the commit it currently points at.
fn find_last_commit(repo: &Repository) -> Result<Commit, git2::Error> {
    let head = repo.head()?;
    let head_obj = head.resolve()?.peel(ObjectType::Commit)?;
    head_obj
        .into_commit()
        .map_err(|_| git2::Error::from_str("Couldn't find commit"))
}
/// Print a commit in a `git log`-like format.
fn display_commit(commit: &Commit) {
    // Commit time is seconds since the Unix epoch; render it as UTC.
    let seconds = commit.time().seconds();
    let when = Utc.timestamp(seconds, 0);
    let message = commit.message().unwrap_or("no commit message");
    println!(
        "commit {}\nAuthor: {}\nDate: {}\n\n {}",
        commit.id(),
        commit.author(),
        when.to_rfc2822(),
        message
    );
}
/// Stage `path`, write the index as a tree, and commit it on `HEAD`.
fn add_and_commit(repo: &Repository, path: &Path, message: &str) -> Result<Oid, git2::Error> {
    let mut index = repo.index()?;
    index.add_path(path)?;
    let tree_oid = index.write_tree()?;
    let tree = repo.find_tree(tree_oid)?;
    let parent = find_last_commit(repo)?;
    let signature = Signature::now("Zbigniew Siciarz", "zbigniew@siciarz.net")?;
    // Author and committer share one signature; HEAD is advanced to the
    // new commit, which has the previous HEAD commit as its sole parent.
    repo.commit(Some("HEAD"), &signature, &signature, message, &tree, &[&parent])
}
/// Push local master to the "yrong" remote, creating the remote from
/// `url` if it does not exist yet.
fn push(repo: &Repository, url: &str) -> Result<(), git2::Error> {
    let mut remote = repo
        .find_remote("yrong")
        .or_else(|_| repo.remote("yrong", url))?;
    remote.connect(Direction::Push)?;
    remote.push(&["refs/heads/master:refs/heads/master"], None)
}
/// Example driver: open a repository, show its last commit, write and
/// commit a sample file, then (optionally) push.
fn main() {
    println!("24 Days of Rust vol. 2 - git2");
    // Default to the current directory; `unwrap_or_else` avoids
    // allocating the fallback string when an argument was supplied.
    let repo_root = std::env::args().nth(1).unwrap_or_else(|| ".".to_string());
    let repo = Repository::open(repo_root.as_str()).expect("Couldn't open repository");
    println!("{} state={:?}", repo.path().display(), repo.state());
    let commit = find_last_commit(&repo).expect("Couldn't find last commit");
    display_commit(&commit);
    let relative_path = Path::new("example.txt");
    {
        // Scope the handle so the file is flushed and closed before the
        // commit below reads it back through the index.
        let file_path = Path::new(repo_root.as_str()).join(relative_path);
        // `File::create` borrows the path; no clone needed.
        let mut file = File::create(&file_path).expect("Couldn't create file");
        file.write_all(b"Hello git2").unwrap();
    }
    let commit_id = add_and_commit(&repo, relative_path, "Add example text file")
        .expect("Couldn't add file to repo");
    println!("New commit: {}", commit_id);
    let remote_url = "git@github.com:yrong/24daysofrust.git";
    // Alternative: a local file:// remote, e.g.
    // format!("file://{}", canonicalize(".git/config").unwrap().display());
    println!("Pushing to: {}", remote_url);
    // Pushing needs credentials, so it stays disabled in this example.
    // push(&repo, remote_url).expect("Couldn't push to remote repo");
}
|
//! Lint on use of `size_of` or `size_of_val` of T in an expression
//! expecting a count of T
use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::{match_def_path, paths};
use if_chain::if_chain;
use rustc_hir::BinOpKind;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::{self, Ty, TyS, TypeAndMut};
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
    /// ### What it does
    /// Detects expressions where
    /// `size_of::<T>` or `size_of_val::<T>` is used as a
    /// count of elements of type `T`
    ///
    /// ### Why is this bad?
    /// These functions expect a count
    /// of `T` and not a number of bytes
    ///
    /// ### Example
    /// ```rust,no_run
    /// # use std::ptr::copy_nonoverlapping;
    /// # use std::mem::size_of;
    /// const SIZE: usize = 128;
    /// let x = [2u8; SIZE];
    /// let mut y = [2u8; SIZE];
    /// unsafe { copy_nonoverlapping(x.as_ptr(), y.as_mut_ptr(), size_of::<u8>() * SIZE) };
    /// ```
    #[clippy::version = "1.50.0"]
    pub SIZE_OF_IN_ELEMENT_COUNT,
    correctness,
    "using `size_of::<T>` or `size_of_val::<T>` where a count of elements of `T` is expected"
}
// Register a lint pass that owns exactly this one lint.
declare_lint_pass!(SizeOfInElementCount => [SIZE_OF_IN_ELEMENT_COUNT]);
/// Walk a count expression and return the `T` of any `size_of::<T>` /
/// `size_of_val` call that effectively scales the count.
///
/// `inverted` tracks division: a `size_of` appearing in a divisor cancels
/// out rather than scaling the count, so it is only reported when the
/// current position is not inverted (and vice versa after each `/`).
fn get_size_of_ty(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>, inverted: bool) -> Option<Ty<'tcx>> {
    match expr.kind {
        ExprKind::Call(count_func, _func_args) => {
            if_chain! {
                // Only a numerator-position size_of call counts.
                if !inverted;
                if let ExprKind::Path(ref count_func_qpath) = count_func.kind;
                if let Some(def_id) = cx.qpath_res(count_func_qpath, count_func.hir_id).opt_def_id();
                if match_def_path(cx, def_id, &paths::MEM_SIZE_OF)
                    || match_def_path(cx, def_id, &paths::MEM_SIZE_OF_VAL);
                then {
                    // The first generic argument of size_of{,_val} is `T`.
                    cx.typeck_results().node_substs(count_func.hir_id).types().next()
                } else {
                    None
                }
            }
        },
        ExprKind::Binary(op, left, right) if BinOpKind::Mul == op.node => {
            get_size_of_ty(cx, left, inverted).or_else(|| get_size_of_ty(cx, right, inverted))
        },
        ExprKind::Binary(op, left, right) if BinOpKind::Div == op.node => {
            // Division flips the role of a size_of found on the right side.
            get_size_of_ty(cx, left, inverted).or_else(|| get_size_of_ty(cx, right, !inverted))
        },
        ExprKind::Cast(expr, _) => get_size_of_ty(cx, expr, inverted),
        _ => None,
    }
}
/// If `expr` is a call or method call that takes an element count (the
/// `ptr::copy` family, `slice_from_raw_parts`, pointer arithmetic, …),
/// return the pointee type `T` together with the count argument.
fn get_pointee_ty_and_count_expr(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) -> Option<(Ty<'tcx>, &'tcx Expr<'tcx>)> {
    // Free functions whose final argument is an element count.
    const FUNCTIONS: [&[&str]; 8] = [
        &paths::PTR_COPY_NONOVERLAPPING,
        &paths::PTR_COPY,
        &paths::PTR_WRITE_BYTES,
        &paths::PTR_SWAP_NONOVERLAPPING,
        &paths::PTR_SLICE_FROM_RAW_PARTS,
        &paths::PTR_SLICE_FROM_RAW_PARTS_MUT,
        &paths::SLICE_FROM_RAW_PARTS,
        &paths::SLICE_FROM_RAW_PARTS_MUT,
    ];
    // Raw-pointer methods whose final argument is an element count.
    const METHODS: [&str; 11] = [
        "write_bytes",
        "copy_to",
        "copy_from",
        "copy_to_nonoverlapping",
        "copy_from_nonoverlapping",
        "add",
        "wrapping_add",
        "sub",
        "wrapping_sub",
        "offset",
        "wrapping_offset",
    ];
    if_chain! {
        // Find calls to ptr::{copy, copy_nonoverlapping}
        // and ptr::{swap_nonoverlapping, write_bytes},
        if let ExprKind::Call(func, [.., count]) = expr.kind;
        if let ExprKind::Path(ref func_qpath) = func.kind;
        if let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id();
        if FUNCTIONS.iter().any(|func_path| match_def_path(cx, def_id, func_path));
        // Get the pointee type
        if let Some(pointee_ty) = cx.typeck_results().node_substs(func.hir_id).types().next();
        then {
            return Some((pointee_ty, count));
        }
    };
    if_chain! {
        // Find calls to copy_{from,to}{,_nonoverlapping} and write_bytes methods
        if let ExprKind::MethodCall(method_path, _, [ptr_self, .., count], _) = expr.kind;
        let method_ident = method_path.ident.as_str();
        if METHODS.iter().any(|m| *m == &*method_ident);
        // Get the pointee type
        if let ty::RawPtr(TypeAndMut { ty: pointee_ty, .. }) =
            cx.typeck_results().expr_ty(ptr_self).kind();
        then {
            return Some((pointee_ty, count));
        }
    };
    None
}
impl<'tcx> LateLintPass<'tcx> for SizeOfInElementCount {
    // Entry point: inspect every expression for a byte count passed where
    // an element count is expected.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
        const HELP_MSG: &str = "use a count of elements instead of a count of bytes\
, it already gets multiplied by the size of the type";
        const LINT_MSG: &str = "found a count of bytes \
instead of a count of elements of `T`";
        if_chain! {
            // Find calls to functions with an element count parameter and get
            // the pointee type and count parameter expression
            if let Some((pointee_ty, count_expr)) = get_pointee_ty_and_count_expr(cx, expr);
            // Find a size_of call in the count parameter expression and
            // check that it's the same type
            if let Some(ty_used_for_size_of) = get_size_of_ty(cx, count_expr, false);
            if TyS::same_type(pointee_ty, ty_used_for_size_of);
            then {
                span_lint_and_help(
                    cx,
                    SIZE_OF_IN_ELEMENT_COUNT,
                    count_expr.span,
                    LINT_MSG,
                    None,
                    HELP_MSG
                );
            }
        };
    }
}
|
use std::mem;
pub type Sorttype = i32;
/// Sort the input with a bottom-up (iterative) merge sort, returning a
/// new sorted vector. The sort is stable.
///
/// The previous implementation underflowed `m.len() - 1` for empty input
/// and indexed past the end of the buffer whenever the length was not a
/// power of two (e.g. length 3); this version clamps every run boundary
/// to the input length, so all lengths — including 0 and 1 — are handled.
pub fn optimal_sort(m: &Vec<Sorttype>) -> Vec<Sorttype> {
    let n = m.len();
    let mut src = m.clone();
    let mut dst: Vec<Sorttype> = Vec::with_capacity(n);
    // Merge sorted runs of `width` elements into runs of `2 * width`,
    // doubling until a single run covers the whole vector.
    let mut width = 1;
    while width < n {
        dst.clear();
        for start in (0..n).step_by(2 * width) {
            // Clamp both boundaries so a short tail run is merged safely.
            let mid = usize::min(start + width, n);
            let end = usize::min(start + 2 * width, n);
            let (mut i, mut j) = (start, mid);
            while i < mid && j < end {
                // `<=` keeps equal elements in original order (stability).
                if src[i] <= src[j] {
                    dst.push(src[i]);
                    i += 1;
                } else {
                    dst.push(src[j]);
                    j += 1;
                }
            }
            // Copy whichever run still has elements left.
            dst.extend_from_slice(&src[i..mid]);
            dst.extend_from_slice(&src[j..end]);
        }
        // The freshly merged buffer becomes the source of the next pass.
        mem::swap(&mut src, &mut dst);
        width *= 2;
    }
    src
}
|
use objc::{msg_send, sel, sel_impl};
use objc_id::Id;
use super::nil;
use objc_foundation::{object_struct, INSObject};
#[cfg(feature = "parent")]
use raw_window_handle::RawWindowHandle;
/// Thin Objective-C wrapper for `NSWindow` operations.
pub trait INSWindow: INSObject {
    /// Obtain a window object from a raw window handle (macOS handles only).
    #[cfg(feature = "parent")]
    fn from_raw_window_handle(h: &RawWindowHandle) -> Id<Self> {
        match h {
            RawWindowHandle::MacOS(h) => {
                let id = h.ns_window as *mut Self;
                // SAFETY: assumes the caller-supplied handle carries a valid
                // NSWindow pointer for the lifetime of the window — this is
                // the raw-window-handle contract, not checkable here.
                unsafe { Id::from_ptr(id) }
            }
            // Fixed typo in the panic message ("Unsuported").
            _ => unreachable!("Unsupported window handle, expected: MacOS"),
        }
    }
    /// Bring the window to the front and make it the key window.
    fn make_key_and_order_front(&self) {
        // SAFETY: sends the standard `makeKeyAndOrderFront:` selector with a
        // nil sender, which NSWindow accepts.
        let _: () = unsafe { msg_send![self, makeKeyAndOrderFront: nil] };
    }
}
// Declare the NSWindow object wrapper and give it the window behaviors.
object_struct!(NSWindow);
impl INSWindow for NSWindow {}
|
#[doc = r"Value read from the register"]
pub struct R {
    // Raw register snapshot captured at read/modify time.
    bits: u32,
}
#[doc = r"Value to write to the register"]
pub struct W {
    // Raw value accumulated by the field proxies before being written.
    bits: u32,
}
impl super::PCUART {
    #[doc = r"Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: W is seeded with the current value so fields
        // the closure does not touch are preserved.
        let bits = self.register.get();
        self.register.set(f(&R { bits }, &mut W { bits }).bits);
    }
    #[doc = r"Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r"Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, `write` starts from the reset value.
        self.register.set(
            f(&mut W {
                bits: Self::reset_value(),
            })
            .bits,
        );
    }
    #[doc = r"Reset value of the register"]
    #[inline(always)]
    pub const fn reset_value() -> u32 {
        0
    }
    #[doc = r"Writes the reset value to the register"]
    #[inline(always)]
    pub fn reset(&self) {
        self.register.set(Self::reset_value())
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P0R {
    bits: bool,
}
impl SYSCTL_PCUART_P0R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P0W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P0W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 0 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 0)) | (((value as u32) & 1) << 0);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P1R {
    bits: bool,
}
impl SYSCTL_PCUART_P1R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P1W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P1W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 1 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 1)) | (((value as u32) & 1) << 1);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P2R {
    bits: bool,
}
impl SYSCTL_PCUART_P2R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P2W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P2W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 2 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 2)) | (((value as u32) & 1) << 2);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P3R {
    bits: bool,
}
impl SYSCTL_PCUART_P3R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P3W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P3W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 3 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 3)) | (((value as u32) & 1) << 3);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P4R {
    bits: bool,
}
impl SYSCTL_PCUART_P4R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P4W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P4W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 4 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 4)) | (((value as u32) & 1) << 4);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P5R {
    bits: bool,
}
impl SYSCTL_PCUART_P5R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P5W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P5W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 5 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 5)) | (((value as u32) & 1) << 5);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P6R {
    bits: bool,
}
impl SYSCTL_PCUART_P6R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P6W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P6W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 6 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 6)) | (((value as u32) & 1) << 6);
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PCUART_P7R {
    bits: bool,
}
impl SYSCTL_PCUART_P7R {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PCUART_P7W<'a> {
    w: &'a mut W,
}
impl<'a> _SYSCTL_PCUART_P7W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Replace bit 7 in a single masked update, leaving others intact.
        self.w.bits = (self.w.bits & !(1 << 7)) | (((value as u32) & 1) << 7);
        self.w
    }
}
impl R {
    #[doc = r"Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bit 0 - UART Module 0 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p0(&self) -> SYSCTL_PCUART_P0R {
        SYSCTL_PCUART_P0R {
            bits: (self.bits & (1 << 0)) != 0,
        }
    }
    #[doc = "Bit 1 - UART Module 1 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p1(&self) -> SYSCTL_PCUART_P1R {
        SYSCTL_PCUART_P1R {
            bits: (self.bits & (1 << 1)) != 0,
        }
    }
    #[doc = "Bit 2 - UART Module 2 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p2(&self) -> SYSCTL_PCUART_P2R {
        SYSCTL_PCUART_P2R {
            bits: (self.bits & (1 << 2)) != 0,
        }
    }
    #[doc = "Bit 3 - UART Module 3 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p3(&self) -> SYSCTL_PCUART_P3R {
        SYSCTL_PCUART_P3R {
            bits: (self.bits & (1 << 3)) != 0,
        }
    }
    #[doc = "Bit 4 - UART Module 4 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p4(&self) -> SYSCTL_PCUART_P4R {
        SYSCTL_PCUART_P4R {
            bits: (self.bits & (1 << 4)) != 0,
        }
    }
    #[doc = "Bit 5 - UART Module 5 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p5(&self) -> SYSCTL_PCUART_P5R {
        SYSCTL_PCUART_P5R {
            bits: (self.bits & (1 << 5)) != 0,
        }
    }
    #[doc = "Bit 6 - UART Module 6 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p6(&self) -> SYSCTL_PCUART_P6R {
        SYSCTL_PCUART_P6R {
            bits: (self.bits & (1 << 6)) != 0,
        }
    }
    #[doc = "Bit 7 - UART Module 7 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p7(&self) -> SYSCTL_PCUART_P7R {
        SYSCTL_PCUART_P7R {
            bits: (self.bits & (1 << 7)) != 0,
        }
    }
}
impl W {
    #[doc = r"Writes raw bits to the register"]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Caller must guarantee the raw value is valid for this register.
        self.bits = bits;
        self
    }
    // Each accessor below hands out a write proxy borrowing `self`, so
    // field writes can be chained inside a `write`/`modify` closure.
    #[doc = "Bit 0 - UART Module 0 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p0(&mut self) -> _SYSCTL_PCUART_P0W {
        _SYSCTL_PCUART_P0W { w: self }
    }
    #[doc = "Bit 1 - UART Module 1 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p1(&mut self) -> _SYSCTL_PCUART_P1W {
        _SYSCTL_PCUART_P1W { w: self }
    }
    #[doc = "Bit 2 - UART Module 2 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p2(&mut self) -> _SYSCTL_PCUART_P2W {
        _SYSCTL_PCUART_P2W { w: self }
    }
    #[doc = "Bit 3 - UART Module 3 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p3(&mut self) -> _SYSCTL_PCUART_P3W {
        _SYSCTL_PCUART_P3W { w: self }
    }
    #[doc = "Bit 4 - UART Module 4 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p4(&mut self) -> _SYSCTL_PCUART_P4W {
        _SYSCTL_PCUART_P4W { w: self }
    }
    #[doc = "Bit 5 - UART Module 5 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p5(&mut self) -> _SYSCTL_PCUART_P5W {
        _SYSCTL_PCUART_P5W { w: self }
    }
    #[doc = "Bit 6 - UART Module 6 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p6(&mut self) -> _SYSCTL_PCUART_P6W {
        _SYSCTL_PCUART_P6W { w: self }
    }
    #[doc = "Bit 7 - UART Module 7 Power Control"]
    #[inline(always)]
    pub fn sysctl_pcuart_p7(&mut self) -> _SYSCTL_PCUART_P7W {
        _SYSCTL_PCUART_P7W { w: self }
    }
}
|
use std::cmp::{max, min};
use std::collections::{HashMap, HashSet};
use itertools::Itertools;
use whiteread::parse_line;
// Common competitive-programming moduli: 1e9+7 (unused in this solution)
// and the NTT-friendly prime 998244353 used for all arithmetic below.
const ten97: usize = 1000000007;
const mmm: usize = 998244353;
/// Map an ASCII letter of either case to its 0-based alphabet index
/// ('a'/'A' -> 0 … 'z'/'Z' -> 25); panics on any other character.
fn alphabet2idx(c: char) -> usize {
    let base = if c.is_ascii_lowercase() {
        'a'
    } else if c.is_ascii_uppercase() {
        'A'
    } else {
        panic!("wtf")
    };
    c as u8 as usize - base as u8 as usize
}
// (Translated from Japanese:) reduce to "an === b mod (p - 1)"; the number
// of (a, b) combinations equals the sum of the orders of the cyclic groups
// generated by each a. By Lagrange's theorem a cyclic subgroup's order can
// only be a divisor of the group order, so only divisors of p - 1 need to
// be considered — and this must be computed efficiently.
fn main() {
    let p: usize = parse_line().unwrap();
    // Collect every divisor of n = p - 1 by trial division up to sqrt.
    let mut gg: Vec<usize> = vec![];
    let mut n: usize = p - 1;
    for i in 1..(p as f64).sqrt() as usize + 1 {
        if n % i == 0 {
            gg.push(i);
            if i * i != n {
                gg.push(n / i);
            }
        }
    }
    gg.sort();
    // For each divisor g (largest first) derive a count by subtracting the
    // counts already attributed to larger divisors divisible by g —
    // an inclusion-exclusion over the divisor lattice.
    let mut ff: Vec<(usize, usize)> = vec![];
    for g in gg.iter().rev() {
        let mut tmp = (p - 1) / g;
        for (preg, prenum) in ff.iter() {
            if preg % g == 0 {
                tmp -= prenum;
            }
        }
        // NOTE(review): prenum is reduced mod mmm while tmp is not, so this
        // assumes the subtraction never underflows for the allowed input
        // range — TODO confirm against the problem constraints.
        tmp %= mmm;
        ff.push((*g, tmp));
    }
    // Accumulate the answer mod mmm; ff was built in the same reversed
    // order as this iteration, so ff[i] corresponds to divisor g.
    let mut ans = 1;
    for (i, g) in gg.into_iter().rev().enumerate() {
        let mut tmp = ((p - 1) / g) % mmm;
        tmp *= ff[i].1;
        tmp %= mmm;
        ans += tmp;
        ans %= mmm;
    }
    println!("{}", ans);
}
|
use internship::IStr;
use std::collections::HashMap;
use std::fmt;
use std::rc::Rc;
/// A first-order term; reference-counted so substitutions can share
/// structure cheaply.
#[derive(Debug, Clone)]
struct Term(pub Rc<Term_>);
impl Term {
    // Build a variable term.
    pub fn var(v: IStr) -> Term {
        Term(Rc::new(Var(v)))
    }
    // Convenience variable constructor from a string literal.
    pub fn var_(v: &'static str) -> Term {
        Term(Rc::new(Var(IStr::new(v))))
    }
    // Build an application term `f(ts...)`.
    pub fn apply(f: IStr, ts: Vec<Term>) -> Term {
        Term(Rc::new(Apply(f, ts)))
    }
    // Convenience application constructor from a string literal.
    pub fn apply_(f: &'static str, ts: Vec<Term>) -> Term {
        Term(Rc::new(Apply(IStr::new(f), ts)))
    }
}
/// Internal term shape: a variable, or an application `f(args...)`
/// (a constant is an application with no arguments).
#[derive(Debug, Clone)]
enum Term_ {
    Var(IStr),
    Apply(IStr, Vec<Term>),
}
use Term_::*;
impl fmt::Display for Term {
    /// Render variables as `X`, constants as `k`, and applications as
    /// `f(a, b, ...)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.0.as_ref() {
            Var(v) => write!(f, "{}", v),
            Apply(name, args) if args.is_empty() => write!(f, "{}", name),
            Apply(name, args) => {
                write!(f, "{}(", name)?;
                for (i, arg) in args.iter().enumerate() {
                    if i > 0 {
                        write!(f, ", ")?;
                    }
                    write!(f, "{}", arg)?;
                }
                write!(f, ")")
            }
        }
    }
}
/// Return `true` if variable `x` occurs anywhere inside term `t`.
///
/// Used by `unify` to reject circular bindings such as `X = f(X)`.
/// The manual loop-with-early-return was replaced by the equivalent
/// (and short-circuiting) `Iterator::any`.
fn occurs_check(x: &IStr, t: &Term) -> bool {
    match t.0.as_ref() {
        Var(ref y) => x.eq(y),
        // A variable occurs in an application iff it occurs in some argument.
        Apply(_, ref ts) => ts.iter().any(|t| occurs_check(x, t)),
    }
}
/// A substitution: a map from variable names to the terms bound to them.
#[derive(Debug, Clone)]
struct Subs(HashMap<IStr, Term>);
/// Apply substitution `sub` to `s`, replacing bound variables.
///
/// NOTE(review): bindings are substituted only one level deep — a looked-up
/// term is not re-substituted. `unify` appears to compensate by applying
/// the current substitution to every popped pair; confirm that chained
/// bindings (x -> y, y -> t) can never be observed unnormalized.
fn apply(sub: &Subs, s: Term) -> Term {
    match s.0.as_ref() {
        Var(ref x) => {
            // Unbound variables map to themselves.
            let t = sub.0.get(x);
            t.unwrap_or(&s).clone()
        }
        Apply(ref f, ref ts) => {
            // Rebuild the application with each argument substituted.
            let uts: Vec<Term> = ts.iter().map(|t| apply(sub, t.clone())).collect();
            Term::apply(f.clone(), uts)
        }
    }
}
/// Syntactic unification of `s` and `t` using a worklist of pairs.
///
/// Returns the substitution making both terms equal, or an `Err` when the
/// terms are structurally incompatible or the occurs check fails.
fn unify(s: Term, t: Term) -> Result<Subs, String> {
    let mut subs = Subs(HashMap::new());
    let mut worklist: Vec<(Term, Term)> = vec![(s, t)];
    while !worklist.is_empty() {
        let (s, t) = worklist.pop().unwrap();
        // Normalize both sides against the substitution built so far.
        let s = apply(&subs, s);
        let t = apply(&subs, t);
        // println!("{} ?= {}", &s, &t);
        match (s.0.as_ref(), t.0.as_ref()) {
            (Var(ref x), Var(ref y)) => {
                // Bind distinct variables; identical ones need no work.
                if !x.eq(y) {
                    subs.0.insert(x.clone(), t);
                }
            }
            (Var(ref x), _) => {
                // Occurs check rejects circular bindings like X = f(X).
                if occurs_check(x, &t) {
                    return Err(format!("{} occurs in {}", &x, &t));
                }
                subs.0.insert(x.clone(), t);
            }
            (_, Var(ref x)) => {
                if occurs_check(x, &s) {
                    return Err(format!("{} occurs in {}", &x, &s));
                }
                subs.0.insert(x.clone(), s);
            }
            (Apply(f, f_args), Apply(g, g_args)) => {
                // Same symbol and arity required, then unify pointwise.
                if !(f == g && f_args.len() == g_args.len()) {
                    return Err(String::from("structure"));
                }
                for (s, t) in f_args.iter().zip(g_args) {
                    worklist.push((s.clone(), t.clone()));
                }
            }
        }
    }
    Ok(subs)
}
/// Demo: one unifiable pair and one that fails the occurs check.
fn main() {
    // f(X, g(k, Y)) unified with f(j, Z) succeeds.
    let lhs = Term::apply_(
        "f",
        vec![
            Term::var_("X"),
            Term::apply_("g", vec![Term::apply_("k", vec![]), Term::var_("Y")]),
        ],
    );
    println!("e1 = {}", lhs);
    let rhs = Term::apply_("f", vec![Term::apply_("j", vec![]), Term::var_("Z")]);
    println!("e2 = {}", rhs);
    let result = unify(lhs, rhs);
    println!("{:?}", result);
    // X unified with f(X) must fail the occurs check.
    let var = Term::var_("X");
    let wrapped = Term::apply_("f", vec![Term::var_("X")]);
    println!("{:?}", unify(var, wrapped));
}
#[cfg(test)]
mod tests {
    use crate::unify;
    use crate::Term;
    // Display formatting for constants, variables and nested applications.
    #[test]
    fn term_display() {
        assert_eq!(format!("{}", Term::apply_("k", vec![])), "k");
        assert_eq!(format!("{}", Term::var_("X")), "X");
        assert_eq!(
            format!("{}", Term::apply_("f", vec![Term::var_("X")])),
            "f(X)"
        );
        assert_eq!(
            format!(
                "{}",
                Term::apply_("f", vec![Term::var_("X"), Term::var_("Y")])
            ),
            "f(X, Y)"
        );
    }
    // f(X, g(k, Y)) and f(j, Z) are unifiable.
    #[test]
    fn unify_test() {
        let e1 = Term::apply_(
            "f",
            vec![
                Term::var_("X"),
                Term::apply_("g", vec![Term::apply_("k", vec![]), Term::var_("Y")]),
            ],
        );
        // println!("e1 = {}", e1);
        let e2 = Term::apply_("f", vec![Term::apply_("j", vec![]), Term::var_("Z")]);
        // println!("e2 = {}", e2);
        let u = unify(e1, e2);
        // println!("{:?}", u);
        assert!(u.is_ok());
    }
    // X = f(X) must be rejected by the occurs check.
    #[test]
    fn nonunify_test() {
        let e1 = Term::var_("X");
        let e2 = Term::apply_("f", vec![Term::var_("X")]);
        assert!(unify(e1, e2).is_err());
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.