text
stringlengths
8
4.13M
extern crate cc;

/// Build script: compiles the bundled CityHash C++ source into a static
/// library named `libchcityhash.a`, which cargo then links into the crate.
fn main() {
    // `cc::Build` methods return `&mut Build`, so the whole configuration
    // can be expressed as one chain ending in `compile`.
    cc::Build::new()
        .file("src/cc/city.cc")
        .cpp(true) // city.cc is C++, so compile with the C++ driver
        .opt_level(3)
        .compile("libchcityhash.a");
}
#[doc = "Reader of register PROC0_INTE3"]
pub type R = crate::R<u32, super::PROC0_INTE3>;
#[doc = "Writer for register PROC0_INTE3"]
pub type W = crate::W<u32, super::PROC0_INTE3>;
#[doc = "Register PROC0_INTE3 `reset()`'s with value 0"]
impl crate::ResetValue for super::PROC0_INTE3 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}

/// Declares the reader alias and the write proxy for one single-bit field
/// located at bit `$offset` of this register. Every field of PROC0_INTE3
/// is a one-bit interrupt-enable flag, so they all share this shape.
macro_rules! bit_field {
    ($Reader:ident, $Writer:ident, $offset:expr) => {
        #[doc = r"Reader of a one-bit interrupt-enable field"]
        pub type $Reader = crate::R<bool, bool>;
        #[doc = r"Write proxy for a one-bit interrupt-enable field"]
        pub struct $Writer<'a> {
            w: &'a mut W,
        }
        impl<'a> $Writer<'a> {
            #[doc = r"Sets the field bit"]
            #[inline(always)]
            pub fn set_bit(self) -> &'a mut W {
                self.bit(true)
            }
            #[doc = r"Clears the field bit"]
            #[inline(always)]
            pub fn clear_bit(self) -> &'a mut W {
                self.bit(false)
            }
            #[doc = r"Writes raw bits to the field"]
            #[inline(always)]
            pub fn bit(self, value: bool) -> &'a mut W {
                // Clear the target bit, then OR in the (masked) new value.
                self.w.bits =
                    (self.w.bits & !(0x01 << $offset)) | (((value as u32) & 0x01) << $offset);
                self.w
            }
        }
    };
}

bit_field!(GPIO29_EDGE_HIGH_R, GPIO29_EDGE_HIGH_W, 23);
bit_field!(GPIO29_EDGE_LOW_R, GPIO29_EDGE_LOW_W, 22);
bit_field!(GPIO29_LEVEL_HIGH_R, GPIO29_LEVEL_HIGH_W, 21);
bit_field!(GPIO29_LEVEL_LOW_R, GPIO29_LEVEL_LOW_W, 20);
bit_field!(GPIO28_EDGE_HIGH_R, GPIO28_EDGE_HIGH_W, 19);
bit_field!(GPIO28_EDGE_LOW_R, GPIO28_EDGE_LOW_W, 18);
bit_field!(GPIO28_LEVEL_HIGH_R, GPIO28_LEVEL_HIGH_W, 17);
bit_field!(GPIO28_LEVEL_LOW_R, GPIO28_LEVEL_LOW_W, 16);
bit_field!(GPIO27_EDGE_HIGH_R, GPIO27_EDGE_HIGH_W, 15);
bit_field!(GPIO27_EDGE_LOW_R, GPIO27_EDGE_LOW_W, 14);
bit_field!(GPIO27_LEVEL_HIGH_R, GPIO27_LEVEL_HIGH_W, 13);
bit_field!(GPIO27_LEVEL_LOW_R, GPIO27_LEVEL_LOW_W, 12);
bit_field!(GPIO26_EDGE_HIGH_R, GPIO26_EDGE_HIGH_W, 11);
bit_field!(GPIO26_EDGE_LOW_R, GPIO26_EDGE_LOW_W, 10);
bit_field!(GPIO26_LEVEL_HIGH_R, GPIO26_LEVEL_HIGH_W, 9);
bit_field!(GPIO26_LEVEL_LOW_R, GPIO26_LEVEL_LOW_W, 8);
bit_field!(GPIO25_EDGE_HIGH_R, GPIO25_EDGE_HIGH_W, 7);
bit_field!(GPIO25_EDGE_LOW_R, GPIO25_EDGE_LOW_W, 6);
bit_field!(GPIO25_LEVEL_HIGH_R, GPIO25_LEVEL_HIGH_W, 5);
bit_field!(GPIO25_LEVEL_LOW_R, GPIO25_LEVEL_LOW_W, 4);
bit_field!(GPIO24_EDGE_HIGH_R, GPIO24_EDGE_HIGH_W, 3);
bit_field!(GPIO24_EDGE_LOW_R, GPIO24_EDGE_LOW_W, 2);
bit_field!(GPIO24_LEVEL_HIGH_R, GPIO24_LEVEL_HIGH_W, 1);
bit_field!(GPIO24_LEVEL_LOW_R, GPIO24_LEVEL_LOW_W, 0);

/// Expands to the `R` accessors: each returns the reader for the single
/// bit at `$offset` (name => reader type @ bit offset).
macro_rules! reader_fns {
    ($($name:ident => $Reader:ident @ $offset:expr;)+) => {
        $(
            #[inline(always)]
            pub fn $name(&self) -> $Reader {
                $Reader::new(((self.bits >> $offset) & 0x01) != 0)
            }
        )+
    };
}

impl R {
    reader_fns! {
        gpio29_edge_high => GPIO29_EDGE_HIGH_R @ 23;
        gpio29_edge_low => GPIO29_EDGE_LOW_R @ 22;
        gpio29_level_high => GPIO29_LEVEL_HIGH_R @ 21;
        gpio29_level_low => GPIO29_LEVEL_LOW_R @ 20;
        gpio28_edge_high => GPIO28_EDGE_HIGH_R @ 19;
        gpio28_edge_low => GPIO28_EDGE_LOW_R @ 18;
        gpio28_level_high => GPIO28_LEVEL_HIGH_R @ 17;
        gpio28_level_low => GPIO28_LEVEL_LOW_R @ 16;
        gpio27_edge_high => GPIO27_EDGE_HIGH_R @ 15;
        gpio27_edge_low => GPIO27_EDGE_LOW_R @ 14;
        gpio27_level_high => GPIO27_LEVEL_HIGH_R @ 13;
        gpio27_level_low => GPIO27_LEVEL_LOW_R @ 12;
        gpio26_edge_high => GPIO26_EDGE_HIGH_R @ 11;
        gpio26_edge_low => GPIO26_EDGE_LOW_R @ 10;
        gpio26_level_high => GPIO26_LEVEL_HIGH_R @ 9;
        gpio26_level_low => GPIO26_LEVEL_LOW_R @ 8;
        gpio25_edge_high => GPIO25_EDGE_HIGH_R @ 7;
        gpio25_edge_low => GPIO25_EDGE_LOW_R @ 6;
        gpio25_level_high => GPIO25_LEVEL_HIGH_R @ 5;
        gpio25_level_low => GPIO25_LEVEL_LOW_R @ 4;
        gpio24_edge_high => GPIO24_EDGE_HIGH_R @ 3;
        gpio24_edge_low => GPIO24_EDGE_LOW_R @ 2;
        gpio24_level_high => GPIO24_LEVEL_HIGH_R @ 1;
        gpio24_level_low => GPIO24_LEVEL_LOW_R @ 0;
    }
}

/// Expands to the `W` accessors: each returns the write proxy for one
/// field, borrowing the writer so calls can be chained.
macro_rules! writer_fns {
    ($($name:ident => $Writer:ident;)+) => {
        $(
            #[inline(always)]
            pub fn $name(&mut self) -> $Writer {
                $Writer { w: self }
            }
        )+
    };
}

impl W {
    writer_fns! {
        gpio29_edge_high => GPIO29_EDGE_HIGH_W;
        gpio29_edge_low => GPIO29_EDGE_LOW_W;
        gpio29_level_high => GPIO29_LEVEL_HIGH_W;
        gpio29_level_low => GPIO29_LEVEL_LOW_W;
        gpio28_edge_high => GPIO28_EDGE_HIGH_W;
        gpio28_edge_low => GPIO28_EDGE_LOW_W;
        gpio28_level_high => GPIO28_LEVEL_HIGH_W;
        gpio28_level_low => GPIO28_LEVEL_LOW_W;
        gpio27_edge_high => GPIO27_EDGE_HIGH_W;
        gpio27_edge_low => GPIO27_EDGE_LOW_W;
        gpio27_level_high => GPIO27_LEVEL_HIGH_W;
        gpio27_level_low => GPIO27_LEVEL_LOW_W;
        gpio26_edge_high => GPIO26_EDGE_HIGH_W;
        gpio26_edge_low => GPIO26_EDGE_LOW_W;
        gpio26_level_high => GPIO26_LEVEL_HIGH_W;
        gpio26_level_low => GPIO26_LEVEL_LOW_W;
        gpio25_edge_high => GPIO25_EDGE_HIGH_W;
        gpio25_edge_low => GPIO25_EDGE_LOW_W;
        gpio25_level_high => GPIO25_LEVEL_HIGH_W;
        gpio25_level_low => GPIO25_LEVEL_LOW_W;
        gpio24_edge_high => GPIO24_EDGE_HIGH_W;
        gpio24_edge_low => GPIO24_EDGE_LOW_W;
        gpio24_level_high => GPIO24_LEVEL_HIGH_W;
        gpio24_level_low => GPIO24_LEVEL_LOW_W;
    }
}
use std::marker::PhantomData; use amethyst::{ core::{ bundle::SystemBundle, deferred_dispatcher_operation::{AddBundle, AddSystem, AddSystemDesc, DispatcherOperation}, ArcThreadPool, }, ecs::{Dispatcher, DispatcherBuilder, System}, prelude::*, DataDispose, Error, }; pub struct NiceGameData<'a, 'b> { dispatcher: Option<Dispatcher<'a, 'b>>, } impl<'a, 'b> NiceGameData<'a, 'b> { pub fn new(dispatcher: Dispatcher<'a, 'b>) -> Self { NiceGameData { dispatcher: Some(dispatcher), } } pub fn update(&mut self, world: &World) { if let Some(dispatcher) = &mut self.dispatcher.as_mut() { dispatcher.dispatch(&world); } } pub fn dispose(&mut self, mut world: &mut World) { if let Some(dispatcher) = self.dispatcher.take() { dispatcher.dispose(&mut world); } } } // why is this neccessary if we impls in NiceGameData? impl DataDispose for NiceGameData<'_, '_> { fn dispose(&mut self, world: &mut World) { self.dispose(world); } } pub struct NiceGameDataBuilder<'a, 'b> { dispatcher_ops: Vec<Box<dyn DispatcherOperation<'a, 'b>>>, dispatcher_builder: DispatcherBuilder<'a, 'b>, } impl<'a, 'b> Default for NiceGameDataBuilder<'a, 'b> { fn default() -> Self { NiceGameDataBuilder::new() } } impl<'a, 'b> NiceGameDataBuilder<'a, 'b> { pub fn new() -> Self { NiceGameDataBuilder { dispatcher_ops: Vec::new(), dispatcher_builder: DispatcherBuilder::new(), } } pub fn with_bundle<B>(mut self, bundle: B) -> Self where B: SystemBundle<'a, 'b> + 'static, { self.dispatcher_ops.push(Box::new(AddBundle { bundle })); self } pub fn with_system_desc<SD, S, N>( mut self, system_desc: SD, name: N, dependencies: &[N], ) -> Self where SD: SystemDesc<'a, 'b, S> + 'static, S: for<'c> System<'c> + 'static + Send, N: Into<String> + Clone, { let name = Into::<String>::into(name); let dependencies = dependencies .iter() .map(Clone::clone) .map(Into::<String>::into) .collect::<Vec<String>>(); let op = Box::new(AddSystemDesc { system_desc, name, dependencies, marker: PhantomData::<S>, }) as Box<dyn DispatcherOperation<'a, 
'b> + 'static>; self.dispatcher_ops.push(op); self } pub fn with<S, N>(mut self, system: S, name: N, dependencies: &[N]) -> Self where S: for<'c> System<'c> + 'static + Send, N: Into<String> + Clone, { let name = Into::<String>::into(name); let dependencies = dependencies .iter() .map(Clone::clone) .map(Into::<String>::into) .collect::<Vec<String>>(); let op = Box::new(AddSystem { system, name, dependencies, }) as Box<dyn DispatcherOperation<'a, 'b> + 'static>; self.dispatcher_ops.push(op); self } //TODO config values for no threading pub fn build_dispatcher(self, mut world: &mut World) -> Dispatcher<'a, 'b> { let pool = (*world.read_resource::<ArcThreadPool>()).clone(); let mut dispatcher_builder = self.dispatcher_builder; self.dispatcher_ops .into_iter() .try_for_each(|dispatcher_op| dispatcher_op.exec(world, &mut dispatcher_builder)) .unwrap_or_else(|e| panic!("Unable to init dispatcher: {}", e)); let mut dispatcher = dispatcher_builder.with_pool(pool).build(); dispatcher.setup(&mut world); dispatcher } } impl<'a, 'b> DataInit<NiceGameData<'a, 'b>> for NiceGameDataBuilder<'a, 'b> { fn build(self, world: &mut World) -> NiceGameData<'a, 'b> { NiceGameData::new(self.build_dispatcher(world)) } }
fn main() {
    // Rust has no null; `Option<T>` (`Some(T)` | `None`) models an
    // optional value explicitly in the type system.
    let absent: Option<i32> = None;
    let present: Option<i32> = Some(3);

    // Prints the contained number, or a placeholder when there is none.
    fn print_value(v: Option<i32>) {
        if let Some(num) = v {
            println!("{}", num);
        } else {
            println!("No value");
        }
    }

    print_value(absent);
    print_value(present);
}
//! This module includes a high level abstraction over a DICOM data element's value. use crate::header::{EmptyObject, HasLength, Length, Tag}; use num_traits::NumCast; use smallvec::SmallVec; use std::{borrow::Cow, str::FromStr}; pub mod deserialize; pub mod partial; mod primitive; pub mod range; pub mod serialize; pub use self::deserialize::Error as DeserializeError; pub use self::partial::{DicomDate, DicomDateTime, DicomTime}; pub use self::range::{AsRange, DateRange, DateTimeRange, TimeRange}; pub use self::primitive::{ CastValueError, ConvertValueError, InvalidValueReadError, PrimitiveValue, ValueType, }; /// re-exported from chrono use chrono::FixedOffset; /// An aggregation of one or more elements in a value. pub type C<T> = SmallVec<[T; 2]>; /// A trait for a value that maps to a DICOM element data value. pub trait DicomValueType: HasLength { /// Retrieve the specific type of this value. fn value_type(&self) -> ValueType; /// Retrieve the number of elements contained in the DICOM value. /// /// In a sequence value, this is the number of items in the sequence. /// In an encapsulated pixel data sequence, the output is always 1. /// Otherwise, the output is the number of elements effectively encoded /// in the value. fn cardinality(&self) -> usize; } /// Representation of a full DICOM value, which may be either primitive or /// another DICOM object. /// /// `I` is the complex type for nest data set items, which should usually /// implement [`HasLength`]. /// `P` is the encapsulated pixel data provider, which should usually /// implement `AsRef<[u8]>`. /// /// [`HasLength`]: ../header/trait.HasLength.html #[derive(Debug, Clone, PartialEq)] pub enum Value<I = EmptyObject, P = [u8; 0]> { /// Primitive value. Primitive(PrimitiveValue), /// A complex sequence of items. Sequence { /// Item collection. items: C<I>, /// The size in bytes (length). size: Length, }, /// An encapsulated pixel data sequence. PixelSequence { /// The value contents of the offset table. 
offset_table: C<u32>, /// The sequence of compressed fragments. fragments: C<P>, }, } impl<P> Value<EmptyObject, P> { /// Construct a DICOM pixel sequence sequence value /// from an offset rable and a list of fragments. /// /// Note: This function does not validate the offset table /// against the fragments. pub fn new_pixel_sequence<T>(offset_table: C<u32>, fragments: T) -> Self where T: Into<C<P>>, { Value::PixelSequence { offset_table, fragments: fragments.into(), } } } impl<I> Value<I, [u8; 0]> { /// Construct a full DICOM data set sequence value /// from a list of items and length. #[inline] pub fn new_sequence<T>(items: T, length: Length) -> Self where T: Into<C<I>>, { Value::Sequence { items: items.into(), size: length, } } } impl Value<EmptyObject, [u8; 0]> { /// Construct a DICOM value from a primitive value. /// /// This is equivalent to `Value::from` in behavior, /// except that suitable type parameters are specified /// instead of inferred. #[inline] pub fn new(value: PrimitiveValue) -> Self { Self::from(value) } } impl<I, P> Value<I, P> { /// Obtain the number of individual values. /// In a primitive, this is the number of individual elements in the value. /// In a sequence item, this is the number of items. /// In a pixel sequence, this is currently set to 1 /// regardless of the number of compressed fragments or frames. pub fn multiplicity(&self) -> u32 { match *self { Value::Primitive(ref v) => v.multiplicity(), Value::Sequence { ref items, .. } => items.len() as u32, Value::PixelSequence { .. } => 1, } } /// Gets a reference to the primitive value. pub fn primitive(&self) -> Option<&PrimitiveValue> { match *self { Value::Primitive(ref v) => Some(v), _ => None, } } /// Gets a reference to the items. pub fn items(&self) -> Option<&[I]> { match *self { Value::Sequence { ref items, .. } => Some(items), _ => None, } } /// Retrieves the primitive value. 
pub fn into_primitive(self) -> Option<PrimitiveValue> { match self { Value::Primitive(v) => Some(v), _ => None, } } /// Retrieves the items. pub fn into_items(self) -> Option<C<I>> { match self { Value::Sequence { items, .. } => Some(items), _ => None, } } /// Gets a reference to the encapsulated pixel data's offset table. pub fn offset_table(&self) -> Option<&[u32]> { match self { Value::PixelSequence { offset_table, .. } => Some(offset_table), _ => None, } } } impl<I, P> HasLength for Value<I, P> { fn length(&self) -> Length { match self { Value::Primitive(v) => v.length(), Value::Sequence { size, .. } => *size, Value::PixelSequence { .. } => Length::UNDEFINED, } } } impl<I, P> DicomValueType for Value<I, P> { fn value_type(&self) -> ValueType { match self { Value::Primitive(v) => v.value_type(), Value::Sequence { .. } => ValueType::Item, Value::PixelSequence { .. } => ValueType::PixelSequence, } } fn cardinality(&self) -> usize { match self { Value::Primitive(v) => v.cardinality(), Value::Sequence { items, .. } => items.len(), Value::PixelSequence { .. } => 1, } } } impl<I, P> Value<I, P> where I: HasLength, { /// Convert the full primitive value into a single string. /// /// If the value contains multiple strings, they are concatenated /// (separated by the standard DICOM value delimiter `'\\'`) /// into an owned string. /// /// Returns an error if the value is not primitive. pub fn to_str(&self) -> Result<Cow<str>, CastValueError> { match self { Value::Primitive(prim) => Ok(prim.to_str()), _ => Err(CastValueError { requested: "string", got: self.value_type(), }), } } /// Convert the full primitive value into a clean string. /// /// Returns an error if the value is not primitive. pub fn to_clean_str(&self) -> Result<Cow<str>, CastValueError> { match self { Value::Primitive(prim) => Ok(prim.to_clean_str()), _ => Err(CastValueError { requested: "string", got: self.value_type(), }), } } /// Convert the full primitive value into a sequence of strings. 
/// /// If the value is a primitive, it will be converted into /// a vector of strings as described in [`PrimitiveValue::to_multi_str`]. /// /// Returns an error if the value is not primitive. /// /// [`PrimitiveValue::to_multi_str`]: ../enum.PrimitiveValue.html#to_multi_str pub fn to_multi_str(&self) -> Result<Cow<[String]>, CastValueError> { match self { Value::Primitive(prim) => Ok(prim.to_multi_str()), _ => Err(CastValueError { requested: "string", got: self.value_type(), }), } } /// Convert the full primitive value into raw bytes. /// /// String values already encoded with the `Str` and `Strs` variants /// are provided in UTF-8. /// /// Returns an error if the value is not primitive. pub fn to_bytes(&self) -> Result<Cow<[u8]>, CastValueError> { match self { Value::Primitive(prim) => Ok(prim.to_bytes()), _ => Err(CastValueError { requested: "bytes", got: self.value_type(), }), } } /// Retrieve and convert the primitive value into an integer. /// /// If the value is a primitive, it will be converted into /// an integer as described in [`PrimitiveValue::to_int`]. /// /// [`PrimitiveValue::to_int`]: ../enum.PrimitiveValue.html#to_int pub fn to_int<T>(&self) -> Result<T, ConvertValueError> where T: Clone, T: NumCast, T: FromStr<Err = std::num::ParseIntError>, { match self { Value::Primitive(v) => v.to_int::<T>(), _ => Err(ConvertValueError { requested: "integer", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a sequence of integers. /// /// If the value is a primitive, it will be converted into /// a vector of integers as described in [PrimitiveValue::to_multi_int]. 
/// /// [PrimitiveValue::to_multi_int]: ../enum.PrimitiveValue.html#to_multi_int pub fn to_multi_int<T>(&self) -> Result<Vec<T>, ConvertValueError> where T: Clone, T: NumCast, T: FromStr<Err = std::num::ParseIntError>, { match self { Value::Primitive(v) => v.to_multi_int::<T>(), _ => Err(ConvertValueError { requested: "integer", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value /// into a single-precision floating point number. /// /// If the value is a primitive, it will be converted into /// a number as described in [`PrimitiveValue::to_float32`]. /// /// [`PrimitiveValue::to_float32`]: ../enum.PrimitiveValue.html#to_float32 pub fn to_float32(&self) -> Result<f32, ConvertValueError> { match self { Value::Primitive(v) => v.to_float32(), _ => Err(ConvertValueError { requested: "float32", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value /// into a sequence of single-precision floating point numbers. /// /// If the value is a primitive, it will be converted into /// a vector of numbers as described in [`PrimitiveValue::to_multi_float32`]. /// /// [`PrimitiveValue::to_multi_float32`]: ../enum.PrimitiveValue.html#to_multi_float32 pub fn to_multi_float32(&self) -> Result<Vec<f32>, ConvertValueError> { match self { Value::Primitive(v) => v.to_multi_float32(), _ => Err(ConvertValueError { requested: "float32", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value /// into a double-precision floating point number. /// /// If the value is a primitive, it will be converted into /// a number as described in [`PrimitiveValue::to_float64`]. 
/// /// [`PrimitiveValue::to_float64`]: ../enum.PrimitiveValue.html#to_float64 pub fn to_float64(&self) -> Result<f64, ConvertValueError> { match self { Value::Primitive(v) => v.to_float64(), _ => Err(ConvertValueError { requested: "float64", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value /// into a sequence of double-precision floating point numbers. /// /// If the value is a primitive, it will be converted into /// a vector of numbers as described in [`PrimitiveValue::to_multi_float64`]. /// /// [`PrimitiveValue::to_multi_float64`]: ../enum.PrimitiveValue.html#to_multi_float64 pub fn to_multi_float64(&self) -> Result<Vec<f64>, ConvertValueError> { match self { Value::Primitive(v) => v.to_multi_float64(), _ => Err(ConvertValueError { requested: "float64", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `DicomDate`. /// /// If the value is a primitive, it will be converted into /// a `DicomDate` as described in [`PrimitiveValue::to_date`]. /// pub fn to_date(&self) -> Result<DicomDate, ConvertValueError> { match self { Value::Primitive(v) => v.to_date(), _ => Err(ConvertValueError { requested: "DicomDate", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a sequence of `DicomDate`s. /// /// If the value is a primitive, it will be converted into /// a vector of `DicomDate` as described in [`PrimitiveValue::to_multi_date`]. /// pub fn to_multi_date(&self) -> Result<Vec<DicomDate>, ConvertValueError> { match self { Value::Primitive(v) => v.to_multi_date(), _ => Err(ConvertValueError { requested: "DicomDate", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `DicomTime`. /// /// If the value is a primitive, it will be converted into /// a `DicomTime` as described in [`PrimitiveValue::to_time`]. 
/// pub fn to_time(&self) -> Result<DicomTime, ConvertValueError> { match self { Value::Primitive(v) => v.to_time(), _ => Err(ConvertValueError { requested: "DicomTime", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a sequence of `DicomTime`s. /// /// If the value is a primitive, it will be converted into /// a vector of `DicomTime` as described in [`PrimitiveValue::to_multi_time`]. /// pub fn to_multi_time(&self) -> Result<Vec<DicomTime>, ConvertValueError> { match self { Value::Primitive(v) => v.to_multi_time(), _ => Err(ConvertValueError { requested: "DicomTime", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `DicomDateTime`. /// /// If the value is a primitive, it will be converted into /// a `DateTime` as described in [`PrimitiveValue::to_datetime`]. /// pub fn to_datetime( &self, default_offset: FixedOffset, ) -> Result<DicomDateTime, ConvertValueError> { match self { Value::Primitive(v) => v.to_datetime(default_offset), _ => Err(ConvertValueError { requested: "DicomDateTime", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a sequence of `DicomDateTime`s. /// /// If the value is a primitive, it will be converted into /// a vector of `DicomDateTime` as described in [`PrimitiveValue::to_multi_datetime`]. /// pub fn to_multi_datetime( &self, default_offset: FixedOffset, ) -> Result<Vec<DicomDateTime>, ConvertValueError> { match self { Value::Primitive(v) => v.to_multi_datetime(default_offset), _ => Err(ConvertValueError { requested: "DicomDateTime", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `DateRange`. /// /// If the value is a primitive, it will be converted into /// a `DateRange` as described in [`PrimitiveValue::to_date_range`]. 
/// pub fn to_date_range(&self) -> Result<DateRange, ConvertValueError> { match self { Value::Primitive(v) => v.to_date_range(), _ => Err(ConvertValueError { requested: "DateRange", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `TimeRange`. /// /// If the value is a primitive, it will be converted into /// a `TimeRange` as described in [`PrimitiveValue::to_time_range`]. /// pub fn to_time_range(&self) -> Result<TimeRange, ConvertValueError> { match self { Value::Primitive(v) => v.to_time_range(), _ => Err(ConvertValueError { requested: "TimeRange", original: self.value_type(), cause: None, }), } } /// Retrieve and convert the primitive value into a `DateTimeRange`. /// /// If the value is a primitive, it will be converted into /// a `DateTimeRange` as described in [`PrimitiveValue::to_datetime_range`]. /// pub fn to_datetime_range( &self, offset: FixedOffset, ) -> Result<DateTimeRange, ConvertValueError> { match self { Value::Primitive(v) => v.to_datetime_range(offset), _ => Err(ConvertValueError { requested: "DateTimeRange", original: self.value_type(), cause: None, }), } } /// Retrieves the primitive value as a DICOM tag. pub fn to_tag(&self) -> Result<Tag, CastValueError> { match self { Value::Primitive(PrimitiveValue::Tags(v)) => Ok(v[0]), _ => Err(CastValueError { requested: "tag", got: self.value_type(), }), } } } /// Macro for implementing getters to single and multi-values, /// by delegating to `PrimitiveValue`. /// /// Should be placed inside `Value`'s impl block. macro_rules! impl_primitive_getters { ($name_single: ident, $name_multi: ident, $variant: ident, $ret: ty) => { /// Get a single value of the requested type. /// /// If it contains multiple values, /// only the first one is returned. /// An error is returned if the variant is not compatible. 
pub fn $name_single(&self) -> Result<$ret, CastValueError> { match self { Value::Primitive(v) => v.$name_single(), value => Err(CastValueError { requested: stringify!($name_single), got: value.value_type(), }), } } /// Get a sequence of values of the requested type without copying. /// /// An error is returned if the variant is not compatible. pub fn $name_multi(&self) -> Result<&[$ret], CastValueError> { match self { Value::Primitive(v) => v.$name_multi(), value => Err(CastValueError { requested: stringify!($name_multi), got: value.value_type(), }), } } }; } impl<I, P> Value<I, P> { /// Get a single string value. /// /// If it contains multiple strings, /// only the first one is returned. /// /// An error is returned if the variant is not compatible. /// /// To enable conversions of other variants to a textual representation, /// see [`to_str()`] instead. /// /// [`to_str()`]: #method.to_str pub fn string(&self) -> Result<&str, CastValueError> { match self { Value::Primitive(v) => v.string(), _ => Err(CastValueError { requested: "string", got: self.value_type(), }), } } /// Get the inner sequence of string values /// if the variant is either `Str` or `Strs`. /// /// An error is returned if the variant is not compatible. /// /// To enable conversions of other variants to a textual representation, /// see [`to_str()`] instead. 
/// /// [`to_str()`]: #method.to_str pub fn strings(&self) -> Result<&[String], CastValueError> { match self { Value::Primitive(v) => v.strings(), _ => Err(CastValueError { requested: "strings", got: self.value_type(), }), } } impl_primitive_getters!(tag, tags, Tags, Tag); impl_primitive_getters!(date, dates, Date, DicomDate); impl_primitive_getters!(time, times, Time, DicomTime); impl_primitive_getters!(datetime, datetimes, DateTime, DicomDateTime); impl_primitive_getters!(uint8, uint8_slice, U8, u8); impl_primitive_getters!(uint16, uint16_slice, U16, u16); impl_primitive_getters!(int16, int16_slice, I16, i16); impl_primitive_getters!(uint32, uint32_slice, U32, u32); impl_primitive_getters!(int32, int32_slice, I32, i32); impl_primitive_getters!(int64, int64_slice, I64, i64); impl_primitive_getters!(uint64, uint64_slice, U64, u64); impl_primitive_getters!(float32, float32_slice, F32, f32); impl_primitive_getters!(float64, float64_slice, F64, f64); } impl<I, P> From<PrimitiveValue> for Value<I, P> { fn from(v: PrimitiveValue) -> Self { Value::Primitive(v) } } #[cfg(test)] mod tests { use super::*; use crate::dicom_value; use crate::header::EmptyObject; use smallvec::smallvec; #[test] fn to_int() { let value = Value::new(dicom_value!(I32, [1, 2, 5])); assert_eq!(value.to_int::<u32>().unwrap(), 1); assert_eq!(value.to_int::<i32>().unwrap(), 1); assert_eq!(value.to_int::<u16>().unwrap(), 1); assert_eq!(value.to_int::<i16>().unwrap(), 1); assert_eq!(value.to_int::<u64>().unwrap(), 1); assert_eq!(value.to_int::<i64>().unwrap(), 1); assert_eq!(value.to_multi_int::<i32>().unwrap(), vec![1, 2, 5]); assert_eq!(value.to_multi_int::<u32>().unwrap(), vec![1, 2, 5]); // sequence values can't be turned to an int let value = Value::<EmptyObject, _>::new_sequence(smallvec![], Length::UNDEFINED); assert!(matches!( value.to_int::<u32>(), Err(ConvertValueError { requested: "integer", original: ValueType::Item, .. 
}) )); } #[test] fn to_float() { let value = Value::new(dicom_value!(F64, [1., 2., 5.])); assert_eq!(value.to_float32().unwrap(), 1.); assert_eq!(value.to_float64().unwrap(), 1.); assert_eq!(value.to_multi_float32().unwrap(), vec![1., 2., 5.]); assert_eq!(value.to_multi_float64().unwrap(), vec![1., 2., 5.]); // sequence values can't be turned to a number let value = Value::<EmptyObject, _>::new_sequence(smallvec![], Length::UNDEFINED); assert!(matches!( value.to_float32(), Err(ConvertValueError { requested: "float32", original: ValueType::Item, .. }) )); } #[test] fn getters() { assert_eq!( Value::new(dicom_value!(Strs, ["Smith^John"])) .string() .unwrap(), "Smith^John" ); assert_eq!( Value::new(dicom_value!(Strs, ["Smith^John"])) .strings() .unwrap(), &["Smith^John"] ); assert_eq!(Value::new(dicom_value!(I32, [1, 2, 5])).int32().unwrap(), 1,); assert_eq!( Value::new(dicom_value!(I32, [1, 2, 5])) .int32_slice() .unwrap(), &[1, 2, 5], ); assert!(matches!( Value::new(dicom_value!(I32, [1, 2, 5])).uint32(), Err(CastValueError { requested: "uint32", got: ValueType::I32, .. }) )); assert!(matches!( Value::new(dicom_value!(I32, [1, 2, 5])).strings(), Err(CastValueError { requested: "strings", got: ValueType::I32, .. }) )); assert_eq!( Value::new(PrimitiveValue::Date(smallvec![DicomDate::from_ymd( 2014, 10, 12 ) .unwrap()])) .date() .ok(), Some(DicomDate::from_ymd(2014, 10, 12).unwrap()), ); assert_eq!( Value::new(PrimitiveValue::Date( smallvec![DicomDate::from_ymd(2014, 10, 12).unwrap(); 5] )) .dates() .unwrap(), &[DicomDate::from_ymd(2014, 10, 12).unwrap(); 5] ); assert!(matches!( Value::new(PrimitiveValue::Date(smallvec![DicomDate::from_ymd( 2014, 10, 12 ) .unwrap()])) .time(), Err(CastValueError { requested: "time", got: ValueType::Date, .. }) )); } }
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;

/// Read `input.txt` as a tab-separated spreadsheet of integers,
/// one row per line.
///
/// Panics if the file is missing or any cell fails to parse as `i32`.
fn get_input() -> Vec<Vec<i32>> {
    let file = File::open("input.txt").unwrap();
    let reader = BufReader::new(file);
    reader
        .lines()
        .map(|line| {
            line.unwrap()
                .split('\t')
                .map(|s| s.parse().unwrap())
                .collect()
        })
        .collect()
}

/// Checksum for part 1: the sum over all rows of `max - min`.
///
/// Takes a slice (any `&Vec<Vec<i32>>` coerces) instead of `&Vec<_>`.
/// An empty row contributes 0; the previous fold over
/// `(i32::MAX, i32::MIN)` sentinels would overflow on `max - min` there.
fn part_1(input: &[Vec<i32>]) -> i32 {
    input
        .iter()
        .map(|nums| match (nums.iter().max(), nums.iter().min()) {
            (Some(max), Some(min)) => max - min,
            _ => 0, // empty row: nothing to measure
        })
        .sum()
}

/// Checksum for part 2: for each row, find the one pair of values where
/// one evenly divides the other and sum the quotients.
///
/// Panics with a clear message if a row has no evenly-dividing pair
/// (the puzzle input guarantees exactly one per row).
fn part_2(input: &[Vec<i32>]) -> i32 {
    input
        .iter()
        .map(|nums| {
            // Check every unordered pair once; first match wins,
            // matching the labelled-break search this replaces.
            for (i, &x) in nums.iter().enumerate() {
                for &y in &nums[i + 1..] {
                    if x % y == 0 {
                        return x / y;
                    } else if y % x == 0 {
                        return y / x;
                    }
                }
            }
            panic!("row has no evenly-dividing pair")
        })
        .sum()
}

fn main() {
    let lines = get_input();
    println!("Part 1 checksum: {}", part_1(&lines));
    println!("Part 2 checksum: {}", part_2(&lines));
}
use tokio::process::Command; use anyhow::{Result, Context}; use async_trait::async_trait; use crate::{ services::model::{Nameable, Ensurable}, helpers::ExitStatusIntoUnit }; static NAME: &str = "Port Forward"; #[derive(Default)] pub struct PortForward { kfp_only: bool, port: String, address: String } impl PortForward { pub fn with_kfp_only(mut self, o: bool) -> Self { self.kfp_only = o; self } pub fn with_port(mut self, p: &str) -> Self { self.port = p.to_owned(); self } pub fn with_address(mut self, a: &str) -> Self { self.address = a.to_owned(); self } } impl Nameable for PortForward { fn name(&self) -> &'static str { NAME } } #[async_trait] impl Ensurable for PortForward { async fn is_present(&self) -> Result<bool> { // TODO: This is a hack: eventually, check to see if the port is already open. Ok(false) } async fn make_present(&self) -> Result<()> { if self.kfp_only { Command::new("kubectl") .arg("port-forward") .arg("--address") .arg(&self.address) .arg("-n") .arg("kubeflow") .arg("svc/ml-pipeline-ui") .arg(format!("{}:80", self.port)) .status().await .status_to_unit() .context("Unable to start the port-forward.")?; } else { Command::new("kubectl") .arg("port-forward") .arg("--address") .arg(&self.address) .arg("-n") .arg("istio-system") .arg("svc/istio-ingressgateway") .arg(format!("{}:80", self.port)) .status().await .status_to_unit() .context("Unable to start the port-forward.")?; } // This is a blocking call... Ok(()) } }
//! THavalon game logic //! //! The game implementation is broken into several layers: //! - [`GameSpec`] describes static game rules based on the number of players, such as the size of each mission and //! which roles may be in the game. //! - [`Game`] holds configuration from when the game is rolled, such as which players have which roles and who the //! assassin is. //! - [`GameState`] and [`Phase`] implement a state machine for while the game is running. //! - [`Interactions`] abstracts over communication with the players. use std::collections::HashMap; use std::fmt; use rand::prelude::*; use serde::{Deserialize, Serialize}; use thiserror::Error; pub mod builder; mod engine; mod interactions; pub mod messages; mod role; pub mod snapshot; mod state; pub use self::messages::{Action, Message}; pub use self::role::*; /// A mission number (from 1 to 5) pub type MissionNumber = u8; /// Game rules determined by the number of players #[derive(Debug, Clone)] pub struct GameSpec { /// Number of players in the game pub players: u8, /// The number of players on each mission pub mission_sizes: [usize; 5], /// Allowed good roles in the game pub good_roles: &'static [Role], /// Allowed evil roles in the game pub evil_roles: &'static [Role], /// The number of players on the good team pub good_players: u8, /// The maximum number of proposals allowed before force activates. Proposals on mission 1 and proposals that are /// sent do not count towards this limit. pub max_proposals: usize, /// The maximum number of times Maeve can obscure voting results in a game pub max_maeve_obscures: usize, /// True if mission 4 requires at least two failures double_fail_mission_four: bool, } /// Fixed information about a player, decided at startup #[derive(Debug, Clone)] pub struct Player { pub name: String, pub role: Role, } /// A collection of players, indexed in various useful ways. 
#[derive(Debug, Clone)]
pub struct Players {
    // Player name -> full player record.
    players: HashMap<String, Player>,
    // Role -> name of the player holding it. Assumes roles are unique per
    // game; a duplicate role would silently overwrite this index.
    roles: HashMap<Role, String>,
    // Names of the good-team players, in insertion order.
    good_players: Vec<String>,
    // Names of the evil-team players, in insertion order.
    evil_players: Vec<String>,
}

/// A card played on a mission.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub enum Card {
    Success,
    Fail,
    Reverse,
}

/// Configuration fixed when the game is rolled: role assignments, seating
/// (proposal) order, the assassin, and the rules in effect.
#[derive(Debug, Clone)]
pub struct Game {
    players: Players,
    // Per-player startup information (what each player is told about
    // their role and the other players).
    info: HashMap<String, RoleDetails>,
    proposal_order: Vec<String>,
    assassin: String,
    priority_target: PriorityTarget,
    spec: &'static GameSpec,
}

/// Errors that can occur while rolling a new game.
#[derive(Debug, Clone, Error)]
pub enum CreateGameError {
    #[error("{0}-player games not supported")]
    UnsupportedSize(usize),
}

impl Game {
    /// Randomly assign roles, seating order, the assassin, and the
    /// assassination priority target for the given player names.
    ///
    /// Fails if there is no [`GameSpec`] for this number of players.
    pub fn roll(mut names: Vec<String>) -> Result<Game, CreateGameError> {
        let spec = GameSpec::for_players(names.len())?;
        let mut rng = thread_rng();

        // Draw exactly the number of good/evil roles this spec calls for.
        let good_roles = spec
            .good_roles
            .choose_multiple(&mut rng, spec.good_players());
        let evil_roles = spec
            .evil_roles
            .choose_multiple(&mut rng, spec.evil_players());

        // Shuffle names so pairing with the drawn roles is random.
        names.shuffle(&mut rng);
        let mut players = Players::new();
        for (role, name) in good_roles.chain(evil_roles).cloned().zip(names.into_iter()) {
            players.add_player(name, role);
        }

        let assassin = players
            .evil_players()
            .choose(&mut rng)
            .cloned()
            .expect("Could not choose an assassin, game contained no evil players");

        // Collect the priority targets present in this game, then pick one
        // at random (or None if no special target is in play).
        let mut priority_targets = Vec::new();
        if players.has_role(Role::Merlin) {
            priority_targets.push(PriorityTarget::Merlin);
        }
        if players.has_role(Role::Tristan) && players.has_role(Role::Iseult) {
            priority_targets.push(PriorityTarget::Lovers);
        }
        // TODO: Guinevere
        let priority_target = priority_targets
            .choose(&mut rng)
            .copied()
            .unwrap_or(PriorityTarget::None);

        // Generate the startup information each player will be shown.
        let mut info = HashMap::with_capacity(players.len());
        for player in players.iter() {
            info.insert(
                player.name.clone(),
                player.role.generate_info(
                    &mut rng,
                    &player.name,
                    spec,
                    &players,
                    &assassin,
                    priority_target,
                ),
            );
        }

        let mut proposal_order = info.keys().cloned().collect::<Vec<_>>();
        proposal_order.shuffle(&mut rng);

        Ok(Game {
            players,
            info,
            proposal_order,
            assassin,
            priority_target,
            spec,
        })
    }

    /// Seating order used for mission proposals.
    pub fn proposal_order(&self) -> &[String] {
        self.proposal_order.as_slice()
    }

    /// Find the next player in proposal order after the given one.
    ///
    /// Panics if `player` is not in the game.
    pub fn next_proposer(&self, player: &str) -> &str {
        let index = self
            .proposal_order
            .iter()
            .position(|p| *p == player)
            .unwrap();
        // Wrap around from the last seat back to the first.
        if index == self.proposal_order.len() - 1 {
            &self.proposal_order[0]
        } else {
            &self.proposal_order[index + 1]
        }
    }

    /// The number of players in the game
    pub fn size(&self) -> usize {
        self.proposal_order.len()
    }

    /// Look up the display name associated with a given role, if it exists.
    pub fn display_name_from_role(&self, role: Role) -> Option<&String> {
        self.info.iter().find_map(|(player, info)| {
            if info.get_role() == role {
                Some(player)
            } else {
                None
            }
        })
    }
}

impl Players {
    fn new() -> Players {
        Players {
            players: HashMap::new(),
            roles: HashMap::new(),
            good_players: Vec::new(),
            evil_players: Vec::new(),
        }
    }

    // Register a player under every index (by name, by role, by team).
    fn add_player(&mut self, name: String, role: Role) {
        self.roles.insert(role, name.clone());
        if role.is_good() {
            self.good_players.push(name.clone());
        } else {
            self.evil_players.push(name.clone());
        }
        self.players.insert(name.clone(), Player { name, role });
    }

    // Look up the player holding `role`, if that role is in the game.
    fn by_role(&self, role: Role) -> Option<&Player> {
        self.roles.get(&role).map(|name| &self.players[name])
    }

    fn has_role(&self, role: Role) -> bool {
        self.roles.contains_key(&role)
    }

    fn by_name(&self, name: &str) -> Option<&Player> {
        self.players.get(name)
    }

    // True if `name` is in the game and holds exactly `role`.
    fn is(&self, name: &str, role: Role) -> bool {
        match self.players.get(name) {
            Some(player) => player.role == role,
            None => false,
        }
    }

    fn good_players(&self) -> &[String] {
        self.good_players.as_slice()
    }

    fn evil_players(&self) -> &[String] {
        self.evil_players.as_slice()
    }

    // Iterate over all players in arbitrary (HashMap) order.
    fn iter(&self) -> impl Iterator<Item = &Player> {
        self.players.values()
    }

    fn len(&self) -> usize {
        self.players.len()
    }
}

impl fmt::Display for Card {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            Card::Success => "Success",
            Card::Fail => "Fail",
            Card::Reverse => "Reverse",
        })
    }
}

impl GameSpec {
    /// Look up the rule set for a game with `players` players.
    // NOTE(review): 6-player games are not defined — confirm intentional.
    pub fn for_players(players: usize) -> Result<&'static GameSpec, CreateGameError> {
        match players {
            2 => Ok(&TWO_PLAYER),
            3 => Ok(&THREE_PLAYER),
            4 => Ok(&FOUR_PLAYER),
            5 => Ok(&FIVE_PLAYER),
            7 => Ok(&SEVEN_PLAYER),
            8 => Ok(&EIGHT_PLAYER),
            _ => Err(CreateGameError::UnsupportedSize(players)),
        }
    }

    /// Number of players sent on `mission` (missions are numbered from 1).
    pub fn mission_size(&self, mission: MissionNumber) -> usize {
        self.mission_sizes[mission as usize - 1]
    }

    /// Number of good-team players, as a `usize` for sizing collections.
    pub fn good_players(&self) -> usize {
        self.good_players as usize
    }

    /// Number of evil-team players (everyone who is not good).
    pub fn evil_players(&self) -> usize {
        (self.players - self.good_players) as usize
    }

    /// True if mission 4 requires at least two failures to fail.
    pub fn double_fail_mission_four(&self) -> bool {
        self.double_fail_mission_four
    }

    /// Tests if `role` is allowed in games of this size
    pub fn has_role(&self, role: Role) -> bool {
        if role.is_evil() {
            self.evil_roles.contains(&role)
        } else {
            self.good_roles.contains(&role)
        }
    }
}

static FIVE_PLAYER: GameSpec = GameSpec {
    players: 5,
    mission_sizes: [2, 3, 2, 3, 3],
    good_roles: &[
        Role::Merlin,
        Role::Lancelot,
        Role::Percival,
        Role::Tristan,
        Role::Iseult,
        Role::Nimue,
    ],
    evil_roles: &[Role::Mordred, Role::Morgana, Role::Maelegant, Role::Maeve],
    good_players: 3,
    max_proposals: 5,
    max_maeve_obscures: 2,
    double_fail_mission_four: false,
};

static SEVEN_PLAYER: GameSpec = GameSpec {
    players: 7,
    mission_sizes: [2, 3, 3, 4, 4],
    good_roles: &[
        Role::Merlin,
        Role::Lancelot,
        Role::Percival,
        Role::Tristan,
        Role::Iseult,
        Role::Nimue,
    ],
    evil_roles: &[
        Role::Mordred,
        Role::Morgana,
        Role::Maelegant,
        Role::Maeve,
        Role::Agravaine,
    ],
    good_players: 4,
    max_proposals: 7,
    max_maeve_obscures: 3,
    double_fail_mission_four: true,
};

static EIGHT_PLAYER: GameSpec = GameSpec {
    players: 8,
    mission_sizes: [3, 4, 4, 5, 5],
    good_roles: &[
        Role::Merlin,
        Role::Lancelot,
        Role::Percival,
        Role::Tristan,
        Role::Iseult,
        Role::Arthur,
    ],
    evil_roles: &[
        Role::Mordred,
        Role::Morgana,
        Role::Maelegant,
        Role::Maeve,
        Role::Agravaine,
    ],
    good_players: 5,
    max_proposals: 10,
    max_maeve_obscures: 3,
    double_fail_mission_four: true,
};

/// Two-player games, for testing
static TWO_PLAYER: GameSpec = GameSpec {
    players: 2,
    mission_sizes: [1, 1, 2, 2, 2],
    good_roles: Role::ALL_GOOD,
    evil_roles: Role::ALL_EVIL,
    good_players: 1,
    max_proposals: 2,
    max_maeve_obscures: 2,
    double_fail_mission_four: false,
};

/// Three-player games, for testing
static THREE_PLAYER: GameSpec = GameSpec {
    players: 3,
    mission_sizes: [1, 2, 2, 2, 3],
    good_roles: Role::ALL_GOOD,
    evil_roles: Role::ALL_EVIL,
    good_players: 2,
    max_proposals: 3,
    max_maeve_obscures: 2,
    double_fail_mission_four: false,
};

static FOUR_PLAYER: GameSpec = GameSpec {
    players: 4,
    mission_sizes: [2, 2, 3, 3, 4],
    good_roles: Role::ALL_GOOD,
    evil_roles: Role::ALL_EVIL,
    good_players: 2,
    max_proposals: 4,
    max_maeve_obscures: 2,
    double_fail_mission_four: true,
};
use error::Result; use args::Args; use systemd::id128::Id128; use systemd::journal::{Journal, JournalFiles, JournalRecord, JournalSeek}; use std::time::SystemTime; use chrono::*; #[derive(Serialize, Deserialize, Debug)] pub struct Systemd { pub code_file: String, pub code_line: usize, pub code_func: String, } #[derive(Serialize, Deserialize, Debug)] pub enum NetworkManagerLogLevel { Err, Warn, Info, Debug, Trace, } #[derive(Serialize, Deserialize, Debug)] pub enum NetworkManagerDomain { Platform, Rfkill, Ether, Wifi, Bt, Mb, Dhcp4, Dhcp6, Ppp, WifiScan, Ip4, Ip6, Autoip4, Dns, Vpn, Sharing, Supplicant, Agents, Settings, Suspend, Core, Device, Olpc, Wimax, Infiniband, Firewall, Adsl, Bond, Vlan, Bridge, DbusProps, Team, Concheck, Dcb, Dispatch, Audit, Systemd, VpnPlugin, Proxy, } #[derive(Serialize, Deserialize, Debug)] pub struct NetworkManager { pub log_level: NetworkManagerLogLevel, pub domain: NetworkManagerDomain, pub device: Option<String>, pub code_file: String, pub code_line: usize, } #[derive(Serialize, Deserialize, Debug)] pub enum Identifier { Kernel, Systemd(Systemd), NetworkManager(NetworkManager), NetworkManagerDispatcher, WpaSupplicant, } #[derive(Serialize, Deserialize, Debug)] pub struct Entry { message: String, time: DateTime<Utc>, identifier: Identifier, } fn record_to_entry(record: JournalRecord, timestamp: SystemTime) -> Option<Entry> { let identifier = match record.get("SYSLOG_IDENTIFIER")? 
as &str { "kernel" => Identifier::Kernel, "systemd" => { let code_file = record.get("CODE_FILE")?.clone(); let code_line = if let Ok(code_line) = record.get("CODE_LINE")?.parse::<usize>() { code_line } else { return None; }; let code_func = record.get("CODE_FUNC")?.clone(); Identifier::Systemd(Systemd { code_file, code_line, code_func, }) } "NetworkManager" => { let code_file = record.get("CODE_FILE")?.clone(); let code_line = if let Ok(code_line) = record.get("CODE_LINE")?.parse::<usize>() { code_line } else { return None; }; let log_level = NetworkManagerLogLevel::Info; let domain = NetworkManagerDomain::Core; let device = None; Identifier::NetworkManager(NetworkManager { log_level, domain, device, code_file, code_line, }) } "nm-dispatcher" => Identifier::NetworkManagerDispatcher, "wpa_supplicant" => Identifier::WpaSupplicant, _ => return None, }; let message = record.get("MESSAGE")?.clone(); let time = DateTime::<Utc>::from(timestamp); Some(Entry { message, time, identifier, }) } pub fn watch(_args: &Args) -> Result<()> { let boot_id = Id128::from_boot()?; let mut journal = Journal::open(JournalFiles::All, false, false)?; journal.match_add("_BOOT_ID", boot_id.to_string().as_str())?; journal.seek(JournalSeek::Head)?; let mut entries = vec![]; loop { match journal.next_record()? { Some(record) => { let timestamp = journal.timestamp()?; if let Some(entry) = record_to_entry(record, timestamp) { entries.push(entry); } } None => break, } } for entry in &entries[..1000] { println!("{:?}", entry); } Ok(()) }
use crate::effects;

use serde::Deserialize;

/// Index of every effect-control value kind that can be deserialized.
///
/// Each variant wraps the matching control type from [`effects`]; serde's
/// externally-tagged enum representation selects the variant by name.
#[derive(Debug, Deserialize)]
pub enum Index {
    Slider(effects::Slider),
    Angle(effects::Angle),
    Color(effects::Color),
    Point(effects::Point),
    CheckBox(effects::CheckBox),
    Group(effects::Group),
    NoValue(effects::NoValue),
    DropDown(effects::DropDown),
    CustomValue(effects::CustomValue),
    Layer(effects::Layer),
    Tint(effects::Tint),
    Fill(effects::Fill),
    Stroke(effects::Stroke),
    Tritone(effects::Tritone),
    ProLevels(effects::ProLevels),
}
extern crate gui;
use gui::*;

// State that pushes `Trans` on top of itself on its first update.
struct Push;

impl State for Push {
    fn update(&mut self, _: &StateData) -> Transition {
        println!("Pushing State");
        Transition::Push(Box::new(Trans))
    }
}

// State that replaces itself with `Pop` on its first update.
struct Trans;

impl State for Trans {
    fn update(&mut self, _: &StateData) -> Transition {
        println!("Transitioning State");
        Transition::Trans(Box::new(Pop))
    }
}

// State that removes itself from the stack on its first update.
struct Pop;

impl State for Pop {
    fn update(&mut self, _: &StateData) -> Transition {
        println!("Popping State");
        Transition::Pop
    }
}

// Drives the Push -> Trans -> Pop chain through the application loop.
// NOTE(review): assumes `Application::run` returns once the state stack
// empties — confirm against the `gui` crate.
#[test]
fn states() {
    Application::new()
        .run(|_| { Box::new(Push) });
}
use std::mem;

use finalfusion::prelude::*;
use finalfusion::storage::{Storage, StorageView};
use ndarray::Array2;

use crate::check_null;

/// Return the number of rows in the embedding matrix.
///
/// # Safety
///
/// `embeddings` must be a valid pointer to an `Embeddings` value
/// previously handed out by this library (null is rejected by
/// `check_null!`).
#[no_mangle]
pub unsafe extern "C" fn ff_storage_rows(
    embeddings: *const Embeddings<VocabWrap, StorageWrap>,
) -> usize {
    check_null!(embeddings);
    let embeddings = &*embeddings;
    embeddings.storage().shape().0
}

/// Copy the entire embedding matrix.
///
/// Returns a heap-allocated, row-major `f32` buffer of `rows * dims`
/// elements; ownership of the buffer passes to the caller.
///
/// # Safety
///
/// `embeddings` must be a valid pointer (see `ff_storage_rows`).
#[no_mangle]
pub unsafe extern "C" fn ff_storage_copy(
    embeddings: *const Embeddings<VocabWrap, StorageWrap>,
) -> *mut f32 {
    check_null!(embeddings);
    let embeddings = &*embeddings;
    // Array-backed storage can be copied straight from its view;
    // quantized storage has to be decoded row by row first.
    let array = match embeddings.storage() {
        StorageWrap::MmapArray(mmap) => mmap.view().to_owned(),
        StorageWrap::NdArray(array) => array.view().to_owned(),
        StorageWrap::QuantizedArray(quantized) => copy_storage_to_array(quantized.as_ref()),
        StorageWrap::MmapQuantizedArray(quantized) => copy_storage_to_array(quantized),
    };
    let mut v = array.into_raw_vec();
    let ptr = v.as_mut_ptr();
    // Deliberately leak the Vec: the caller takes ownership of the buffer.
    // NOTE(review): the Vec's capacity is discarded here, so the buffer
    // cannot be safely rebuilt into a Vec for deallocation unless
    // capacity == len — confirm the companion free function handles this.
    mem::forget(v);
    ptr
}

/// Copy storage to an array.
///
/// This should only be used for storage types that do not provide
/// an ndarray view that can be copied trivially, such as quantized
/// storage.
fn copy_storage_to_array(storage: &dyn Storage) -> Array2<f32> {
    let (rows, dims) = storage.shape();
    let mut array = Array2::<f32>::zeros((rows, dims));
    for idx in 0..rows {
        array.row_mut(idx).assign(&storage.embedding(idx));
    }
    array
}
use clipboard_win::raw::{register_format, format_name, format_name_big};

// A short custom format name should round-trip through the fixed-size
// buffer used by `format_name`.
#[test]
fn custom_format_smol() {
    const NAME: &str = "SMOL";
    let format = register_format(NAME).expect("To create format").get();
    let name = format_name(format).expect("To get name");
    assert_eq!(NAME, name.as_str());
}

// A name longer than the fixed-size buffer must go through the
// heap-allocating `format_name_big` variant instead.
#[test]
fn custom_format_big() {
    const NAME: &str = "ahdkajhfdsakjfhhdsakjgfdsakjgfdsakjghrdskjghfdskjghrdskjghfdkjghfds;kjghfd;kjgfdsjgfdskjgbfdkjgfdgkjfdsahgkjfdghkjfdgkjfdgfdkjgbfdkjgsakjdhsakjdhs";
    let format = match register_format(NAME) {
        Some(format) => format.get(),
        None => {
            panic!("Failed to register format: {}", std::io::Error::last_os_error());
        },
    };
    let name = format_name_big(format).expect("To get name");
    assert_eq!(NAME, name.as_str());
}
/// Part A is not implemented for this day; returns a fixed placeholder.
pub fn a(_input: &str) -> String {
    // `format!` with no interpolation is just a slower `to_string`.
    "aaa".to_string()
}

/// Part B: total fuel requirement including the fuel's own mass
/// (Advent of Code 2019, day 1).
///
/// Each line of `input` is a module mass. Fuel for a mass is
/// `mass / 3 - 2`, and fuel itself needs fuel, applied repeatedly.
/// Steps that come out non-positive contribute nothing — the previous
/// version added the *first* step unconditionally, subtracting from the
/// total for masses below 9.
///
/// Panics if a line is not a valid `i32`.
pub fn b(input: &str) -> String {
    let total: i32 = input
        .lines()
        .map(|line| {
            let mass: i32 = line.parse().unwrap();
            let mut step = mass / 3 - 2;
            let mut fuel = 0;
            // Keep refueling the fuel until the next step is non-positive.
            while step > 0 {
                fuel += step;
                step = step / 3 - 2;
            }
            fuel
        })
        .sum();
    total.to_string()
}
// Adaptive priority queue implementation
// by deNULL
use std::ptr;
use std::fmt;
use std::ops::{Index, IndexMut};
use std::cmp::Ordering;

// Index arithmetic for an implicit binary heap stored in a Vec.
macro_rules! parent { ($i:expr) => (($i - 1) >> 1) }
macro_rules! left { ($i:expr) => (($i << 1) + 1) }
macro_rules! right { ($i:expr) => (($i << 1) + 2) }

/// A heap entry: a sortable `key` plus user `data`.
///
/// `index` is the node's current slot in the backing vector, maintained so
/// the node can be located for `remove`; `removed` marks nodes that have
/// already been taken out of the queue.
pub struct APNode<T> {
    pub key: f64,
    index: usize,
    pub data: T,
    pub removed: bool
}

/// A min-heap priority queue keyed by `f64`.
///
/// `size` is the logical element count; the backing vector may be longer
/// until `trim` is called (slots past `size` are dead).
pub struct APQueue<T> {
    heap: Vec<APNode<T>>,
    pub size: usize
}

impl<T> APNode<T> {
}

impl<T> PartialOrd for APNode<T> {
    fn partial_cmp(&self, other: &APNode<T>) -> Option<Ordering> {
        self.key.partial_cmp(&other.key)
    }
}

impl<T> Ord for APNode<T> {
    // NaN keys compare as Equal rather than panicking.
    fn cmp(&self, other: &APNode<T>) -> Ordering {
        self.partial_cmp(other).unwrap_or(Ordering::Equal)
    }
}

impl<T> PartialEq for APNode<T> {
    // Tolerant float comparison: keys within 1e-10 count as equal.
    fn eq(&self, other: &APNode<T>) -> bool {
        (self.key - other.key).abs() < 1e-10f64
    }
}

impl<T> Eq for APNode<T> {}

impl<T> APQueue<T> {
    pub fn new() -> APQueue<T> {
        APQueue { heap: vec![], size: 0 }
    }

    // Swap two slots and keep both nodes' back-pointers in sync.
    fn swap(&mut self, i: usize, j: usize) {
        self.heap.swap(i, j);
        self[i].index = i;
        self[j].index = j;
    }

    // Sift the node at `index` up towards the root; returns the node at
    // its final position.
    fn upheap(&mut self, index: usize) -> &APNode<T> {
        let mut i = index;
        let mut p;
        while i > 0 {
            p = parent!(i);
            if self[i] > self[p] { break }
            self.swap(i, p);
            i = p;
        }
        &self[i]
    }

    // Sift the node at `index` down towards the leaves; returns the node
    // at its final position.
    fn downheap(&mut self, index: usize) -> &APNode<T> {
        let mut i = index;
        let mut l = left!(index);
        let mut r;
        let mut bi = index;
        while l < self.size {
            bi = l;
            r = l + 1;
            // Pick the smaller of the two children.
            if (r < self.size) && (self[l] > self[r]) {
                bi = r;
            }
            if self[bi] >= self[i] {
                return &self[i]
            }
            self.swap(i, bi);
            i = bi;
            l = left!(i);
        }
        &self[bi]
    }

    // Restore the heap property at `index`, moving up or down as needed.
    fn bubble(&mut self, index: usize) {
        if (index > 0) && (self[index] < self[parent!(index)]) {
            self.upheap(index);
        } else {
            self.downheap(index);
        }
    }

    /// Borrow the minimum-key node. Panics if the queue is empty.
    pub fn peek(&self) -> &APNode<T> {
        &self[0]
    }

    /// Remove and return the minimum-key node. Panics if the queue is
    /// empty.
    pub fn poll(&mut self) -> APNode<T> {
        self.trim();
        self.size -= 1;
        let node = self.heap.swap_remove(0);
        if self.size > 0 {
            // Fix the back-pointer of the node swapped into the root slot
            // BEFORE sifting: `downheap` only updates indices when it
            // actually swaps, so a node that stays at the root would
            // otherwise keep its stale index and break a later `remove`.
            self[0].index = 0;
            self.downheap(0);
        }
        node
    }

    /// Insert `data` with the given `key`; returns the inserted node at
    /// its final heap position.
    pub fn insert(&mut self, key: f64, data: T) -> &APNode<T> {
        let index = self.size;
        let node = APNode { key, index, data, removed: false };
        // Reuse a dead slot left behind by a lazy removal if one exists.
        if self.size < self.heap.len() {
            self.heap[self.size] = node
        } else {
            self.heap.push(node)
        }
        self.size += 1;
        self.upheap(index)
    }

    /// Remove and return the node at heap position `index`.
    pub fn remove_at(&mut self, index: usize) -> APNode<T> {
        self.trim();
        self.size -= 1;
        let mut node = self.heap.swap_remove(index);
        node.removed = true;
        if index < self.size {
            // The node swapped into this slot needs its back-pointer fixed
            // and may violate the heap property in either direction.
            self[index].index = index;
            self.bubble(index);
        }
        node
    }

    /// Remove the given node, if it has not been removed already.
    pub fn remove(&mut self, node: &APNode<T>) {
        if !node.removed {
            self.remove_at(node.index);
        }
    }

    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Drop dead slots past `size` from the backing vector.
    pub fn trim(&mut self) {
        self.heap.truncate(self.size)
    }
}

impl<T> Index<usize> for APQueue<T> {
    type Output = APNode<T>;
    fn index<'a>(&'a self, index: usize) -> &'a APNode<T> {
        &self.heap[index]
    }
}

impl<T> IndexMut<usize> for APQueue<T> {
    fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut APNode<T> {
        &mut self.heap[index]
    }
}

impl<T> fmt::Display for APQueue<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Propagate write errors with `?` instead of silently discarding
        // the `write!` results as the previous version did.
        write!(f, "(APQueue, size = {}, [", self.size)?;
        for index in 0..self.size {
            if index > 0 {
                write!(f, ", ")?;
            }
            write!(f, "{}", self[index].key)?;
        }
        write!(f, "])")
    }
}
use std::{collections::HashMap, path::Path}; use anyhow::{anyhow, Result}; use las_rs::Builder; use crate::las::{LASReader, LASWriter}; use super::{PointReader, PointWriter, SeekToPoint}; pub trait PointReadAndSeek: PointReader + SeekToPoint {} impl<T: PointReader + SeekToPoint> PointReadAndSeek for T {} type ReaderFactoryFn = dyn Fn(&Path) -> Result<Box<dyn PointReadAndSeek>>; type WriterFactoryFn = dyn Fn(&Path) -> Result<Box<dyn PointWriter>>; /// Factory that can create `PointReader` and `PointWriter` objects based on file extensions. Use this if you have a file path /// and just want to create a `PointReader` or `PointWriter` from this path, without knowing the type of file. The `Default` /// implementation supports all file formats that Pasture natively works with, custom formats can be registered using the /// `register_...` functions. An extension in this context is whatever [`Path::extension()`](Path::extension) returns for a valid file path pub struct IOFactory { reader_factories: HashMap<String, Box<ReaderFactoryFn>>, writer_factories: HashMap<String, Box<WriterFactoryFn>>, } impl IOFactory { /// Try to create a `PointReader` that can read from the given `file`. This function will fail if `file` has /// a format that is unsupported by Pasture, or if there are any I/O errors while trying to access `file`. 
pub fn make_reader(&self, file: &Path) -> Result<Box<dyn PointReadAndSeek>> { let extension = file.extension().ok_or_else(|| { anyhow!( "File extension could not be determined from path {}", file.display() ) })?; let extension_str = extension.to_str().ok_or_else(|| { anyhow!( "File extension of path {} is no valid Unicode string", file.display() ) })?; let extension_str_lower = extension_str.to_lowercase(); let factory = self .reader_factories .get(extension_str_lower.as_str()) .ok_or_else(|| { anyhow!( "Reading from point cloud files with extension {} is not supported", extension_str ) })?; factory(file) } /// Try to create a `PointWriter` for writing into the given `file`. This function will fail if `file` has /// a format that is unsupported by Pasture, or if there are any I/O errors while trying to access `file`. pub fn make_writer(&self, file: &Path) -> Result<Box<dyn PointWriter>> { let extension = file.extension().ok_or_else(|| { anyhow!( "File extension could not be determined from path {}", file.display() ) })?; let extension_str = extension.to_str().ok_or_else(|| { anyhow!( "File extension of path {} is no valid Unicode string", file.display() ) })?; let extension_str_lower = extension_str.to_lowercase(); let factory = self .writer_factories .get(extension_str_lower.as_str()) .ok_or_else(|| { anyhow!( "Writing to point cloud files with extension {} is not supported", extension_str ) })?; factory(file) } /// Returns `true` if the associated `IOFactory` supports creating `PointReader` objects for the given /// file `extension` pub fn supports_reading_from(&self, extension: &str) -> bool { let extension_lower = extension.to_lowercase(); self.reader_factories.contains_key(extension_lower.as_str()) } /// Returns `true` if the associated `IOFactory` supports creating `PointWriter` objects for the given /// file `extension` pub fn supports_writing_to(&self, extension: &str) -> bool { let extension_lower = extension.to_lowercase(); 
self.writer_factories.contains_key(extension_lower.as_str()) } /// Register a new readable file extension with the associated `IOFactory`. The `reader_factory` will be called whenever /// `extension` is encountered as a file extension in `make_reader`. Returns the previous reader factory function that /// was registered for `extension`, if there was any. File extensions are treated as lower-case internally, so if the /// extension `.FOO` is registered here, it will match `file.foo` and `file.FOO` (and all case-variations thereof). pub fn register_reader_for_extension< F: Fn(&Path) -> Result<Box<dyn PointReadAndSeek>> + 'static, >( &mut self, extension: &str, reader_factory: F, ) -> Option<Box<ReaderFactoryFn>> { let extension_lower = extension.to_lowercase(); self.reader_factories .insert(extension_lower, Box::new(reader_factory)) } /// Register a new writeable file extension with the associated `IOFactory`. The `writer_factory` will be called whenever /// `extension` is encountered as a file extension in `make_writer`. Returns the previous writer factory function that /// was registered for `extension`, if there was any. File extensions are treated as lower-case internally, so if the /// extension `.FOO` is registered here, it will match `file.foo` and `file.FOO` (and all case-variations thereof). 
pub fn register_writer_for_extension<F: Fn(&Path) -> Result<Box<dyn PointWriter>> + 'static>( &mut self, extension: &str, writer_factory: F, ) -> Option<Box<WriterFactoryFn>> { let extension_lower = extension.to_lowercase(); self.writer_factories .insert(extension_lower, Box::new(writer_factory)) } } impl Default for IOFactory { fn default() -> Self { let mut factory = Self { reader_factories: Default::default(), writer_factories: Default::default(), }; factory.register_reader_for_extension("las", |path| { let reader = LASReader::from_path(path)?; Ok(Box::new(reader)) }); factory.register_writer_for_extension("las", |path| { let header = Builder::from((1, 4)).into_header()?; let writer = LASWriter::from_path_and_header(path, header)?; Ok(Box::new(writer)) }); factory.register_reader_for_extension("laz", |path| { let reader = LASReader::from_path(path)?; Ok(Box::new(reader)) }); factory.register_writer_for_extension("laz", |path| { let header = Builder::from((1, 4)).into_header()?; let writer = LASWriter::from_path_and_header(path, header)?; Ok(Box::new(writer)) }); factory } } #[cfg(test)] mod tests { use super::*; #[test] fn io_factory_ignores_extension_case() { let mut factory: IOFactory = Default::default(); assert!(factory.supports_reading_from("las")); assert!(factory.supports_reading_from("LAS")); assert!(factory.supports_writing_to("las")); assert!(factory.supports_writing_to("LAS")); factory.register_reader_for_extension("FOO", |_path| unimplemented!()); factory.register_writer_for_extension("FOO", |_path| unimplemented!()); assert!(factory.supports_reading_from("foo")); assert!(factory.supports_reading_from("FOO")); assert!(factory.supports_writing_to("foo")); assert!(factory.supports_writing_to("FOO")); } }
use fancy_regex::{NoExpand, Regex}; use nu_engine::CallExt; use nu_protocol::{ ast::{Call, CellPath}, engine::{Command, EngineState, Stack}, Category, Example, PipelineData, ShellError, Signature, Span, Spanned, SyntaxShape, Value, }; use std::sync::Arc; struct Arguments { all: bool, find: String, replace: String, column_paths: Vec<CellPath>, literal_replace: bool, no_regex: bool, } #[derive(Clone)] pub struct SubCommand; impl Command for SubCommand { fn name(&self) -> &str { "str replace" } fn signature(&self) -> Signature { Signature::build("str replace") .required("find", SyntaxShape::String, "the pattern to find") .required("replace", SyntaxShape::String, "the replacement pattern") .rest( "rest", SyntaxShape::CellPath, "optionally find and replace text by column paths", ) .switch("all", "replace all occurrences of find string", Some('a')) .switch( "no-expand", "do not expand the replace parameter as a regular expression", Some('n'), ) .switch( "string", "do not use regular expressions for string find and replace", Some('s'), ) .category(Category::Strings) } fn usage(&self) -> &str { "Find and replace text" } fn search_terms(&self) -> Vec<&str> { vec!["search", "shift", "switch"] } fn run( &self, engine_state: &EngineState, stack: &mut Stack, call: &Call, input: PipelineData, ) -> Result<PipelineData, ShellError> { operate(engine_state, stack, call, input) } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Find and replace contents with capture group", example: "'my_library.rb' | str replace '(.+).rb' '$1.nu'", result: Some(Value::String { val: "my_library.nu".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace all occurrences of find string", example: "'abc abc abc' | str replace -a 'b' 'z'", result: Some(Value::String { val: "azc azc azc".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace all occurrences of find string in table", example: "[[ColA ColB ColC]; [abc abc ads]] | 
str replace -a 'b' 'z' ColA ColC", result: Some(Value::List { vals: vec![Value::Record { cols: vec!["ColA".to_string(), "ColB".to_string(), "ColC".to_string()], vals: vec![ Value::String { val: "azc".to_string(), span: Span::test_data(), }, Value::String { val: "abc".to_string(), span: Span::test_data(), }, Value::String { val: "ads".to_string(), span: Span::test_data(), }, ], span: Span::test_data(), }], span: Span::test_data(), }), }, Example { description: "Find and replace contents without using the replace parameter as a regular expression", example: r#"'dogs_$1_cats' | str replace '\$1' '$2' -n"#, result: Some(Value::String { val: "dogs_$2_cats".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace the first occurence using string replacement *not* regular expressions", example: r#"'c:\some\cool\path' | str replace 'c:\some\cool' '~' -s"#, result: Some(Value::String { val: "~\\path".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace all occurences using string replacement *not* regular expressions", example: r#"'abc abc abc' | str replace -a 'b' 'z' -s"#, result: Some(Value::String { val: "azc azc azc".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace with fancy-regex", example: r#"'a sucessful b' | str replace '\b([sS])uc(?:cs|s?)e(ed(?:ed|ing|s?)|ss(?:es|ful(?:ly)?|i(?:ons?|ve(?:ly)?)|ors?)?)\b' '${1}ucce$2'"#, result: Some(Value::String { val: "a successful b".to_string(), span: Span::test_data(), }), }, Example { description: "Find and replace with fancy-regex", example: r#"'GHIKK-9+*' | str replace '[*[:xdigit:]+]' 'z'"#, result: Some(Value::String { val: "GHIKK-z+*".to_string(), span: Span::test_data(), }), }, ] } } fn operate( engine_state: &EngineState, stack: &mut Stack, call: &Call, input: PipelineData, ) -> Result<PipelineData, ShellError> { let head = call.head; let find: Spanned<String> = call.req(engine_state, stack, 0)?; let replace: 
Spanned<String> = call.req(engine_state, stack, 1)?; let literal_replace = call.has_flag("no-expand"); let no_regex = call.has_flag("string"); let options = Arc::new(Arguments { all: call.has_flag("all"), find: find.item, replace: replace.item, column_paths: call.rest(engine_state, stack, 2)?, literal_replace, no_regex, }); input.map( move |v| { if options.column_paths.is_empty() { action(&v, &options, head) } else { let mut ret = v; for path in &options.column_paths { let opt = options.clone(); let r = ret.update_cell_path( &path.members, Box::new(move |old| action(old, &opt, head)), ); if let Err(error) = r { return Value::Error { error }; } } ret } }, engine_state.ctrlc.clone(), ) } struct FindReplace<'a>(&'a str, &'a str); fn action( input: &Value, Arguments { find, replace, all, literal_replace, no_regex, .. }: &Arguments, head: Span, ) -> Value { match input { Value::String { val, .. } => { let FindReplace(find, replacement) = FindReplace(find, replace); if *no_regex { // just use regular string replacement vs regular expressions if *all { Value::String { val: val.replace(find, replacement), span: head, } } else { Value::String { val: val.replacen(find, replacement, 1), span: head, } } } else { // use regular expressions to replace strings let regex = Regex::new(find); match regex { Ok(re) => { if *all { Value::String { val: { if *literal_replace { re.replace_all(val, NoExpand(replacement)).to_string() } else { re.replace_all(val, replacement).to_string() } }, span: head, } } else { Value::String { val: { if *literal_replace { re.replace(val, NoExpand(replacement)).to_string() } else { re.replace(val, replacement).to_string() } }, span: head, } } } Err(_) => Value::String { val: val.to_string(), span: head, }, } } } other => Value::Error { error: ShellError::UnsupportedInput( format!( "Input's type is {}. 
This command only works with strings.", other.get_type() ), head, ), }, } } #[cfg(test)] mod tests { use super::*; use super::{action, Arguments, SubCommand}; #[test] fn test_examples() { use crate::test_examples; test_examples(SubCommand {}) } #[test] fn can_have_capture_groups() { let word = Value::String { val: "Cargo.toml".to_string(), span: Span::test_data(), }; let options = Arguments { find: String::from("Cargo.(.+)"), replace: String::from("Carga.$1"), column_paths: vec![], literal_replace: false, all: false, no_regex: false, }; let actual = action(&word, &options, Span::test_data()); assert_eq!( actual, Value::String { val: "Carga.toml".to_string(), span: Span::test_data() } ); } }
#![windows_subsystem = "windows"] use std::sync::Arc; mod examples; use crate::examples::{ compute_mandel_and_save, compute_shader_multiply, copy_buffers, graphics_pipeline, graphics_window, image_clear_and_save, vulkano_particles, }; use vulkano::device::{Device, DeviceExtensions, Features, Queue}; use vulkano::instance::{Instance, PhysicalDevice}; fn main() { let (device, queue, instance) = init_vulkan(); // copy_buffers(device.clone(), queue.clone()); // // compute_shader_multiply(device.clone(), queue.clone()); // // image_clear_and_save(device.clone(), queue.clone()); // // compute_mandel_and_save(device.clone(), queue.clone()); // // graphics_pipeline(device.clone(), queue.clone()); // graphics_window(device.clone(), queue.clone(), instance.clone()); vulkano_particles(device.clone(), queue.clone(), instance.clone()); } fn init_vulkan() -> (Arc<Device>, Arc<Queue>, Arc<Instance>) { println!("INIT VULKAN"); let instance = Instance::new(None, &vulkano_win::required_extensions(), None) .expect("failed to create instance"); let physical_dev = { PhysicalDevice::enumerate(&instance) .next() .expect("no device available") }; println!("Physical Device: {}", physical_dev.name()); let queue_family = physical_dev .queue_families() .find(|&q| q.supports_graphics() && q.supports_compute()) .expect("couldn't find a queue with graphical and compute capabilities"); let (device, mut queues) = { Device::new( physical_dev, &Features { fill_mode_non_solid: true, ..Features::none() }, &DeviceExtensions { khr_storage_buffer_storage_class: true, khr_swapchain: true, ..DeviceExtensions::none() }, [(queue_family, 0.5)].iter().cloned(), ) .expect("failed to create device") }; let queue = queues.next().unwrap(); (device.clone(), queue.clone(), instance.clone()) }
//! Unit tests for the parser combinators defined in `super::combinator`.
//! Throughout, `Parser::pos` after a successful parse is asserted in
//! characters consumed (multi-byte characters count as one, as the
//! `"\u{3042}"` test shows).

#[allow(unused_imports)]
use super::combinator::*;
use super::*;
#[allow(unused_imports)]
use std::collections::HashMap;
#[allow(unused_imports)]
use std::io::{stderr, Write};

// Trivial `ParserData` impl so the tests can drive the parser without
// building any semantic data.
// NOTE(review): this impl takes a `(usize, usize)` position argument in
// `string`/`data`, while the `GreetingData` impl inside `test_combinators`
// below does not — presumably one of the two predates a trait change;
// confirm against the `ParserData` definition in `super`.
impl ParserData for () {
    fn string(_: (usize, usize), _: String) -> Self {
        ()
    }
    fn null() -> Self {
        ()
    }
    fn data(_: (usize, usize), _: &str, _: &mut Parser<()>) -> Self {
        ()
    }
    fn is_null(&self) -> bool {
        false
    }
}

// `parse_str` consumes exactly the given literal.
#[test]
fn test_parse_str() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_str("\u{3042}".to_string()));
        match test_parser.parse("あああ") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_str("aaa".to_string()));
        match test_parser.parse("aaa") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 3);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

// `parse_any` consumes a single character of any kind.
#[test]
fn test_parse_any() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_any());
        match test_parser.parse("あああ") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

// `parse_any` fails on empty input and (apparently) on a lone newline.
#[test]
fn test_parse_any_should_fail() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_any());
        match test_parser.parse("") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_any());
        match test_parser.parse("\n") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_str("aaa".to_string()));
        match test_parser.parse("b") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
}

// `parse_range` accepts character-class style specs: literals, `x-y` ranges
// (including escaped `\-`), multiple ranges, and non-ASCII ranges.
#[test]
fn test_parse_range() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("a-c".to_string()));
        match test_parser.parse("c") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("ab-c".to_string()));
        match test_parser.parse("b") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("ab-cde-f".to_string()));
        match test_parser.parse("d") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        // `\-` escapes the dash so it can itself be a range endpoint.
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range(r"\--/".to_string()));
        match test_parser.parse(".") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("a-zあ-ん".to_string()));
        match test_parser.parse("か") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("㐀-龯".to_string()));
        match test_parser.parse("成田") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string()),
        );
        match test_parser.parse("_成田") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        // Trailing dash with no upper bound still matches the literal char.
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("a-".to_string()));
        match test_parser.parse("a") {
            Ok(_) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse failed.");
            }
        }
    }
}

#[test]
fn test_parse_range_should_fail() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("あいう".to_string()));
        match test_parser.parse("") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
}

// `parse_many` is the Kleene star: zero or more repetitions, never fails.
#[test]
fn test_parse_many() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_many(parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string())),
        );
        match test_parser.parse("_") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 1);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        // Zero matches is still a success, with pos left at 0.
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_many(parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string())),
        );
        match test_parser.parse("") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 0);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_many(parse_range(
                "㐀-龯ぁ-んァ-ヶa-zA-Za-zA-Z_ー".to_string(),
            )),
        );
        match test_parser.parse("成田fdsfsfdojilkじょい") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 17);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_many(parse_range("0-9".to_string())),
        );
        match test_parser.parse("456789") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 6);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        // Stops at the first character outside the class (the space).
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_many(parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string())),
        );
        match test_parser.parse("hello world") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 5);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

#[test]
fn test_parse_many_should_fail() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_range("0-9".to_string()));
        match test_parser.parse("abcd") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
}

// `parse_more_than_one` requires at least one repetition.
#[test]
fn test_parse_more_than_one() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_more_than_one(parse_range("0-9".to_string())),
        );
        match test_parser.parse("1234567") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 7);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_more_than_one(parse_str(" ".to_string())),
        );
        match test_parser.parse("     ") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 5);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

#[test]
fn test_parse_more_than_one_should_fail() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_more_than_one(parse_range("0-9".to_string())),
        );
        match test_parser.parse("") {
            Ok(_) => {
                panic!("unexpected parse successful");
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
}

// `parse_not` is negative lookahead: succeeds without consuming input.
#[test]
fn test_parse_not() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_not(parse_str("a".to_string())));
        match test_parser.parse("bbb") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 0);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

#[test]
fn test_parse_not_should_fail() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule("Start".to_string(), parse_not(parse_str("a".to_string())));
        match test_parser.parse("abb") {
            Ok(_) => {
                panic!("unexpected parse successful")
            }
            Err(_) => {
                assert_eq!(1, 1);
            }
        }
    }
}

// `parse_seq` runs sub-parsers in order, all must succeed.
#[test]
fn test_parse_seq() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_seq(vec![
                parse_str("hello".to_string()),
                parse_many(parse_str(" ".to_string())),
                parse_str("world".to_string()),
            ]),
        );
        match test_parser.parse("hello world") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 11);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_seq(vec![
                parse_str("hello".to_string()),
                parse_many(parse_str(" ".to_string())),
                parse_many(parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string())),
                parse_or(vec![
                    parse_many(parse_str(" ".to_string())),
                    parse_str("".to_string()),
                ]),
                parse_str("!".to_string()),
            ]),
        );
        match test_parser.parse("hello 永田!") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 9);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

// `parse_or` tries alternatives in order and takes the first that succeeds.
#[test]
fn test_parse_or() {
    {
        let mut test_parser = Parser::<()>::new();
        test_parser.add_rule(
            "Start".to_string(),
            parse_or(vec![
                parse_str("good bye".to_string()),
                parse_str("hello".to_string()),
                parse_str("good morning".to_string()),
            ]),
        );
        match test_parser.parse("good morning world") {
            Ok(()) => {
                assert_eq!(test_parser.pos, 12);
            }
            Err(_) => {
                panic!("Parse Failed.")
            }
        }
    }
}

// End-to-end test: a small greeting grammar with captures and a custom
// `ParserData` that accumulates (name, greetword) pairs.
#[test]
fn test_combinators() {
    #[derive(Clone)]
    enum GreetingData {
        StringData(String),
        Greeting((String, String)),
        Greetings(Vec<(String, String)>),
        None,
    }
    impl GreetingData {
        // Returns the captured string, or "" for non-string variants.
        fn get_string_data(&self) -> String {
            if let Self::StringData(str) = self {
                str.clone()
            } else {
                "".to_string()
            }
        }
    }
    impl ParserData for GreetingData {
        fn string(str: String) -> Self {
            Self::StringData(str)
        }
        fn null() -> Self {
            Self::None
        }
        // Builds semantic data when a rule finishes: a single Greeting pair,
        // or a Greetings list accumulated across repetitions.
        fn data(name: String, parser: &mut Parser<GreetingData>) -> Self {
            // Fetches a named capture, logging to stderr (best-effort) if the
            // grammar did not produce it.
            fn extract_string_data(data: Option<GreetingData>, name: &str, rule: &str) -> String {
                match data {
                    Some(data) => data.get_string_data(),
                    None => {
                        let _ = writeln!(
                            stderr(),
                            "Could not find \"{}\" in the grammar to parse \"{}\"",
                            name,
                            rule
                        );
                        "".to_string()
                    }
                }
            }
            match name.as_str() {
                "Greeting" => match parser.get_data("Greeting".to_string()) {
                    Some(data) => match data {
                        Self::Greeting(greeting) => Self::Greetings(vec![
                            greeting,
                            (
                                extract_string_data(
                                    parser.get_data("name".to_string()),
                                    "name",
                                    "Greeting",
                                ),
                                extract_string_data(
                                    parser.get_data("greetword".to_string()),
                                    "greetword",
                                    "Greeting",
                                ),
                            ),
                        ]),
                        Self::Greetings(mut greetings) => {
                            greetings.push((
                                extract_string_data(
                                    parser.get_data("name".to_string()),
                                    "name",
                                    "Greeting",
                                ),
                                extract_string_data(
                                    parser.get_data("greetword".to_string()),
                                    "greetword",
                                    "Greeting",
                                ),
                            ));
                            Self::Greetings(greetings)
                        }
                        _ => {
                            let _ = writeln!(
                                stderr(),
                                "Greeting does not have type Greeting or Greetings."
                            );
                            Self::None
                        }
                    },
                    None => Self::Greeting((
                        extract_string_data(
                            parser.get_data("name".to_string()),
                            "name",
                            "Greeting",
                        ),
                        extract_string_data(
                            parser.get_data("greetword".to_string()),
                            "greetword",
                            "Greeting",
                        ),
                    )),
                },
                "Greetings" => match parser.get_data("Greeting".to_string()) {
                    Some(greeting) => match greeting {
                        Self::Greeting(data) => Self::Greetings(vec![data]),
                        Self::Greetings(data) => Self::Greetings(data),
                        _ => Self::Greetings(vec![]),
                    },
                    None => Self::Greetings(vec![]),
                },
                "Start" => parser.get_data("Greetings".to_string()).expect("Start"),
                _ => Self::None,
            }
        }
        fn is_null(&self) -> bool {
            if let Self::None = self {
                true
            } else {
                false
            }
        }
    }

    // Grammar: Start -> Greetings -> one or more "GreetWord ID!" lines.
    let mut test_parser = Parser::<GreetingData>::new();
    test_parser.add_rule(
        "ID".to_string(),
        parse_more_than_one(parse_range("㐀-龯ぁ-んァ-ヶa-zA-Z_ー".to_string())),
    );
    test_parser.add_rule(
        "GreetWord".to_string(),
        parse_or(vec![
            parse_str("Hi".to_string()),
            parse_str("Hello".to_string()),
            parse_str("Good morning".to_string()),
        ]),
    );
    test_parser.add_rule(
        "Greeting".to_string(),
        parse_seq(vec![
            capture_string(
                "greetword".to_string(),
                parse_ref("GreetWord".to_string(), None),
            ),
            parse_more_than_one(parse_str(" ".to_string())),
            capture_string("name".to_string(), parse_ref("ID".to_string(), None)),
            parse_many(parse_str(" ".to_string())),
            parse_str("!".to_string()),
        ]),
    );
    test_parser.add_rule(
        "Greetings".to_string(),
        parse_more_than_one(parse_seq(vec![
            parse_ref("Greeting".to_string(), None),
            parse_str("\n".to_string()),
        ])),
    );
    test_parser.add_rule(
        "Start".to_string(),
        parse_ref("Greetings".to_string(), None),
    );
    match test_parser.parse("Hi 永田!\nGood morning 成田!\n") {
        Ok(greetings) => {
            assert_eq!(test_parser.pos, 24);
            match greetings {
                GreetingData::Greetings(data) => {
                    for s in &data {
                        println!("Name: {}, Greeting: {}", s.0, s.1);
                    }
                }
                _ => {
                    panic!("Greetings is not of right type -> Parse Failed.")
                }
            }
        }
        Err(_) => {
            panic!("Parse failed at position {}.", test_parser.pos);
        }
    }
}
// Serializable context structs — presumably handed to HTML templates for
// rendering (Serialize-only; TODO confirm the template engine used by the
// routes that construct these).
use models::{HFile, FixedDateHImage, HPaste, HVideo};

/// Context for a page listing images.
#[derive(Serialize)]
pub struct ImageList {
    pub title: String,
    pub page_title: String,
    pub editable: bool,
    pub images: Vec<FixedDateHImage>,
}

/// Context for a page listing videos.
#[derive(Serialize)]
pub struct VideoList {
    pub title: String,
    pub page_title: String,
    pub editable: bool,
    pub videos: Vec<HVideo>,
}

/// Context for a page listing pastes.
#[derive(Serialize)]
pub struct PasteList {
    pub title: String,
    pub page_title: String,
    pub pastes: Vec<HPaste>,
    pub editable: bool,
}

/// Context for a page listing uploaded files.
#[derive(Serialize)]
pub struct FileList {
    pub title: String,
    pub page_title: String,
    pub files: Vec<HFile>,
    pub editable: bool,
}

/// Context for the management view of a single image.
#[derive(Serialize)]
pub struct ManageImage {
    pub id: String,
    pub title: String,
    pub page_title: String,
    pub editable: bool,
    pub date_added: String,
    pub password: Option<String>,
    pub img_src: Option<String>,
    pub is_expiry: bool
}

/// Context for the management view of a single video.
#[derive(Serialize)]
pub struct ManageVideo {
    pub id: String,
    pub title: String,
    pub page_title: String,
    pub editable: bool,
    pub date_added: String,
    pub is_expiry: bool,
    pub password: Option<String>,
    pub vid_src: Option<String>
}

/// Context for the management view of a single file.
#[derive(Serialize)]
pub struct ManageFile {
    pub id: String,
    pub filename: String,
    pub page_title: String,
    pub date_added: String,
    pub is_expiry: bool,
    pub password: Option<String>,
    pub editable: bool
}

/// Context for the management view of a single paste.
#[derive(Serialize)]
pub struct ManagePaste {
    pub id: String,
    pub title: String,
    pub page_title: String,
    pub paste: HPaste,
    pub editable: bool,
}

/// Context for the public paste view; `meta_tag` is an optional extra
/// head tag (content not determined here).
#[derive(Serialize)]
pub struct ShowPaste {
    pub item: HPaste,
    pub meta_tag: Option<String>,
}

/// Context for the public video view.
#[derive(Serialize)]
pub struct ShowVideo {
    pub item: HVideo,
    pub meta_tag: Option<String>,
    pub password: bool,
}

/// Context for the public image view.
#[derive(Serialize)]
pub struct ShowImage {
    pub password: bool,
    pub item: FixedDateHImage,
    pub meta_tag: Option<String>,
}

/// Context for the account/profile page.
#[derive(Serialize)]
pub struct ShowAccount {
    pub user_id: i32,
    pub first_name: String,
    pub last_name: Option<String>,
    pub email: String,
    pub privilege_level: String,
    pub resource_count: i64,
}
// Lowering of the parsed AST (`crate::parser`) into a flat three-address-style
// IR (`Vec<Ir>`). Temps are numbered from 1 upward; the `t`/`i` value threaded
// through the assemble_* functions is the highest temp currently in use, and
// each function returns the (possibly advanced) temp counter alongside the
// instructions it emitted. Temp 1 doubles as the call return-value slot.
use std::fmt;
use crate::lexer::Token;
use crate::parser::Type;
use crate::parser::Expr;
use crate::parser::Stmt;
use crate::parser::Fn;
use crate::parser::Prog;

/// IR opcodes. Binary operators are not given their own variants; `Op::Op`
/// wraps the original lexer token instead.
#[derive(Debug)]
pub enum Op {
    Exit,
    Label,
    Goto,
    Print,
    Load,
    If,
    Push,
    Pop,
    Call,
    Ret,
    Op(Token),
}

/// An instruction operand: integer literal, temp register, or label name.
#[derive(Debug)]
pub enum OpArg {
    Int(i64),
    Temp(usize),
    Label(String),
}

/// A single IR instruction: optional destination (`ret`) plus up to two
/// source operands.
#[derive(Debug)]
pub struct Ir {
    pub ret: Option<OpArg>,
    pub op: Op,
    pub arg1: Option<OpArg>,
    pub arg2: Option<OpArg>
}

/// A lexical scope: declared variables as (name, type, temp slot), a counter
/// used to make generated labels unique, and a link to the enclosing scope.
pub struct Env<'a> {
    vars: Vec<(String, Type, usize)>,
    generated_labels: usize,
    outer: Option<&'a Env<'a>>,
}

impl fmt::Display for Op {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Mnemonic for each opcode; note Goto prints as "go" and Load as "ldr".
        let s = match self {
            Op::Exit => "exit".to_string(),
            Op::Label => "label".to_string(),
            Op::Goto => "go".to_string(),
            Op::Print => "print".to_string(),
            Op::Load => "ldr".to_string(),
            Op::If => "if".to_string(),
            Op::Push => "push".to_string(),
            Op::Pop => "pop".to_string(),
            Op::Call => "call".to_string(),
            Op::Ret => "ret".to_string(),
            /* Op::Add => "add", Op::Sub => "sub", Op::Mul => "mul", Op::Div => "div", Op::Eq => "eq", Op::Ne => "ne", */
            Op::Op(t) => format!("op({})", t.to_string()),
        };
        write!(f, "{}", s)
    }
}

impl fmt::Display for OpArg {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Temps print as "tN"; ints and labels print verbatim.
        // NOTE(review): the `.to_string()` after `format!` is redundant.
        let s = match self {
            OpArg::Int(i) => format!("{}", i).to_string(),
            OpArg::Temp(i) => format!("t{}", i).to_string(),
            OpArg::Label(i) => format!("{}", i).to_string(),
        };
        write!(f, "{}", s)
    }
}

impl fmt::Display for Ir {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // "<op> [ret ][arg1 ][arg2]" — each piece only when present.
        let mut s = format!("{} ", self.op).to_string();
        if let Some(r) = &self.ret {
            s = format!("{}{} ", s, r).to_string();
        }
        if let Some(arg1) = &self.arg1 {
            s = format!("{} {} ", s, arg1).to_string();
        }
        if let Some(arg2) = &self.arg2 {
            s = format!("{} {}", s, arg2).to_string()
        };
        write!(f, "{}", s)
    }
}

/// Maps a binary-operator token to `Op::Op(token)`; panics on any other token.
fn convert_op(t: &Token) -> Op {
    match t {
        Token::OpAdd |
        Token::OpSub |
        Token::OpMul |
        Token::OpDiv |
        Token::OpRem |
        Token::CompEqual |
        Token::CompInEqual |
        Token::CompGreaterEqual |
        Token::CompGreater |
        Token::CompLessEqual |
        Token::CompLess |
        Token::CompAnd |
        Token::CompOr => Op::Op(t.clone()),
        _ => panic!("op {} not supported", t),
    }
}

/// Looks up a variable's temp slot in this scope, then recursively in outer
/// scopes. Returns None if the name is not declared anywhere.
fn find_var(env: &Env, s: &String) -> Option<usize> {
    for (v, _v_type, v_temp) in &env.vars {
        if v == s {
            return Some(*v_temp);
        }
    }
    match env.outer {
        Some(e) => find_var(e, s),
        None => None,
    }
}

/// Converts a leaf token to an operand: integer literal or the temp slot of a
/// declared symbol. Panics on unknown symbols or non-leaf tokens.
fn convert_item(env: &Env, t: &Token) -> OpArg {
    match t {
        Token::Integer(i) => OpArg::Int(*i),
        Token::Symbol(s) => {
            match find_var(env, s) {
                Some(t) => OpArg::Temp(t),
                None => panic!("var {} not found", s),
            }
        },
        _ => panic!("item {} not supported", t),
    }
}

/// Emits IR for a function call: evaluate each argument, save the caller's
/// live temps (1..=i) with Push, shuffle argument values down into temps
/// 1..=argc, Call, copy the return value (convention: temp 1) into a fresh
/// temp, then Pop the saved temps back in reverse order.
fn assemble_call(env: &mut Env, i: usize, name: &String, args: &Vec<Expr>) -> (usize, Vec<Ir>) {
    let mut irs: Vec<Ir> = vec![];
    let mut arg_i = i;
    for a in args {
        let (value, mut ir_e) = assemble_expr(env, arg_i, a);
        irs.append(&mut ir_e);
        // Each argument's final value is parked in the next slot after `arg_i`.
        arg_i = arg_i + 1;
        let ir = Ir {
            ret: Some(OpArg::Temp(arg_i)),
            op: Op::Load,
            arg1: Some(OpArg::Temp(value)),
            arg2: None,
        };
        irs.push(ir);
    }
    // Save the caller's temps 1..=i before clobbering them with arguments.
    for j in 1..(i+1) {
        let ir = Ir {
            ret: None,
            op: Op::Push,
            arg1: Some(OpArg::Temp(j)),
            arg2: None,
        };
        irs.push(ir);
    }
    // Move parked arguments (temps i+1..=arg_i) down into temps 1..=argc,
    // matching the callee's parameter numbering.
    for j in 1..(arg_i - i + 1) {
        let ir = Ir {
            ret: Some(OpArg::Temp(j)),
            op: Op::Load,
            arg1: Some(OpArg::Temp(j + i)),
            arg2: None,
        };
        irs.push(ir);
    }
    let ir = Ir {
        ret: None,
        op: Op::Call,
        arg1: Some(OpArg::Label(name.to_string())),
        arg2: None,
    };
    irs.push(ir);
    // Return-value convention: callee leaves its result in temp 1.
    let ret = i + 1;
    let ir = Ir {
        ret: Some(OpArg::Temp(ret)),
        op: Op::Load,
        arg1: Some(OpArg::Temp(1)),
        arg2: None,
    };
    irs.push(ir);
    // Restore the caller's temps in reverse (stack) order.
    for j in (1..(i+1)).rev() {
        let ir = Ir {
            ret: Some(OpArg::Temp(j)),
            op: Op::Pop,
            arg1: None,
            arg2: None,
        };
        irs.push(ir);
    }
    (ret, irs)
}

/// Emits IR for an expression; returns the temp holding its value plus the
/// instructions. `i` is the highest temp already in use.
fn assemble_expr(env: &mut Env, mut i: usize, e: &Expr) -> (usize, Vec<Ir>) {
    match e {
        // Temp* variants are parser-internal and must be rewritten before
        // lowering reaches this point.
        Expr::TempStart(_, _) | Expr::TempOp(_, _, _) => {
            panic!("Shouldn't get unfixed expressions")
        },
        Expr::Op(token, a, b) => {
            let mut ir = vec![];
            let (value_a, mut ir_a) = assemble_expr(env, i, &a);
            ir.append(&mut ir_a);
            i = value_a;
            let (value_b, mut ir_b) = assemble_expr(env, i, &b);
            ir.append(&mut ir_b);
            i = value_b;
            let ret = i + 1;
            let ir_op = Ir {
                ret: Some(OpArg::Temp(ret)),
                op: convert_op(token),
                arg1: Some(OpArg::Temp(value_a)),
                arg2: Some(OpArg::Temp(value_b)),
            };
            ir.push(ir_op);
            (ret, ir)
        },
        Expr::Item(item) => {
            // Leaf value: load the literal/variable into a fresh temp.
            i = i + 1;
            let ir = Ir {
                ret: Some(OpArg::Temp(i)),
                op: Op::Load,
                arg1: Some(convert_item(env, item)),
                arg2: None,
            };
            (i, vec![ir])
        },
        Expr::Call(name, args) => assemble_call(env, i, name, args),
    }
}

/// Expression statement: evaluate for side effects, discard the result temp
/// (the caller keeps its own `t`).
fn assemble_stmt_expr(env: &mut Env, t: usize, expr: Box<Expr>) -> (usize, Vec<Ir>) {
    let (_ret_t, ir) = assemble_expr(env, t, &expr);
    (t, ir)
}

/// Produces a unique label "_<depth underscores><n>_<counter>"; one leading
/// underscore is added per enclosing scope so nested labels cannot collide.
fn gen_label_name(env: &mut Env, n: String) -> String {
    let i = env.generated_labels;
    env.generated_labels += 1;
    let mut s = "_".to_string();
    let mut e: &Env = env;
    while let Some(ee) = e.outer {
        s = format!("{}_", s).to_string();
        e = ee;
    }
    format!("{}{}_{}", s, n, i).to_string()
}

/// Emits IR for if/else. Layout: cond; If(cond_t -> then_end); then-body;
/// [Goto else_end]; Label then_end; [else-body; Label else_end]. The `If`
/// opcode jumps to its arg2 label when the condition is NOT true.
fn assemble_stmt_if(env: &mut Env, t: usize, cond: Box<Expr>, then: Box<Stmt>, otherwise: Option<Box<Stmt>>) -> (usize, Vec<Ir>) {
    let mut ir: Vec<Ir> = vec![];
    let (cond_t, mut cond_ir) = assemble_expr(env, t, &cond);
    let (_then_t, mut then_ir) = assemble_stmt(env, t, *then);
    let (_other_t, mut other_ir) = match otherwise {
        Some(o) => assemble_stmt(env, t, *o),
        None => (t, vec![]),
    };
    /* Generate this here so the numbers increment how I'd expect them to. */
    let label_name = gen_label_name(env, "then_end".to_string());
    if other_ir.len() > 0 {
        // Note: this inner `label_name` deliberately shadows the outer one —
        // the Goto at the end of the then-branch targets "else_end", while the
        // If below still targets the outer "then_end".
        let label_name = gen_label_name(env, "else_end".to_string());
        let other_end_label = Ir {
            ret: None,
            op: Op::Label,
            arg1: Some(OpArg::Label(label_name.to_string())),
            arg2: None
        };
        other_ir.push(other_end_label);
        let skip_other_ir = Ir {
            ret: None,
            op: Op::Goto,
            arg1: Some(OpArg::Label(label_name.to_string())),
            arg2: None
        };
        then_ir.push(skip_other_ir);
    }
    let then_end_label = Ir {
        ret: None,
        op: Op::Label,
        arg1: Some(OpArg::Label(label_name.to_string())),
        arg2: None
    };
    then_ir.push(then_end_label);
    /* if not true then jump to arg2 label */
    let if_ir = Ir {
        ret: None,
        op: Op::If,
        arg1: Some(OpArg::Temp(cond_t)),
        arg2: Some(OpArg::Label(label_name.to_string())),
    };
    ir.append(&mut cond_ir);
    ir.push(if_ir);
    ir.append(&mut then_ir);
    ir.append(&mut other_ir);
    /* TODO: should return then_t or other_t or a new t that is the same for both */
    (t, ir)
}

/// Variable declaration: evaluate the initializer, copy it into a fresh temp
/// that becomes the variable's permanent slot, and record it in the scope.
fn assemble_stmt_alloc(env: &mut Env, mut t: usize, var_name: String, var_type: Type, value: Box<Expr>) -> (usize, Vec<Ir>) {
    let (value_t, mut value_ir) = assemble_expr(env, t, &value);
    t = t + 1;
    let ir = Ir {
        ret: Some(OpArg::Temp(t)),
        op: Op::Load,
        arg1: Some(OpArg::Temp(value_t)),
        arg2: None,
    };
    value_ir.push(ir);
    env.vars.push((var_name.to_string(), var_type, t));
    (t, value_ir)
}

/// Assignment to an existing variable: evaluate the RHS, then Load it into
/// the variable's recorded temp slot. Panics if the name was never declared.
fn assemble_stmt_assign(env: &mut Env, t: usize, name: String, value: Box<Expr>) -> (usize, Vec<Ir>) {
    let v_temp = match find_var(env, &name) {
        Some(t) => t,
        None => panic!("assign to unknown var {}", name),
    };
    let (value_t, mut value_ir) = assemble_expr(env, t, &value);
    let ir = Ir {
        ret: Some(OpArg::Temp(v_temp)),
        op: Op::Load,
        arg1: Some(OpArg::Temp(value_t)),
        arg2: None,
    };
    value_ir.push(ir);
    (t, value_ir)
}

/// Statement block: runs the statements in a fresh child scope so their
/// declarations go out of scope afterwards; returns the caller's original `t`.
fn assemble_stmt_list(env: &mut Env, t: usize, l: Vec<Stmt>) -> (usize, Vec<Ir>) {
    let mut v: Vec<Ir> = vec![];
    let mut n_env = Env {
        vars: vec![],
        generated_labels: 0,
        outer: Some(env),
    };
    let mut n_t = t;
    for s in l {
        let (tt, mut vv) = assemble_stmt(&mut n_env, n_t, s);
        v.append(&mut vv);
        n_t = tt;
    }
    (t, v)
}

/// Return statement: evaluate the optional value, copy it into temp 1 (the
/// return-value convention used by assemble_call), then Ret.
fn assemble_stmt_return(env: &mut Env, t: usize, expr: Option<Box<Expr>>) -> (usize, Vec<Ir>) {
    let (value_t, mut ir) = match expr {
        Some(e) => assemble_expr(env, t, &e),
        None => (t, vec![]),
    };
    let ldr_ret = Ir {
        ret: Some(OpArg::Temp(1)),
        op: Op::Load,
        arg1: Some(OpArg::Temp(value_t)),
        arg2: None
    };
    ir.push(ldr_ret);
    let ret = Ir { ret: None, op: Op::Ret, arg1: None, arg2: None };
    ir.push(ret);
    (t, ir)
}

/// Assembles one function: parameters occupy temps 1..=argc in a fresh scope,
/// the body follows the function's entry Label, and a trailing Ret guards
/// fall-through off the end of the body.
fn assemble_fn(env: &mut Env, name: String, f: Fn) -> Vec<Ir> {
    let mut n_env = Env {
        vars: vec![],
        generated_labels: 0,
        outer: Some(env),
    };
    let mut p_t = 0;
    for (param_name, param_type) in f.params {
        p_t += 1;
        n_env.vars.push((param_name, param_type, p_t));
    }
    let (_t, mut ir) = assemble_stmt(&mut n_env, p_t, f.stmts);
    let fn_label = Ir {
        ret: None,
        op: Op::Label,
        arg1: Some(OpArg::Label(name.to_string())),
        arg2: None
    };
    ir.insert(0, fn_label);
    let ret = Ir { ret: None, op: Op::Ret, arg1: None, arg2: None };
    ir.push(ret);
    ir
}

/// Dispatches one statement to its specific lowering routine.
fn assemble_stmt(env: &mut Env, t: usize, s: Stmt) -> (usize, Vec<Ir>) {
    match s {
        Stmt::Return(expr) => assemble_stmt_return(env, t, expr),
        Stmt::Alloc(var_name, var_type, value) => assemble_stmt_alloc(env, t, var_name, var_type, value),
        Stmt::Assign(name, value) => assemble_stmt_assign(env, t, name, value),
        Stmt::If(cond, then, otherwise) => assemble_stmt_if(env, t, cond, then, otherwise),
        Stmt::List(l) => assemble_stmt_list(env, t, l),
        Stmt::Expr(e) => assemble_stmt_expr(env, t, e),
    }
}

/// Entry point: lowers a whole program. Emits a bootstrap "call main; exit"
/// prologue, then the IR of every function in the program.
pub fn assemble(p: Prog) -> Vec<Ir> {
    let mut env = Env { vars: vec![], generated_labels: 0, outer: None };
    let mut irs: Vec<Ir> = vec![];
    let start = Ir {
        ret: None,
        op: Op::Call,
        arg1: Some(OpArg::Label("main".to_string())),
        arg2: None
    };
    irs.push(start);
    let exit = Ir { ret: None, op: Op::Exit, arg1: None, arg2: None };
    irs.push(exit);
    for (n, f) in p.funcs {
        let mut i = assemble_fn(&mut env, n, f);
        irs.append(&mut i);
    }
    irs
}
// Copyright (c) 2015, <daggerbot@gmail.com> // All rights reserved. use ::display::Xid; use ::window::Window; /** Drawable resource identifier type. */ pub type Drawable = Xid; // // Geometry // #[derive(Clone, Copy)] pub struct Geometry { pub root: Window, pub x: i32, pub y: i32, pub width: i32, pub height: i32, pub border_width: i32, pub depth: i32, }
// ============================================================================
// Register accessors for RCC_DDRITFCR (DDR interface control register).
// The R/W reader/writer-proxy pattern here matches svd2rust-generated output —
// NOTE(review): this block looks machine-generated; prefer regenerating from
// the SVD description over hand-editing it.
// Reset value: 0x000f_d02a. Each field below follows the same template:
// value enum X_A, From<X_A> impl, reader X_R, and write proxy X_W.
// ============================================================================
#[doc = "Reader of register RCC_DDRITFCR"]
pub type R = crate::R<u32, super::RCC_DDRITFCR>;
#[doc = "Writer for register RCC_DDRITFCR"]
pub type W = crate::W<u32, super::RCC_DDRITFCR>;
#[doc = "Register RCC_DDRITFCR `reset()`'s with value 0x000f_d02a"]
impl crate::ResetValue for super::RCC_DDRITFCR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type { 0x000f_d02a }
}
// ---- DDRC1EN: bit 0, DDRC port-1 clock enable ----
#[doc = "DDRC1EN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRC1EN_A {
    #[doc = "0: Means that the DDRC peripheral\r\n clocks are disabled"]
    B_0X0 = 0,
    #[doc = "1: Means that the DDRC peripheral\r\n clocks are enabled"]
    B_0X1 = 1,
}
impl From<DDRC1EN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRC1EN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRC1EN`"]
pub type DDRC1EN_R = crate::R<bool, DDRC1EN_A>;
impl DDRC1EN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRC1EN_A {
        match self.bits {
            false => DDRC1EN_A::B_0X0,
            true => DDRC1EN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRC1EN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRC1EN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRC1EN`"]
pub struct DDRC1EN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRC1EN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRC1EN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "Means that the DDRC peripheral clocks are disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRC1EN_A::B_0X0) }
    #[doc = "Means that the DDRC peripheral clocks are enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRC1EN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
// ---- DDRC1LPEN: bit 1, DDRC port-1 clocks during CSLEEP ----
#[doc = "DDRC1LPEN\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRC1LPEN_A {
    #[doc = "0: means that the peripheral clocks are\r\n disabled in CSLEEP"]
    B_0X0 = 0,
    #[doc = "1: means that the peripheral clocks are\r\n enabled in CSLEEP"]
    B_0X1 = 1,
}
impl From<DDRC1LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRC1LPEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRC1LPEN`"]
pub type DDRC1LPEN_R = crate::R<bool, DDRC1LPEN_A>;
impl DDRC1LPEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRC1LPEN_A {
        match self.bits {
            false => DDRC1LPEN_A::B_0X0,
            true => DDRC1LPEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRC1LPEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRC1LPEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRC1LPEN`"]
pub struct DDRC1LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRC1LPEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRC1LPEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the peripheral clocks are disabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRC1LPEN_A::B_0X0) }
    #[doc = "means that the peripheral clocks are enabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRC1LPEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
// ---- DDRC2EN: bit 2, DDRC port-2 clock enable ----
#[doc = "DDRC2EN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRC2EN_A {
    #[doc = "0: Means that the DDRC peripheral\r\n clocks are disabled"]
    B_0X0 = 0,
    #[doc = "1: Means that the DDRC peripheral\r\n clocks are enabled"]
    B_0X1 = 1,
}
impl From<DDRC2EN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRC2EN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRC2EN`"]
pub type DDRC2EN_R = crate::R<bool, DDRC2EN_A>;
impl DDRC2EN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRC2EN_A {
        match self.bits {
            false => DDRC2EN_A::B_0X0,
            true => DDRC2EN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRC2EN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRC2EN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRC2EN`"]
pub struct DDRC2EN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRC2EN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRC2EN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "Means that the DDRC peripheral clocks are disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRC2EN_A::B_0X0) }
    #[doc = "Means that the DDRC peripheral clocks are enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRC2EN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
// ---- DDRC2LPEN: bit 3, DDRC port-2 clocks during CSLEEP ----
#[doc = "DDRC2LPEN\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRC2LPEN_A {
    #[doc = "0: means that the peripheral clocks are\r\n disabled in CSLEEP"]
    B_0X0 = 0,
    #[doc = "1: means that the peripheral clocks are\r\n enabled in CSLEEP"]
    B_0X1 = 1,
}
impl From<DDRC2LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRC2LPEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRC2LPEN`"]
pub type DDRC2LPEN_R = crate::R<bool, DDRC2LPEN_A>;
impl DDRC2LPEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRC2LPEN_A {
        match self.bits {
            false => DDRC2LPEN_A::B_0X0,
            true => DDRC2LPEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRC2LPEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRC2LPEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRC2LPEN`"]
pub struct DDRC2LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRC2LPEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRC2LPEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the peripheral clocks are disabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRC2LPEN_A::B_0X0) }
    #[doc = "means that the peripheral clocks are enabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRC2LPEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
// ---- DDRPHYCEN: bit 4, DDRPHYC clock enable ----
#[doc = "DDRPHYCEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRPHYCEN_A {
    #[doc = "0: means that the peripheral clocks are\r\n disabled"]
    B_0X0 = 0,
    #[doc = "1: means that the peripheral clocks are\r\n enabled"]
    B_0X1 = 1,
}
impl From<DDRPHYCEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRPHYCEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRPHYCEN`"]
pub type DDRPHYCEN_R = crate::R<bool, DDRPHYCEN_A>;
impl DDRPHYCEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRPHYCEN_A {
        match self.bits {
            false => DDRPHYCEN_A::B_0X0,
            true => DDRPHYCEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRPHYCEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRPHYCEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRPHYCEN`"]
pub struct DDRPHYCEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRPHYCEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRPHYCEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the peripheral clocks are disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRPHYCEN_A::B_0X0) }
    #[doc = "means that the peripheral clocks are enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRPHYCEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
// ---- DDRPHYCLPEN: bit 5, DDRPHYC clocks during CSLEEP ----
#[doc = "DDRPHYCLPEN\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRPHYCLPEN_A {
    #[doc = "0: means that the peripheral clocks are\r\n disabled in CSLEEP"]
    B_0X0 = 0,
    #[doc = "1: means that the peripheral clocks are\r\n enabled in CSLEEP"]
    B_0X1 = 1,
}
impl From<DDRPHYCLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRPHYCLPEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRPHYCLPEN`"]
pub type DDRPHYCLPEN_R = crate::R<bool, DDRPHYCLPEN_A>;
impl DDRPHYCLPEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRPHYCLPEN_A {
        match self.bits {
            false => DDRPHYCLPEN_A::B_0X0,
            true => DDRPHYCLPEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRPHYCLPEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRPHYCLPEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRPHYCLPEN`"]
pub struct DDRPHYCLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRPHYCLPEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRPHYCLPEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the peripheral clocks are disabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRPHYCLPEN_A::B_0X0) }
    #[doc = "means that the peripheral clocks are enabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRPHYCLPEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
// ---- DDRCAPBEN: bit 6, DDRC APB clock enable ----
#[doc = "DDRCAPBEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRCAPBEN_A {
    #[doc = "0: means that the APB clock is\r\n disabled"]
    B_0X0 = 0,
    #[doc = "1: means that the APB clock is\r\n enabled"]
    B_0X1 = 1,
}
impl From<DDRCAPBEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRCAPBEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRCAPBEN`"]
pub type DDRCAPBEN_R = crate::R<bool, DDRCAPBEN_A>;
impl DDRCAPBEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRCAPBEN_A {
        match self.bits {
            false => DDRCAPBEN_A::B_0X0,
            true => DDRCAPBEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRCAPBEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRCAPBEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRCAPBEN`"]
pub struct DDRCAPBEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRCAPBEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRCAPBEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the APB clock is disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCAPBEN_A::B_0X0) }
    #[doc = "means that the APB clock is enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCAPBEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
}
// ---- DDRCAPBLPEN: bit 7, DDRC APB clock during CSLEEP ----
#[doc = "DDRCAPBLPEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRCAPBLPEN_A {
    #[doc = "0: means that the APB clock is disabled\r\n in CSLEEP"]
    B_0X0 = 0,
    #[doc = "1: means that the APB clock is enabled\r\n in CSLEEP"]
    B_0X1 = 1,
}
impl From<DDRCAPBLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRCAPBLPEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRCAPBLPEN`"]
pub type DDRCAPBLPEN_R = crate::R<bool, DDRCAPBLPEN_A>;
impl DDRCAPBLPEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRCAPBLPEN_A {
        match self.bits {
            false => DDRCAPBLPEN_A::B_0X0,
            true => DDRCAPBLPEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRCAPBLPEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRCAPBLPEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRCAPBLPEN`"]
pub struct DDRCAPBLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRCAPBLPEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRCAPBLPEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the APB clock is disabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCAPBLPEN_A::B_0X0) }
    #[doc = "means that the APB clock is enabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCAPBLPEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
// ---- AXIDCGEN: bit 8, AXI dynamic clock gating during MPU CRUN ----
// NOTE(review): the "{2:1" in the B_0X1 doc text is a typo carried over from
// the source SVD ("[2:1]"); preserved verbatim since this file is generated.
#[doc = "AXIDCGEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum AXIDCGEN_A {
    #[doc = "0: means that the dynamic clock gating\r\n of AXIDCG\\[2:1\\] is disabled during MPU\r\n CRUN,"]
    B_0X0 = 0,
    #[doc = "1: means that the dynamic clock gating\r\n of AXIDCG{2:1\\] is enabled during MPU\r\n CRUN"]
    B_0X1 = 1,
}
impl From<AXIDCGEN_A> for bool {
    #[inline(always)]
    fn from(variant: AXIDCGEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `AXIDCGEN`"]
pub type AXIDCGEN_R = crate::R<bool, AXIDCGEN_A>;
impl AXIDCGEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> AXIDCGEN_A {
        match self.bits {
            false => AXIDCGEN_A::B_0X0,
            true => AXIDCGEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == AXIDCGEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == AXIDCGEN_A::B_0X1 }
}
#[doc = "Write proxy for field `AXIDCGEN`"]
pub struct AXIDCGEN_W<'a> {
    w: &'a mut W,
}
impl<'a> AXIDCGEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: AXIDCGEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the dynamic clock gating of AXIDCG\\[2:1\\] is disabled during MPU CRUN,"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(AXIDCGEN_A::B_0X0) }
    #[doc = "means that the dynamic clock gating of AXIDCG{2:1\\] is enabled during MPU CRUN"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(AXIDCGEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
// ---- DDRPHYCAPBEN: bit 9, DDRPHYC APB clock enable ----
#[doc = "DDRPHYCAPBEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRPHYCAPBEN_A {
    #[doc = "0: means that the APB clock is\r\n disabled"]
    B_0X0 = 0,
    #[doc = "1: means that the APB clock is\r\n enabled"]
    B_0X1 = 1,
}
impl From<DDRPHYCAPBEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRPHYCAPBEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRPHYCAPBEN`"]
pub type DDRPHYCAPBEN_R = crate::R<bool, DDRPHYCAPBEN_A>;
impl DDRPHYCAPBEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRPHYCAPBEN_A {
        match self.bits {
            false => DDRPHYCAPBEN_A::B_0X0,
            true => DDRPHYCAPBEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRPHYCAPBEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRPHYCAPBEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRPHYCAPBEN`"]
pub struct DDRPHYCAPBEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRPHYCAPBEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRPHYCAPBEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the APB clock is disabled"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRPHYCAPBEN_A::B_0X0) }
    #[doc = "means that the APB clock is enabled"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRPHYCAPBEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
// ---- DDRPHYCAPBLPEN: bit 10, DDRPHYC APB clock during CSLEEP ----
#[doc = "DDRPHYCAPBLPEN\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRPHYCAPBLPEN_A {
    #[doc = "0: means that the APB clock is disabled\r\n in CSLEEP"]
    B_0X0 = 0,
    #[doc = "1: means that the APB clock is enabled\r\n in CSLEEP"]
    B_0X1 = 1,
}
impl From<DDRPHYCAPBLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DDRPHYCAPBLPEN_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRPHYCAPBLPEN`"]
pub type DDRPHYCAPBLPEN_R = crate::R<bool, DDRPHYCAPBLPEN_A>;
impl DDRPHYCAPBLPEN_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRPHYCAPBLPEN_A {
        match self.bits {
            false => DDRPHYCAPBLPEN_A::B_0X0,
            true => DDRPHYCAPBLPEN_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRPHYCAPBLPEN_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRPHYCAPBLPEN_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRPHYCAPBLPEN`"]
pub struct DDRPHYCAPBLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRPHYCAPBLPEN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRPHYCAPBLPEN_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "means that the APB clock is disabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRPHYCAPBLPEN_A::B_0X0) }
    #[doc = "means that the APB clock is enabled in CSLEEP"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRPHYCAPBLPEN_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
// ---- KERDCG_DLY: 3-bit field at bits 13:11, clock-gating delay ----
// Unlike the single-bit fields above this is a multi-bit field: the enum is
// non-exhaustive over the 3-bit range, so the reader returns
// crate::Variant (Val/Res) and the raw writer `bits` is unsafe.
#[doc = "KERDCG_DLY\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum KERDCG_DLY_A {
    #[doc = "0: 1 period of ddrc_ker_ck between\r\n cactive_ddrc falling edge and the gating of\r\n ddrc_ker_ckg."]
    B_0X0 = 0,
    #[doc = "1: 3 periods of ddrc_ker_ck between\r\n cactive_ddrc falling edge and the gating of\r\n ddrc_ker_ckg."]
    B_0X1 = 1,
    #[doc = "7: 15 periods of ddrc_ker_ck between\r\n cactive_ddrc falling edge and the gating of\r\n ddrc_ker_ckg."]
    B_0X7 = 7,
}
impl From<KERDCG_DLY_A> for u8 {
    #[inline(always)]
    fn from(variant: KERDCG_DLY_A) -> Self { variant as _ }
}
#[doc = "Reader of field `KERDCG_DLY`"]
pub type KERDCG_DLY_R = crate::R<u8, KERDCG_DLY_A>;
impl KERDCG_DLY_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, KERDCG_DLY_A> {
        use crate::Variant::*;
        match self.bits {
            0 => Val(KERDCG_DLY_A::B_0X0),
            1 => Val(KERDCG_DLY_A::B_0X1),
            7 => Val(KERDCG_DLY_A::B_0X7),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == KERDCG_DLY_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == KERDCG_DLY_A::B_0X1 }
    #[doc = "Checks if the value of the field is `B_0X7`"]
    #[inline(always)]
    pub fn is_b_0x7(&self) -> bool { *self == KERDCG_DLY_A::B_0X7 }
}
#[doc = "Write proxy for field `KERDCG_DLY`"]
pub struct KERDCG_DLY_W<'a> {
    w: &'a mut W,
}
impl<'a> KERDCG_DLY_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: KERDCG_DLY_A) -> &'a mut W { unsafe { self.bits(variant.into()) } }
    #[doc = "1 period of ddrc_ker_ck between cactive_ddrc falling edge and the gating of ddrc_ker_ckg."]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(KERDCG_DLY_A::B_0X0) }
    #[doc = "3 periods of ddrc_ker_ck between cactive_ddrc falling edge and the gating of ddrc_ker_ckg."]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(KERDCG_DLY_A::B_0X1) }
    #[doc = "15 periods of ddrc_ker_ck between cactive_ddrc falling edge and the gating of ddrc_ker_ckg."]
    #[inline(always)]
    pub fn b_0x7(self) -> &'a mut W { self.variant(KERDCG_DLY_A::B_0X7) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 11)) | (((value as u32) & 0x07) << 11);
        self.w
    }
}
// ---- DDRCAPBRST: bit 14, DDRC APB interface reset ----
#[doc = "DDRCAPBRST\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRCAPBRST_A {
    #[doc = "0: does not reset the DDRC APB\r\n interface"]
    B_0X0 = 0,
    #[doc = "1: resets the DDRC APB\r\n interface"]
    B_0X1 = 1,
}
impl From<DDRCAPBRST_A> for bool {
    #[inline(always)]
    fn from(variant: DDRCAPBRST_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRCAPBRST`"]
pub type DDRCAPBRST_R = crate::R<bool, DDRCAPBRST_A>;
impl DDRCAPBRST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRCAPBRST_A {
        match self.bits {
            false => DDRCAPBRST_A::B_0X0,
            true => DDRCAPBRST_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRCAPBRST_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRCAPBRST_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRCAPBRST`"]
pub struct DDRCAPBRST_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRCAPBRST_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRCAPBRST_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "does not reset the DDRC APB interface"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCAPBRST_A::B_0X0) }
    #[doc = "resets the DDRC APB interface"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCAPBRST_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
// ---- DDRCAXIRST: bit 15, DDRC AXI interface reset ----
#[doc = "DDRCAXIRST\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRCAXIRST_A {
    #[doc = "0: does not reset the DDRC AXI\r\n interface"]
    B_0X0 = 0,
    #[doc = "1: resets the DDRC AXI\r\n interface"]
    B_0X1 = 1,
}
impl From<DDRCAXIRST_A> for bool {
    #[inline(always)]
    fn from(variant: DDRCAXIRST_A) -> Self { variant as u8 != 0 }
}
#[doc = "Reader of field `DDRCAXIRST`"]
pub type DDRCAXIRST_R = crate::R<bool, DDRCAXIRST_A>;
impl DDRCAXIRST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DDRCAXIRST_A {
        match self.bits {
            false => DDRCAXIRST_A::B_0X0,
            true => DDRCAXIRST_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool { *self == DDRCAXIRST_A::B_0X0 }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool { *self == DDRCAXIRST_A::B_0X1 }
}
#[doc = "Write proxy for field `DDRCAXIRST`"]
pub struct DDRCAXIRST_W<'a> {
    w: &'a mut W,
}
impl<'a> DDRCAXIRST_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: DDRCAXIRST_A) -> &'a mut W { { self.bit(variant.into()) } }
    #[doc = "does not reset the DDRC AXI interface"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCAXIRST_A::B_0X0) }
    #[doc = "resets the DDRC AXI interface"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCAXIRST_A::B_0X1) }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W { self.bit(true) }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W { self.bit(false) }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
        self.w
    }
}
// ---- DDRCORERST: DDRC core reset (continues past this chunk) ----
#[doc = "DDRCORERST\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DDRCORERST_A {
    #[doc = "0: does not reset the DDRC\r\n core"]
    B_0X0 = 0,
    #[doc = "1: resets the DDRC core"]
    B_0X1 = 1,
}
impl From<DDRCORERST_A> for bool {
#[inline(always)] fn from(variant: DDRCORERST_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `DDRCORERST`"] pub type DDRCORERST_R = crate::R<bool, DDRCORERST_A>; impl DDRCORERST_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DDRCORERST_A { match self.bits { false => DDRCORERST_A::B_0X0, true => DDRCORERST_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DDRCORERST_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DDRCORERST_A::B_0X1 } } #[doc = "Write proxy for field `DDRCORERST`"] pub struct DDRCORERST_W<'a> { w: &'a mut W, } impl<'a> DDRCORERST_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DDRCORERST_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "does not reset the DDRC core"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCORERST_A::B_0X0) } #[doc = "resets the DDRC core"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCORERST_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } #[doc = "DPHYAPBRST\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DPHYAPBRST_A { #[doc = "0: does not reset the DDRPHYC APB\r\n interface"] B_0X0 = 0, #[doc = "1: resets the DDRPHYC APB\r\n interface"] B_0X1 = 1, } impl From<DPHYAPBRST_A> for bool { #[inline(always)] fn from(variant: DPHYAPBRST_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `DPHYAPBRST`"] pub type DPHYAPBRST_R = 
crate::R<bool, DPHYAPBRST_A>; impl DPHYAPBRST_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DPHYAPBRST_A { match self.bits { false => DPHYAPBRST_A::B_0X0, true => DPHYAPBRST_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DPHYAPBRST_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DPHYAPBRST_A::B_0X1 } } #[doc = "Write proxy for field `DPHYAPBRST`"] pub struct DPHYAPBRST_W<'a> { w: &'a mut W, } impl<'a> DPHYAPBRST_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DPHYAPBRST_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "does not reset the DDRPHYC APB interface"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DPHYAPBRST_A::B_0X0) } #[doc = "resets the DDRPHYC APB interface"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DPHYAPBRST_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17); self.w } } #[doc = "DPHYRST\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DPHYRST_A { #[doc = "0: does not reset the\r\n DDRPHYC"] B_0X0 = 0, #[doc = "1: resets the DDRPHYC"] B_0X1 = 1, } impl From<DPHYRST_A> for bool { #[inline(always)] fn from(variant: DPHYRST_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `DPHYRST`"] pub type DPHYRST_R = crate::R<bool, DPHYRST_A>; impl DPHYRST_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DPHYRST_A { match self.bits { false => 
DPHYRST_A::B_0X0, true => DPHYRST_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DPHYRST_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DPHYRST_A::B_0X1 } } #[doc = "Write proxy for field `DPHYRST`"] pub struct DPHYRST_W<'a> { w: &'a mut W, } impl<'a> DPHYRST_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DPHYRST_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "does not reset the DDRPHYC"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DPHYRST_A::B_0X0) } #[doc = "resets the DDRPHYC"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DPHYRST_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } #[doc = "DPHYCTLRST\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DPHYCTLRST_A { #[doc = "0: does not reset the DDRPHYC\r\n Control"] B_0X0 = 0, #[doc = "1: resets the DDRPHYC\r\n Control"] B_0X1 = 1, } impl From<DPHYCTLRST_A> for bool { #[inline(always)] fn from(variant: DPHYCTLRST_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `DPHYCTLRST`"] pub type DPHYCTLRST_R = crate::R<bool, DPHYCTLRST_A>; impl DPHYCTLRST_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DPHYCTLRST_A { match self.bits { false => DPHYCTLRST_A::B_0X0, true => DPHYCTLRST_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DPHYCTLRST_A::B_0X0 } 
#[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DPHYCTLRST_A::B_0X1 } } #[doc = "Write proxy for field `DPHYCTLRST`"] pub struct DPHYCTLRST_W<'a> { w: &'a mut W, } impl<'a> DPHYCTLRST_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DPHYCTLRST_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "does not reset the DDRPHYC Control"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DPHYCTLRST_A::B_0X0) } #[doc = "resets the DDRPHYC Control"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DPHYCTLRST_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19); self.w } } #[doc = "DDRCKMOD\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DDRCKMOD_A { #[doc = "0: Normal mode: the gating of the\r\n dphy_ker_ck clock depends on the DDRPHYCEN,\r\n DDRPHYCLPEN and MPU mode. The gating of the\r\n ddrc_ker_ckg clock depends on the DDRCxEN,\r\n DDRCxLPEN and MPU mode. This mode must be\r\n selected during DDRC and DDRPHYC initialization\r\n phase, and if the application is using the\r\n software self-refresh (SSR)."] B_0X0 = 0, #[doc = "1: Automatic Self-Refresh mode (ASR1):\r\n the clock ddrc_ker_ckg is gated automatically\r\n according to cactive_ddrc signal. The gating of\r\n the dphy_ker_ck clock depends on the DDRPHYCEN,\r\n DDRPHYCLPEN and MPU mode."] B_0X1 = 1, #[doc = "2: Hardware Self-Refresh mode (HSR1):\r\n the gating of the ddrc_ker_ckg clock is\r\n controlled by the AXI-Low-Power interface\r\n connected to the DDRC. 
The gating of the\r\n dphy_ker_ck clock depends on the DDRPHYCEN,\r\n DDRPHYCLPEN and MPU mode."] B_0X2 = 2, #[doc = "5: Full Automatic Self-Refresh mode\r\n (ASR2): the clocks ddrc_ker_ckg and dphy_ker_ck\r\n are gated automatically according to cactive_ddrc\r\n signal."] B_0X5 = 5, #[doc = "6: Full Hardware Self-Refresh mode\r\n (HSR2): the gating of ddrc_ker_ckg and\r\n dphy_ker_ck clocks are controlled by the\r\n AXI-Low-Power interface connected to the\r\n DDRC."] B_0X6 = 6, } impl From<DDRCKMOD_A> for u8 { #[inline(always)] fn from(variant: DDRCKMOD_A) -> Self { variant as _ } } #[doc = "Reader of field `DDRCKMOD`"] pub type DDRCKMOD_R = crate::R<u8, DDRCKMOD_A>; impl DDRCKMOD_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, DDRCKMOD_A> { use crate::Variant::*; match self.bits { 0 => Val(DDRCKMOD_A::B_0X0), 1 => Val(DDRCKMOD_A::B_0X1), 2 => Val(DDRCKMOD_A::B_0X2), 5 => Val(DDRCKMOD_A::B_0X5), 6 => Val(DDRCKMOD_A::B_0X6), i => Res(i), } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DDRCKMOD_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DDRCKMOD_A::B_0X1 } #[doc = "Checks if the value of the field is `B_0X2`"] #[inline(always)] pub fn is_b_0x2(&self) -> bool { *self == DDRCKMOD_A::B_0X2 } #[doc = "Checks if the value of the field is `B_0X5`"] #[inline(always)] pub fn is_b_0x5(&self) -> bool { *self == DDRCKMOD_A::B_0X5 } #[doc = "Checks if the value of the field is `B_0X6`"] #[inline(always)] pub fn is_b_0x6(&self) -> bool { *self == DDRCKMOD_A::B_0X6 } } #[doc = "Write proxy for field `DDRCKMOD`"] pub struct DDRCKMOD_W<'a> { w: &'a mut W, } impl<'a> DDRCKMOD_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DDRCKMOD_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Normal mode: the gating of 
the dphy_ker_ck clock depends on the DDRPHYCEN, DDRPHYCLPEN and MPU mode. The gating of the ddrc_ker_ckg clock depends on the DDRCxEN, DDRCxLPEN and MPU mode. This mode must be selected during DDRC and DDRPHYC initialization phase, and if the application is using the software self-refresh (SSR)."] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DDRCKMOD_A::B_0X0) } #[doc = "Automatic Self-Refresh mode (ASR1): the clock ddrc_ker_ckg is gated automatically according to cactive_ddrc signal. The gating of the dphy_ker_ck clock depends on the DDRPHYCEN, DDRPHYCLPEN and MPU mode."] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DDRCKMOD_A::B_0X1) } #[doc = "Hardware Self-Refresh mode (HSR1): the gating of the ddrc_ker_ckg clock is controlled by the AXI-Low-Power interface connected to the DDRC. The gating of the dphy_ker_ck clock depends on the DDRPHYCEN, DDRPHYCLPEN and MPU mode."] #[inline(always)] pub fn b_0x2(self) -> &'a mut W { self.variant(DDRCKMOD_A::B_0X2) } #[doc = "Full Automatic Self-Refresh mode (ASR2): the clocks ddrc_ker_ckg and dphy_ker_ck are gated automatically according to cactive_ddrc signal."] #[inline(always)] pub fn b_0x5(self) -> &'a mut W { self.variant(DDRCKMOD_A::B_0X5) } #[doc = "Full Hardware Self-Refresh mode (HSR2): the gating of ddrc_ker_ckg and dphy_ker_ck clocks are controlled by the AXI-Low-Power interface connected to the DDRC."] #[inline(always)] pub fn b_0x6(self) -> &'a mut W { self.variant(DDRCKMOD_A::B_0X6) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 20)) | (((value as u32) & 0x07) << 20); self.w } } #[doc = "GSKPMOD\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum GSKPMOD_A { #[doc = "0: The GSKP block is controlled by the\r\n GSKPCTRL bit."] B_0X0 = 0, #[doc = "1: The GSKP block is controlled\r\n automatically by the DFI."] B_0X1 = 1, } impl From<GSKPMOD_A> for bool 
{ #[inline(always)] fn from(variant: GSKPMOD_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `GSKPMOD`"] pub type GSKPMOD_R = crate::R<bool, GSKPMOD_A>; impl GSKPMOD_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> GSKPMOD_A { match self.bits { false => GSKPMOD_A::B_0X0, true => GSKPMOD_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == GSKPMOD_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == GSKPMOD_A::B_0X1 } } #[doc = "Write proxy for field `GSKPMOD`"] pub struct GSKPMOD_W<'a> { w: &'a mut W, } impl<'a> GSKPMOD_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GSKPMOD_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The GSKP block is controlled by the GSKPCTRL bit."] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(GSKPMOD_A::B_0X0) } #[doc = "The GSKP block is controlled automatically by the DFI."] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(GSKPMOD_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23); self.w } } #[doc = "GSKPCTRL\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum GSKPCTRL_A { #[doc = "0: The GSKP block is providing the\r\n clock phy_out_ck (provided by the\r\n DDRPHYC)"] B_0X0 = 0, #[doc = "1: The GSKP block is providing the\r\n clock dphy_ker_ck (provided by the\r\n RCC)"] B_0X1 = 1, } impl From<GSKPCTRL_A> for bool { #[inline(always)] fn from(variant: GSKPCTRL_A) -> Self { variant 
as u8 != 0 } } #[doc = "Reader of field `GSKPCTRL`"] pub type GSKPCTRL_R = crate::R<bool, GSKPCTRL_A>; impl GSKPCTRL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> GSKPCTRL_A { match self.bits { false => GSKPCTRL_A::B_0X0, true => GSKPCTRL_A::B_0X1, } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == GSKPCTRL_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == GSKPCTRL_A::B_0X1 } } #[doc = "Write proxy for field `GSKPCTRL`"] pub struct GSKPCTRL_W<'a> { w: &'a mut W, } impl<'a> GSKPCTRL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GSKPCTRL_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The GSKP block is providing the clock phy_out_ck (provided by the DDRPHYC)"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(GSKPCTRL_A::B_0X0) } #[doc = "The GSKP block is providing the clock dphy_ker_ck (provided by the RCC)"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(GSKPCTRL_A::B_0X1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24); self.w } } #[doc = "DFILP_WIDTH\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DFILP_WIDTH_A { #[doc = "0: Bypass, delay disabled"] B_0X0 = 0, #[doc = "1: Forces a delay of 160 x Tdphy_ker_ck\r\n to be used when Fdphy_ker_ck is between 120 and\r\n 160 MHz."] B_0X1 = 1, #[doc = "2: Forces a delay of 224 x Tdphy_ker_ck\r\n to be used when Fdphy_ker_ck is between 160 and\r\n 220 MHz."] B_0X2 = 2, 
#[doc = "3: Forces a delay of 320 x Tdphy_ker_ck\r\n to be used when Fdphy_ker_ck is between 220 and\r\n 320 MHz."] B_0X3 = 3, #[doc = "4: Forces a delay of 416 x Tdphy_ker_ck\r\n to be used when Fdphy_ker_ck is between 320 and\r\n 410 MHz."] B_0X4 = 4, } impl From<DFILP_WIDTH_A> for u8 { #[inline(always)] fn from(variant: DFILP_WIDTH_A) -> Self { variant as _ } } #[doc = "Reader of field `DFILP_WIDTH`"] pub type DFILP_WIDTH_R = crate::R<u8, DFILP_WIDTH_A>; impl DFILP_WIDTH_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, DFILP_WIDTH_A> { use crate::Variant::*; match self.bits { 0 => Val(DFILP_WIDTH_A::B_0X0), 1 => Val(DFILP_WIDTH_A::B_0X1), 2 => Val(DFILP_WIDTH_A::B_0X2), 3 => Val(DFILP_WIDTH_A::B_0X3), 4 => Val(DFILP_WIDTH_A::B_0X4), i => Res(i), } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == DFILP_WIDTH_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn is_b_0x1(&self) -> bool { *self == DFILP_WIDTH_A::B_0X1 } #[doc = "Checks if the value of the field is `B_0X2`"] #[inline(always)] pub fn is_b_0x2(&self) -> bool { *self == DFILP_WIDTH_A::B_0X2 } #[doc = "Checks if the value of the field is `B_0X3`"] #[inline(always)] pub fn is_b_0x3(&self) -> bool { *self == DFILP_WIDTH_A::B_0X3 } #[doc = "Checks if the value of the field is `B_0X4`"] #[inline(always)] pub fn is_b_0x4(&self) -> bool { *self == DFILP_WIDTH_A::B_0X4 } } #[doc = "Write proxy for field `DFILP_WIDTH`"] pub struct DFILP_WIDTH_W<'a> { w: &'a mut W, } impl<'a> DFILP_WIDTH_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DFILP_WIDTH_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Bypass, delay disabled"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(DFILP_WIDTH_A::B_0X0) } #[doc = "Forces a delay of 160 x Tdphy_ker_ck to be used when Fdphy_ker_ck is 
between 120 and 160 MHz."] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(DFILP_WIDTH_A::B_0X1) } #[doc = "Forces a delay of 224 x Tdphy_ker_ck to be used when Fdphy_ker_ck is between 160 and 220 MHz."] #[inline(always)] pub fn b_0x2(self) -> &'a mut W { self.variant(DFILP_WIDTH_A::B_0X2) } #[doc = "Forces a delay of 320 x Tdphy_ker_ck to be used when Fdphy_ker_ck is between 220 and 320 MHz."] #[inline(always)] pub fn b_0x3(self) -> &'a mut W { self.variant(DFILP_WIDTH_A::B_0X3) } #[doc = "Forces a delay of 416 x Tdphy_ker_ck to be used when Fdphy_ker_ck is between 320 and 410 MHz."] #[inline(always)] pub fn b_0x4(self) -> &'a mut W { self.variant(DFILP_WIDTH_A::B_0X4) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 25)) | (((value as u32) & 0x07) << 25); self.w } } #[doc = "GSKP_DUR\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum GSKP_DUR_A { #[doc = "0: Sets a delay of 32 x\r\n Tdphy_ker_ck"] B_0X0 = 0, #[doc = "1: Sets a delay of 2 x 32 x\r\n Tdphy_ker_ck"] B_0X1 = 1, #[doc = "2: Sets a delay of 3 x 32 x\r\n Tdphy_ker_ck"] B_0X2 = 2, #[doc = "15: Sets a delay of 16 x 32 x\r\n Tdphy_ker_ck"] B_0XF = 15, } impl From<GSKP_DUR_A> for u8 { #[inline(always)] fn from(variant: GSKP_DUR_A) -> Self { variant as _ } } #[doc = "Reader of field `GSKP_DUR`"] pub type GSKP_DUR_R = crate::R<u8, GSKP_DUR_A>; impl GSKP_DUR_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, GSKP_DUR_A> { use crate::Variant::*; match self.bits { 0 => Val(GSKP_DUR_A::B_0X0), 1 => Val(GSKP_DUR_A::B_0X1), 2 => Val(GSKP_DUR_A::B_0X2), 15 => Val(GSKP_DUR_A::B_0XF), i => Res(i), } } #[doc = "Checks if the value of the field is `B_0X0`"] #[inline(always)] pub fn is_b_0x0(&self) -> bool { *self == GSKP_DUR_A::B_0X0 } #[doc = "Checks if the value of the field is `B_0X1`"] #[inline(always)] pub fn 
is_b_0x1(&self) -> bool { *self == GSKP_DUR_A::B_0X1 } #[doc = "Checks if the value of the field is `B_0X2`"] #[inline(always)] pub fn is_b_0x2(&self) -> bool { *self == GSKP_DUR_A::B_0X2 } #[doc = "Checks if the value of the field is `B_0XF`"] #[inline(always)] pub fn is_b_0x_f(&self) -> bool { *self == GSKP_DUR_A::B_0XF } } #[doc = "Write proxy for field `GSKP_DUR`"] pub struct GSKP_DUR_W<'a> { w: &'a mut W, } impl<'a> GSKP_DUR_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GSKP_DUR_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Sets a delay of 32 x Tdphy_ker_ck"] #[inline(always)] pub fn b_0x0(self) -> &'a mut W { self.variant(GSKP_DUR_A::B_0X0) } #[doc = "Sets a delay of 2 x 32 x Tdphy_ker_ck"] #[inline(always)] pub fn b_0x1(self) -> &'a mut W { self.variant(GSKP_DUR_A::B_0X1) } #[doc = "Sets a delay of 3 x 32 x Tdphy_ker_ck"] #[inline(always)] pub fn b_0x2(self) -> &'a mut W { self.variant(GSKP_DUR_A::B_0X2) } #[doc = "Sets a delay of 16 x 32 x Tdphy_ker_ck"] #[inline(always)] pub fn b_0x_f(self) -> &'a mut W { self.variant(GSKP_DUR_A::B_0XF) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 28)) | (((value as u32) & 0x0f) << 28); self.w } } impl R { #[doc = "Bit 0 - DDRC1EN"] #[inline(always)] pub fn ddrc1en(&self) -> DDRC1EN_R { DDRC1EN_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - DDRC1LPEN"] #[inline(always)] pub fn ddrc1lpen(&self) -> DDRC1LPEN_R { DDRC1LPEN_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - DDRC2EN"] #[inline(always)] pub fn ddrc2en(&self) -> DDRC2EN_R { DDRC2EN_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - DDRC2LPEN"] #[inline(always)] pub fn ddrc2lpen(&self) -> DDRC2LPEN_R { DDRC2LPEN_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - DDRPHYCEN"] #[inline(always)] pub fn ddrphycen(&self) -> DDRPHYCEN_R { DDRPHYCEN_R::new(((self.bits >> 
4) & 0x01) != 0) } #[doc = "Bit 5 - DDRPHYCLPEN"] #[inline(always)] pub fn ddrphyclpen(&self) -> DDRPHYCLPEN_R { DDRPHYCLPEN_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - DDRCAPBEN"] #[inline(always)] pub fn ddrcapben(&self) -> DDRCAPBEN_R { DDRCAPBEN_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - DDRCAPBLPEN"] #[inline(always)] pub fn ddrcapblpen(&self) -> DDRCAPBLPEN_R { DDRCAPBLPEN_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - AXIDCGEN"] #[inline(always)] pub fn axidcgen(&self) -> AXIDCGEN_R { AXIDCGEN_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - DDRPHYCAPBEN"] #[inline(always)] pub fn ddrphycapben(&self) -> DDRPHYCAPBEN_R { DDRPHYCAPBEN_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - DDRPHYCAPBLPEN"] #[inline(always)] pub fn ddrphycapblpen(&self) -> DDRPHYCAPBLPEN_R { DDRPHYCAPBLPEN_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bits 11:13 - KERDCG_DLY"] #[inline(always)] pub fn kerdcg_dly(&self) -> KERDCG_DLY_R { KERDCG_DLY_R::new(((self.bits >> 11) & 0x07) as u8) } #[doc = "Bit 14 - DDRCAPBRST"] #[inline(always)] pub fn ddrcapbrst(&self) -> DDRCAPBRST_R { DDRCAPBRST_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - DDRCAXIRST"] #[inline(always)] pub fn ddrcaxirst(&self) -> DDRCAXIRST_R { DDRCAXIRST_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 16 - DDRCORERST"] #[inline(always)] pub fn ddrcorerst(&self) -> DDRCORERST_R { DDRCORERST_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - DPHYAPBRST"] #[inline(always)] pub fn dphyapbrst(&self) -> DPHYAPBRST_R { DPHYAPBRST_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - DPHYRST"] #[inline(always)] pub fn dphyrst(&self) -> DPHYRST_R { DPHYRST_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - DPHYCTLRST"] #[inline(always)] pub fn dphyctlrst(&self) -> DPHYCTLRST_R { DPHYCTLRST_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bits 20:22 - DDRCKMOD"] #[inline(always)] pub fn ddrckmod(&self) -> DDRCKMOD_R { 
DDRCKMOD_R::new(((self.bits >> 20) & 0x07) as u8) } #[doc = "Bit 23 - GSKPMOD"] #[inline(always)] pub fn gskpmod(&self) -> GSKPMOD_R { GSKPMOD_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bit 24 - GSKPCTRL"] #[inline(always)] pub fn gskpctrl(&self) -> GSKPCTRL_R { GSKPCTRL_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bits 25:27 - DFILP_WIDTH"] #[inline(always)] pub fn dfilp_width(&self) -> DFILP_WIDTH_R { DFILP_WIDTH_R::new(((self.bits >> 25) & 0x07) as u8) } #[doc = "Bits 28:31 - GSKP_DUR"] #[inline(always)] pub fn gskp_dur(&self) -> GSKP_DUR_R { GSKP_DUR_R::new(((self.bits >> 28) & 0x0f) as u8) } } impl W { #[doc = "Bit 0 - DDRC1EN"] #[inline(always)] pub fn ddrc1en(&mut self) -> DDRC1EN_W { DDRC1EN_W { w: self } } #[doc = "Bit 1 - DDRC1LPEN"] #[inline(always)] pub fn ddrc1lpen(&mut self) -> DDRC1LPEN_W { DDRC1LPEN_W { w: self } } #[doc = "Bit 2 - DDRC2EN"] #[inline(always)] pub fn ddrc2en(&mut self) -> DDRC2EN_W { DDRC2EN_W { w: self } } #[doc = "Bit 3 - DDRC2LPEN"] #[inline(always)] pub fn ddrc2lpen(&mut self) -> DDRC2LPEN_W { DDRC2LPEN_W { w: self } } #[doc = "Bit 4 - DDRPHYCEN"] #[inline(always)] pub fn ddrphycen(&mut self) -> DDRPHYCEN_W { DDRPHYCEN_W { w: self } } #[doc = "Bit 5 - DDRPHYCLPEN"] #[inline(always)] pub fn ddrphyclpen(&mut self) -> DDRPHYCLPEN_W { DDRPHYCLPEN_W { w: self } } #[doc = "Bit 6 - DDRCAPBEN"] #[inline(always)] pub fn ddrcapben(&mut self) -> DDRCAPBEN_W { DDRCAPBEN_W { w: self } } #[doc = "Bit 7 - DDRCAPBLPEN"] #[inline(always)] pub fn ddrcapblpen(&mut self) -> DDRCAPBLPEN_W { DDRCAPBLPEN_W { w: self } } #[doc = "Bit 8 - AXIDCGEN"] #[inline(always)] pub fn axidcgen(&mut self) -> AXIDCGEN_W { AXIDCGEN_W { w: self } } #[doc = "Bit 9 - DDRPHYCAPBEN"] #[inline(always)] pub fn ddrphycapben(&mut self) -> DDRPHYCAPBEN_W { DDRPHYCAPBEN_W { w: self } } #[doc = "Bit 10 - DDRPHYCAPBLPEN"] #[inline(always)] pub fn ddrphycapblpen(&mut self) -> DDRPHYCAPBLPEN_W { DDRPHYCAPBLPEN_W { w: self } } #[doc = "Bits 11:13 - KERDCG_DLY"] 
#[inline(always)] pub fn kerdcg_dly(&mut self) -> KERDCG_DLY_W { KERDCG_DLY_W { w: self } } #[doc = "Bit 14 - DDRCAPBRST"] #[inline(always)] pub fn ddrcapbrst(&mut self) -> DDRCAPBRST_W { DDRCAPBRST_W { w: self } } #[doc = "Bit 15 - DDRCAXIRST"] #[inline(always)] pub fn ddrcaxirst(&mut self) -> DDRCAXIRST_W { DDRCAXIRST_W { w: self } } #[doc = "Bit 16 - DDRCORERST"] #[inline(always)] pub fn ddrcorerst(&mut self) -> DDRCORERST_W { DDRCORERST_W { w: self } } #[doc = "Bit 17 - DPHYAPBRST"] #[inline(always)] pub fn dphyapbrst(&mut self) -> DPHYAPBRST_W { DPHYAPBRST_W { w: self } } #[doc = "Bit 18 - DPHYRST"] #[inline(always)] pub fn dphyrst(&mut self) -> DPHYRST_W { DPHYRST_W { w: self } } #[doc = "Bit 19 - DPHYCTLRST"] #[inline(always)] pub fn dphyctlrst(&mut self) -> DPHYCTLRST_W { DPHYCTLRST_W { w: self } } #[doc = "Bits 20:22 - DDRCKMOD"] #[inline(always)] pub fn ddrckmod(&mut self) -> DDRCKMOD_W { DDRCKMOD_W { w: self } } #[doc = "Bit 23 - GSKPMOD"] #[inline(always)] pub fn gskpmod(&mut self) -> GSKPMOD_W { GSKPMOD_W { w: self } } #[doc = "Bit 24 - GSKPCTRL"] #[inline(always)] pub fn gskpctrl(&mut self) -> GSKPCTRL_W { GSKPCTRL_W { w: self } } #[doc = "Bits 25:27 - DFILP_WIDTH"] #[inline(always)] pub fn dfilp_width(&mut self) -> DFILP_WIDTH_W { DFILP_WIDTH_W { w: self } } #[doc = "Bits 28:31 - GSKP_DUR"] #[inline(always)] pub fn gskp_dur(&mut self) -> GSKP_DUR_W { GSKP_DUR_W { w: self } } }
/// Returns the product of `a` and `b`.
///
/// Overflow follows standard `i32` arithmetic: panics in debug builds,
/// wraps in release builds.
pub fn multiply(a: i32, b: i32) -> i32 {
    a * b
}

/// Returns the difference `a - b`.
///
/// Overflow follows standard `i32` arithmetic: panics in debug builds,
/// wraps in release builds.
pub fn sub(a: i32, b: i32) -> i32 {
    a - b
}
//extern crate libc; //extern crate serde;
use libc::{
    CLONE_NEWIPC, CLONE_NEWNET, CLONE_NEWNS, CLONE_NEWPID, CLONE_NEWUSER, CLONE_NEWUTS, SIGCHLD,
};
use serde::{Serialize, Deserialize};
use serde_json;
use std::ffi::CString;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::io::Error;

/// Program to execute inside the namespaces: executable path plus arguments.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Cmd {
    executable: String,
    args: Vec<String>,
}

/// A single bind mount from host `source` to `target`, optionally read-only.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct MountBinds {
    source: String,
    target: String,
    read_only: Option<bool>,
}

/// Host uid/gid that uid/gid 0 inside the new user namespace maps to.
// NOTE(review): u16 limits the mappable host ids to 65535 — TODO confirm
// that is intentional.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct UserMap {
    uid: u16,
    gid: u16,
}

/// Container configuration, deserialized from a JSON file.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct NsConfig {
    hostname: Option<String>,
    program: Cmd,
    user_map: Option<UserMap>,
    mount_bindings: Option<Vec<MountBinds>>,
    chroot: String,
}

impl NsConfig {
    /// Reads and deserializes the config file; panics (via `expect`/`unwrap`)
    /// on a missing file, unreadable contents, or JSON that does not match
    /// `NsConfig`.
    fn from_file(config_file_name: String) -> NsConfig {
        let error_message = format!("Config file not found: {}", config_file_name);
        let mut file = File::open(config_file_name).expect(error_message.as_str());
        let mut server_config = String::new();
        file.read_to_string(&mut server_config).unwrap();
        // NOTE(review): `&mut` is unnecessary — `from_str` only needs `&str`.
        let ns_config: NsConfig = serde_json::from_str(&mut server_config).unwrap();
        ns_config
    }
    // The accessors below clone so callers get owned values.
    fn program(&self) -> Cmd {
        self.program.clone()
    }
    fn bind_map(&self) -> Option<Vec<MountBinds>> {
        self.mount_bindings.clone()
    }
    fn user_map(&self) -> Option<UserMap> {
        self.user_map.clone()
    }
    fn chroot(&self) -> String {
        self.chroot.clone()
    }
    fn hostname(&self) -> Option<String> {
        self.hostname.clone()
    }
}

/// Writes `/proc/self/uid_map` and `/proc/self/gid_map` (and denies
/// `setgroups`, which is required before writing a gid map as an
/// unprivileged process), mapping id 0 inside the new user namespace to the
/// given host `uid`/`gid`. Panics on any I/O failure.
fn create_namespace_mapping(UserMap { uid, gid }: UserMap) {
    let pid = "self";
    let setgroups_file = format!("/proc/{}/setgroups", pid);
    let uid_map_file = format!("/proc/{}/uid_map", pid);
    let gid_map_file = format!("/proc/{}/gid_map", pid);
    let mut setgroups = OpenOptions::new()
        .write(true)
        .read(true)
        .open(&setgroups_file)
        .unwrap();
    setgroups
        .write_all(b"deny")
        .expect("Unable to write to setgroups");
    setgroups.flush().unwrap();
    drop(setgroups);
    // Map uid 0 in the namespace to the configured host uid, length 1.
    let mut uid_map = OpenOptions::new()
        .read(true)
        .write(true)
        .open(uid_map_file)
        .unwrap();
    let uid_map_content = format!("0 {} 1", uid);
    uid_map
        .write_all(uid_map_content.as_bytes())
        .expect("Unable to write UID map");
    uid_map.flush().unwrap();
    drop(uid_map);
    // Same single-entry mapping for the gid.
    let mut gid_map = OpenOptions::new()
        .read(true)
        .write(true)
        .open(gid_map_file)
        .unwrap();
    let gid_map_content = format!("0 {} 1", gid);
    gid_map
        .write_all(gid_map_content.as_bytes())
        .expect("Unable to write GID map");
    gid_map.flush().unwrap();
    drop(gid_map);
}

/// Performs each configured bind mount; failures are logged to stdout and
/// skipped rather than aborting.
fn setup_mount_binds(mount_bind_maps: Vec<MountBinds>) {
    unsafe {
        //Bind mounts before chroot
        for bm in mount_bind_maps {
            // The CString temporaries live until the end of the `mount` call
            // statement, so the raw pointers remain valid for the syscall.
            let res = libc::mount(
                CString::new(bm.source.as_bytes()).unwrap().as_ptr(),
                CString::new(bm.target.as_bytes()).unwrap().as_ptr(),
                std::ptr::null(),
                libc::MS_BIND, //Note readonly flag wont work here - handle RO binds later
                std::ptr::null(),
            );
            if res != 0 {
                println!(
                    "bind mount failed with an error -> {:?}",
                    Error::last_os_error()
                );
            } else {
                //Handle RO binds
                // A bind mount cannot be created read-only in one step; it is
                // remounted with MS_RDONLY | MS_BIND | MS_REMOUNT afterwards.
                if let Some(true) = bm.read_only {
                    let res = libc::mount(
                        std::ptr::null(),
                        CString::new(bm.target.as_bytes()).unwrap().as_ptr(),
                        std::ptr::null(),
                        libc::MS_RDONLY | libc::MS_BIND | libc::MS_REMOUNT,
                        std::ptr::null(),
                    );
                    if res != 0 {
                        println!(
                            "bind readonly switch failed with an error -> {:?}",
                            Error::last_os_error()
                        );
                    }
                }
            }
        }
    }
}

/// Chroots into `chroot_path`, chdirs to the new root, then mounts /proc
/// (procfs) plus tmpfs on /dev and /run inside it. Failures are logged and
/// execution continues.
fn setup_chroot_env(chroot_path: String) {
    unsafe {
        let res = libc::chroot(CString::new(chroot_path.as_bytes()).unwrap().as_ptr());
        if res != 0 {
            println!(
                "chroot failed with an error -> {:?}",
                Error::last_os_error()
            );
        }
        // chdir("/") so the working directory is inside the new root.
        let res = libc::chdir(CString::new("/").unwrap().as_ptr());
        if res != 0 {
            println!("chdir failed with an error -> {:?}", Error::last_os_error());
        }
        //Mount proc filesystem
        let res = libc::mount(
            std::ptr::null(),
            CString::new("/proc").unwrap().as_ptr(),
            CString::new("proc").unwrap().as_ptr(),
            0u64,
            std::ptr::null(),
        );
        if res != 0 {
            println!(
                "proc mount failed with an error -> {:?}",
                Error::last_os_error()
            );
        }
        //Mount dev filesystem
        let res = libc::mount(
            std::ptr::null(),
            CString::new("/dev").unwrap().as_ptr(),
            CString::new("tmpfs").unwrap().as_ptr(),
            0u64,
            std::ptr::null(),
        );
        if res != 0 {
            println!(
                "dev mount failed with an error -> {:?}",
                Error::last_os_error()
            );
        }
        //Mount run filesystem
        let res = libc::mount(
            std::ptr::null(),
            CString::new("/run").unwrap().as_ptr(),
            CString::new("tmpfs").unwrap().as_ptr(),
            0u64,
            std::ptr::null(),
        );
        if res != 0 {
            println!(
                "run mount failed with an error -> {:?}",
                Error::last_os_error()
            );
        }
    }
}

/// Replaces the current process image with the configured program via
/// `execv`. Only returns (with the error logged) if `execv` itself fails.
fn exec_in_ns(Cmd { executable, args }: Cmd) -> libc::c_int {
    unsafe {
        //First argument will be the program name itself. See execv syscall description
        let mut args_cstring: Vec<CString> = vec![CString::new(executable.as_bytes()).unwrap()];
        for arg in args {
            args_cstring.push(CString::new(arg.as_bytes()).unwrap());
        }
        // `args_cstring` owns the strings; this vec only borrows pointers.
        let mut args_c_char: Vec<*const libc::c_char> =
            args_cstring.iter().map(|arg| arg.as_ptr()).collect();
        args_c_char.push(std::ptr::null()); //NULL terminated
        let res = libc::execv(
            CString::new(executable.as_bytes()).unwrap().as_ptr(),
            args_c_char.as_ptr(),
        );
        if res != 0 {
            println!(
                "Entry point execution failed with an error -> {:?}",
                Error::last_os_error()
            );
        }
        res
    }
}

/// Child entry point passed to `clone(2)`: sets the hostname, writes the
/// uid/gid maps, performs bind mounts, then execs the configured program.
extern "C" fn setup_ns(ns_config: *mut NsConfig) -> libc::c_int {
    let ns_config: &NsConfig = unsafe { &mut *ns_config };
    //Set hostname
    unsafe {
        if let Some(hostname) = ns_config.hostname() {
            let res = libc::sethostname(
                CString::new(hostname.as_bytes()).unwrap().as_ptr(),
                hostname.as_bytes().len(),
            );
            if res != 0 {
                // NOTE(review): copy-pasted message — this is sethostname,
                // not a proc mount.
                println!(
                    "proc mount failed with an error -> {:?}",
                    Error::last_os_error()
                );
            }
        }
        //Do mappings UID and GID mappings
        println!("setting up uid and gid mappings");
        match ns_config.user_map() {
            Some(user_map) => create_namespace_mapping(user_map),
            None => {}
        }
        //Mount bindings
        println!("setting up binds");
        match ns_config.bind_map() {
            Some(bind_map) => setup_mount_binds(bind_map),
            None => {}
        }
        //Chroot and Proc Mount
        println!("setting up chroot");
        // NOTE(review): the chroot step is currently disabled — the program
        // runs against the host root despite the config's `chroot` field.
        //setup_chroot_env(ns_config.chroot());
        //EXECV the program replacing the clone completely
        println!("setting up to execute program");
        exec_in_ns(ns_config.program())
    }
}

/// Loads `container.json`, clones a child into fresh namespaces, and waits
/// for it, printing its resource usage and exit status.
fn main() {
    //CLONE and then EXEC entry point in CLONE
    unsafe {
        let mut ns_config = NsConfig::from_file("container.json".to_string());
        // NOTE(review): 4 KiB is a very small stack for the cloned child
        // (it runs serde/format!/file I/O); consider a much larger buffer.
        let mut nstack = [0u8; 4096];
        // Stack grows downward: pass a pointer one-past-the-end, rounded
        // down to 16-byte alignment.
        let ptr = nstack.as_mut_ptr().offset(nstack.len() as isize);
        let ptr_aligned = ptr.offset((ptr as usize % 16) as isize * -1);
        //CLONE FLAGS for namespace
        let mut ns_flags: libc::c_int =
            CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWPID | CLONE_NEWNET | SIGCHLD;
        //NEWUSER if user mappings are present
        match ns_config.user_map {
            Some(_) => ns_flags = ns_flags | CLONE_NEWUSER,
            None => {}
        }
        // NOTE(review): the transmute casts `extern "C" fn(*mut NsConfig)`
        // to the `fn(*mut c_void)` shape clone expects; `ns_config` must
        // outlive the child (it does — main waits below).
        let pid = libc::clone(
            std::mem::transmute(setup_ns as extern "C" fn(*mut NsConfig) -> libc::c_int),
            ptr_aligned as *mut libc::c_void,
            ns_flags,
            &mut ns_config as *mut _ as *mut libc::c_void,
        );
        //If NEWUSER is not used/user mappings are not provided then you got to be root to perform clone
        if pid != 0 {
            println!("Program PID -> {}", pid);
            // NOTE(review): `MaybeUninit::uninit().assume_init()` on a
            // `libc::rusage` is undefined behavior — prefer
            // `std::mem::zeroed()` or passing `MaybeUninit::as_mut_ptr()`.
            let mut rusage: libc::rusage = std::mem::MaybeUninit::uninit().assume_init();
            let mut status: i32 = 0;
            let options: i32 = 0;
            let res = libc::wait4(pid, &mut status, options, &mut rusage);
            println!("CN WAIT RESULT -> {}", res);
            println!("CN RUSAGE -> {:#?}", rusage);
            println!("CN WAIT STATUS -> {}", status);
            if status != 0 {
                println!(
                    "CN WAIT ERROR WHILE RUNNING -> {:?}",
                    Error::last_os_error()
                );
            }
        } // no else
    }
}
use anyhow::Context; use lilac::Lilac; use rodio::{Sink, Source}; use std::{path::PathBuf, process, thread}; use structopt::StructOpt; type Result = anyhow::Result<()>; const OK: Result = Result::Ok(()); mod interactive; mod transcode; /// LILAC playback and transcoding utility /// /// If neither of the subcommands are detected, /// opens an interactive player and load the provided files. #[derive(StructOpt)] enum Opt { /// Plays a LILAC file Play { /// File to play #[structopt(name = "FILE")] file: PathBuf, /// Playback volume /// /// Should be anywhere between 0.0 and 1.0 inclusively #[structopt(short, long, name = "VOLUME", default_value = "1.0")] volume: f32, }, /// Transcodes a file to or from LILAC /// /// Supports transcoding from MP3, FLAC, /// OGG and WAV, and transcoding to WAV. /// Input and output formats are automatically inferred Transcode { /// Glob matching the input files #[structopt(name = "GLOB")] glob: String, /// Output files naming pattern /// /// %F is replaced with the input filename without extension, /// %E with the output format extension, /// %e with the input format extension, /// %T with the song title, /// %A with the song artist, /// %a with the song album. 
#[structopt(name = "PATTERN", default_value = "%F.%E")] output: String, /// Keep input files after transcoding #[structopt(short, long)] keep: bool, }, #[structopt(external_subcommand)] Interactive(Vec<String>), } fn main() { if let Err(e) = match Opt::from_args() { Opt::Play { file, volume } => play(file, volume), Opt::Transcode { glob, output, keep } => transcode::main(glob, output, keep), Opt::Interactive(queue) => interactive::main(queue), } { eprintln!("{:#}", e); process::exit(1); } } fn play(file: PathBuf, volume: f32) -> Result { let lilac = Lilac::read_file(file)?; println!( "Now playing {} by {} on {}", lilac.title(), lilac.artist(), lilac.album(), ); let device = rodio::default_output_device().context("no audio device")?; let sink = Sink::new(&device); let source = lilac.source(); let duration = source.total_duration().unwrap(); sink.set_volume(volume); sink.append(source); sink.play(); thread::sleep(duration); OK }
use std::sync::MutexGuard;

use nia_interpreter_core::Interpreter;
use nia_interpreter_core::NiaInterpreterCommand;
use nia_interpreter_core::NiaInterpreterCommandResult;
use nia_interpreter_core::{EventLoopHandle, NiaRemoveMappingCommandResult};

use crate::error::{NiaServerError, NiaServerResult};
use crate::protocol::{NiaConvertable, NiaRemoveMappingRequest, Serializable};
use nia_protocol_rust::RemoveMappingResponse;

/// Server-side response for a "remove mapping" request, wrapping the
/// interpreter's command result.
#[derive(Debug, Clone)]
pub struct NiaRemoveMappingResponse {
    command_result: NiaRemoveMappingCommandResult,
}

impl NiaRemoveMappingResponse {
    /// Sends a remove-mapping command to the interpreter and waits for the
    /// matching result.
    ///
    /// Errors if the command cannot be sent, the result cannot be received,
    /// or the interpreter replies with an unexpected result variant.
    /// The `MutexGuard` is held for the whole send/receive round trip so the
    /// received result corresponds to the command sent here.
    fn try_from(
        nia_remove_mapping_request: NiaRemoveMappingRequest,
        event_loop_handle: MutexGuard<EventLoopHandle>,
    ) -> Result<NiaRemoveMappingResponse, NiaServerError> {
        // Convert the protocol-level key chords to the interpreter's
        // representation.
        let key_chords = nia_remove_mapping_request
            .take_key_chords()
            .iter()
            .map(|key_chord| key_chord.to_interpreter_repr())
            .collect::<Vec<nia_interpreter_core::KeyChord>>();

        let interpreter_command =
            NiaInterpreterCommand::make_remove_mapping_command(key_chords);

        event_loop_handle
            .send_command(interpreter_command)
            .map_err(|_| {
                NiaServerError::interpreter_error(
                    "Error sending command to the interpreter.",
                )
            })?;

        let execution_result = event_loop_handle.receive_result().map_err(|_| {
            NiaServerError::interpreter_error(
                "Error reading command from the interpreter.",
            )
        })?;

        let response = match execution_result {
            NiaInterpreterCommandResult::RemoveMapping(command_result) => {
                NiaRemoveMappingResponse { command_result }
            }
            _ => {
                return NiaServerError::interpreter_error(
                    "Unexpected command result.",
                )
                .into()
            }
        };

        Ok(response)
    }

    /// Infallible wrapper around [`NiaRemoveMappingResponse::try_from`]:
    /// any error is folded into a `Failure` command result so a response is
    /// always produced.
    pub fn from(
        nia_remove_mapping_request: NiaRemoveMappingRequest,
        event_loop_handle: MutexGuard<EventLoopHandle>,
    ) -> NiaRemoveMappingResponse {
        let try_result = NiaRemoveMappingResponse::try_from(
            nia_remove_mapping_request,
            event_loop_handle,
        );

        match try_result {
            Ok(result) => result,
            Err(error) => {
                let message =
                    format!("Execution failure: {}", error.get_message());
                let command_result =
                    NiaRemoveMappingCommandResult::Failure(message);

                NiaRemoveMappingResponse { command_result }
            }
        }
    }
}

impl
    Serializable<
        NiaRemoveMappingResponse,
        nia_protocol_rust::RemoveMappingResponse,
    > for NiaRemoveMappingResponse
{
    /// Maps the Success/Error/Failure command result onto the corresponding
    /// protobuf result variant.
    fn to_pb(&self) -> RemoveMappingResponse {
        let result = &self.command_result;

        let mut remove_mapping_response =
            nia_protocol_rust::RemoveMappingResponse::new();

        match result {
            NiaRemoveMappingCommandResult::Success() => {
                let mut success_result =
                    nia_protocol_rust::RemoveMappingResponse_SuccessResult::new(
                    );

                success_result.set_message(protobuf::Chars::from(
                    String::from("Success."),
                ));
                remove_mapping_response.set_success_result(success_result);
            }
            NiaRemoveMappingCommandResult::Error(error_message) => {
                let mut error_result =
                    nia_protocol_rust::RemoveMappingResponse_ErrorResult::new();

                error_result
                    .set_message(protobuf::Chars::from(error_message.clone()));
                remove_mapping_response.set_error_result(error_result);
            }
            NiaRemoveMappingCommandResult::Failure(failure_message) => {
                let mut failure_result =
                    nia_protocol_rust::RemoveMappingResponse_FailureResult::new(
                    );

                failure_result.set_message(protobuf::Chars::from(
                    failure_message.clone(),
                ));
                remove_mapping_response.set_failure_result(failure_result);
            }
        }

        remove_mapping_response
    }

    /// Deserialization is not used for this response type on the server.
    fn from_pb(
        object_pb: RemoveMappingResponse,
    ) -> NiaServerResult<NiaRemoveMappingResponse> {
        unreachable!()
    }
}
//! A module containing support for EEPROM. //! //! EEPROM requires using DMA to issue commands for both reading and writing. use super::{Error, MediaType, RawSaveAccess}; use crate::{ prelude::*, save::{lock_media, MediaInfo, Timeout}, sync::with_irqs_disabled, }; use core::cmp; use voladdress::*; const PORT: VolAddress<u16, Safe, Safe> = unsafe { VolAddress::new(0x0DFFFF00) }; const SECTOR_SHIFT: usize = 3; const SECTOR_LEN: usize = 1 << SECTOR_SHIFT; const SECTOR_MASK: usize = SECTOR_LEN - 1; /// Disable IRQs and DMAs during each read block. fn disable_dmas(func: impl FnOnce()) { with_irqs_disabled(|| unsafe { // Disable other DMAs. This avoids our read/write from being interrupted // by a higher priority DMA channel. let dma0_ctl = DMA0CNT_H.read(); let dma1_ctl = DMA1CNT_H.read(); let dma2_ctl = DMA2CNT_H.read(); DMA0CNT_H.write(dma0_ctl.with_enabled(false)); DMA1CNT_H.write(dma1_ctl.with_enabled(false)); DMA2CNT_H.write(dma2_ctl.with_enabled(false)); // Executes the body of the function with DMAs and IRQs disabled. func(); // Continues higher priority DMAs if they were enabled before. DMA0CNT_H.write(dma0_ctl); DMA1CNT_H.write(dma1_ctl); DMA2CNT_H.write(dma2_ctl); }); } /// Sends a DMA command to EEPROM. fn dma_send(source: &[u32], ct: u16) { disable_dmas(|| unsafe { core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst); DMA3SAD.write(source.as_ptr() as usize); DMA3DAD.write(0x0DFFFF00); DMA3CNT_L.write(ct); let dma3_ctl = DmaControl::new() .with_dest_addr(DestAddrControl::Increment) .with_src_addr(SrcAddrControl::Increment) .with_enabled(true); DMA3CNT_H.write(dma3_ctl); }); } /// Receives a DMA packet from EEPROM. 
fn dma_receive(source: &mut [u32], ct: u16) {
    disable_dmas(|| unsafe {
        DMA3SAD.write(0x0DFFFF00);
        DMA3DAD.write(source.as_mut_ptr() as usize);
        DMA3CNT_L.write(ct);
        let dma3_ctl = DmaControl::new()
            .with_dest_addr(DestAddrControl::Increment)
            .with_src_addr(SrcAddrControl::Increment)
            .with_enabled(true);
        DMA3CNT_H.write(dma3_ctl);
        // Fence after the transfer is programmed so later reads of `source`
        // are not reordered before it.
        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
    });
}

/// Union type to help build/receive commands.
struct BufferData {
    // Number of bit-slots written so far (index into `data.bits`).
    idx: usize,
    data: BufferContents,
}

// 82 u16 bit-slots overlap 41 u32 words; align(4) keeps the word view valid.
#[repr(align(4))]
union BufferContents {
    uninit: (),
    bits: [u16; 82],
    words: [u32; 41],
}

impl BufferData {
    fn new() -> Self {
        BufferData { idx: 0, data: BufferContents { uninit: () } }
    }

    /// Writes a bit to the output buffer.
    /// Each bit occupies one u16 slot, as the EEPROM protocol expects.
    fn write_bit(&mut self, val: u8) {
        unsafe {
            self.data.bits[self.idx] = val as u16;
            self.idx += 1;
        }
    }

    /// Writes a number to the output buffer, most significant bit first.
    fn write_num(&mut self, count: usize, num: u32) {
        for i in 0..count {
            self.write_bit(((num >> (count - 1 - i)) & 1) as u8);
        }
    }

    /// Reads a number from the input buffer (MSB-first, starting at `off`).
    fn read_num(&mut self, off: usize, count: usize) -> u32 {
        let mut accum = 0;
        unsafe {
            for i in 0..count {
                accum <<= 1;
                accum |= self.data.bits[off + i] as u32;
            }
        }
        accum
    }

    /// Receives a number of words into the input buffer.
    fn receive(&mut self, count: usize) {
        unsafe {
            dma_receive(&mut self.data.words, count as u16);
        }
    }

    /// Submits the current buffer via DMA.
    fn submit(&self) {
        unsafe {
            dma_send(&self.data.words, self.idx as u16);
        }
    }
}

/// The properties of a given EEPROM type.
struct EepromProperties {
    // Address width in bits: 6 for 512 B devices, 14 for 8 KiB devices.
    addr_bits: usize,
    // Total capacity in bytes.
    byte_len: usize,
}

impl EepromProperties {
    /// Reads a block from the save media.
    fn read_sector(&self, word: usize) -> [u8; 8] {
        // Set address command. The command is two one bits, followed by the
        // address, followed by a zero bit.
        //
        // 512B Command: [1 1|n n n n n n|0]
        // 8KiB Command: [1 1|n n n n n n n n n n n n n n|0]
        let mut buf = BufferData::new();
        buf.write_bit(1);
        buf.write_bit(1);
        buf.write_num(self.addr_bits, word as u32);
        buf.write_bit(0);
        buf.submit();

        // Receive the buffer data. The EEPROM sends 3 irrelevant bits followed
        // by 64 data bits.
        buf.receive(68);
        let mut out = [0; 8];
        for i in 0..8 {
            out[i] = buf.read_num(4 + i * 8, 8) as u8;
        }
        out
    }

    /// Writes a sector directly.
    fn write_sector_raw(&self, word: usize, block: &[u8]) -> Result<(), Error> {
        // Write sector command. The command is a one bit, followed by a
        // zero bit, followed by the address, followed by 64 bits of data.
        //
        // 512B Command: [1 0|n n n n n n|v v v v ...]
        // 8KiB Command: [1 0|n n n n n n n n n n n n n n|v v v v ...]
        let mut buf = BufferData::new();
        buf.write_bit(1);
        buf.write_bit(0);
        buf.write_num(self.addr_bits, word as u32);
        for i in 0..8 {
            buf.write_num(8, block[i] as u32);
        }
        buf.write_bit(0);
        buf.submit();

        // Wait for the sector to be written for 10 milliseconds.
        // The port reads 1 once the device is ready again.
        let timeout = Timeout::new()?;
        timeout.start();
        while PORT.read() & 1 != 1 {
            if timeout.is_timeout_met(10) {
                return Err(Error::OperationTimedOut);
            }
        }
        Ok(())
    }

    /// Writes a sector to the EEPROM, keeping any current contents outside the
    /// buffer's range.
    fn write_sector_safe(&self, word: usize, data: &[u8], start: usize) -> Result<(), Error> {
        // Read-modify-write: patch `data` into the existing sector contents.
        let mut buf = self.read_sector(word);
        buf[start..start + data.len()].copy_from_slice(data);
        self.write_sector_raw(word, &buf)
    }

    /// Writes a sector to the EEPROM.
    fn write_sector(&self, word: usize, data: &[u8], start: usize) -> Result<(), Error> {
        // Full aligned sectors skip the read-back; partial writes must merge.
        if data.len() == 8 && start == 0 {
            self.write_sector_raw(word, data)
        } else {
            self.write_sector_safe(word, data, start)
        }
    }

    /// Checks whether an offset is in range.
fn check_offset(&self, offset: usize, len: usize) -> Result<(), Error> { if offset.checked_add(len).is_none() && (offset + len) > self.byte_len { Err(Error::OutOfBounds) } else { Ok(()) } } /// Implements EEPROM reads. fn read(&self, mut offset: usize, mut buf: &mut [u8]) -> Result<(), Error> { self.check_offset(offset, buf.len())?; let _guard = lock_media()?; while buf.len() != 0 { let start = offset & SECTOR_MASK; let end_len = cmp::min(SECTOR_LEN - start, buf.len()); let sector = self.read_sector(offset >> SECTOR_SHIFT); buf[..end_len].copy_from_slice(&sector[start..start + end_len]); buf = &mut buf[end_len..]; offset += end_len; } Ok(()) } /// Implements EEPROM verifies. fn verify(&self, mut offset: usize, mut buf: &[u8]) -> Result<bool, Error> { self.check_offset(offset, buf.len())?; let _guard = lock_media()?; while buf.len() != 0 { let start = offset & SECTOR_MASK; let end_len = cmp::min(SECTOR_LEN - start, buf.len()); if &buf[..end_len] != &self.read_sector(offset >> SECTOR_SHIFT) { return Ok(false); } buf = &buf[end_len..]; offset += end_len; } Ok(true) } /// Implements EEPROM writes. fn write(&self, mut offset: usize, mut buf: &[u8]) -> Result<(), Error> { self.check_offset(offset, buf.len())?; let _guard = lock_media()?; while buf.len() != 0 { let start = offset & SECTOR_MASK; let end_len = cmp::min(SECTOR_LEN - start, buf.len()); self.write_sector(offset >> SECTOR_SHIFT, &buf[..end_len], start)?; buf = &buf[end_len..]; offset += end_len; } Ok(()) } } const PROPS_512B: EepromProperties = EepromProperties { addr_bits: 6, byte_len: 512 }; const PROPS_8K: EepromProperties = EepromProperties { addr_bits: 14, byte_len: 8 * 1024 }; /// The [`RawSaveAccess`] used for 512 byte EEPROM. 
pub struct Eeprom512B;
impl RawSaveAccess for Eeprom512B {
    fn info(&self) -> Result<&'static MediaInfo, Error> {
        Ok(&MediaInfo {
            media_type: MediaType::Eeprom512B,
            // 8-byte sectors (1 << 3), 64 of them = 512 bytes total.
            sector_shift: 3,
            sector_count: 64,
            requires_prepare_write: false,
        })
    }
    fn read(&self, offset: usize, buffer: &mut [u8]) -> Result<(), Error> {
        PROPS_512B.read(offset, buffer)
    }
    fn verify(&self, offset: usize, buffer: &[u8]) -> Result<bool, Error> {
        PROPS_512B.verify(offset, buffer)
    }
    // EEPROM needs no erase/prepare step before writing.
    fn prepare_write(&self, _: usize, _: usize) -> Result<(), Error> {
        Ok(())
    }
    fn write(&self, offset: usize, buffer: &[u8]) -> Result<(), Error> {
        PROPS_512B.write(offset, buffer)
    }
}

/// The [`RawSaveAccess`] used for 8 KiB EEPROM.
pub struct Eeprom8K;
impl RawSaveAccess for Eeprom8K {
    fn info(&self) -> Result<&'static MediaInfo, Error> {
        Ok(&MediaInfo {
            media_type: MediaType::Eeprom8K,
            // 8-byte sectors (1 << 3), 1024 of them = 8 KiB total.
            sector_shift: 3,
            sector_count: 1024,
            requires_prepare_write: false,
        })
    }
    fn read(&self, offset: usize, buffer: &mut [u8]) -> Result<(), Error> {
        PROPS_8K.read(offset, buffer)
    }
    fn verify(&self, offset: usize, buffer: &[u8]) -> Result<bool, Error> {
        PROPS_8K.verify(offset, buffer)
    }
    // EEPROM needs no erase/prepare step before writing.
    fn prepare_write(&self, _: usize, _: usize) -> Result<(), Error> {
        Ok(())
    }
    fn write(&self, offset: usize, buffer: &[u8]) -> Result<(), Error> {
        PROPS_8K.write(offset, buffer)
    }
}
use crate::io::BoxedIo;
use futures::TryFuture;
use linkerd2_conditional::Conditional;
use linkerd2_identity as identity;
use pin_project::pin_project;
pub use rustls::ClientConfig as Config;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio::net::TcpStream;
use tracing::{debug, trace};

/// Source of the rustls client configuration used for outbound TLS.
pub trait HasConfig {
    fn tls_client_config(&self) -> Arc<Config>;
}

/// Tower layer that wraps an inner connector in [`Connect`].
#[derive(Clone, Debug)]
pub struct ConnectLayer<L>(super::Conditional<L>);

/// Connector that optionally upgrades the inner TCP connection to TLS,
/// depending on the local identity and the target's peer identity.
#[derive(Clone, Debug)]
pub struct Connect<L, C> {
    local: super::Conditional<L>,
    inner: C,
}

pub type Connection = BoxedIo;

/// A socket that is in the process of connecting.
#[pin_project]
pub struct ConnectFuture<L, F: TryFuture> {
    #[pin]
    state: ConnectState<L, F>,
}

// Two-step state machine: first await the inner TCP connect, then (when TLS
// applies) await the rustls handshake.
#[pin_project(project = ConnectStateProj)]
enum ConnectState<L, F: TryFuture> {
    Init {
        #[pin]
        future: F,
        tls: super::Conditional<(identity::Name, L)>,
    },
    Handshake(#[pin] tokio_rustls::Connect<F::Ok>),
}

// === impl ConnectLayer ===

impl<L> ConnectLayer<L> {
    pub fn new(l: super::Conditional<L>) -> ConnectLayer<L> {
        ConnectLayer(l)
    }
}

impl<L: Clone, C> tower::layer::Layer<C> for ConnectLayer<L> {
    type Service = Connect<L, C>;

    fn layer(&self, inner: C) -> Self::Service {
        Connect {
            local: self.0.clone(),
            inner,
        }
    }
}

// === impl Connect ===

/// impl MakeConnection
impl<L, C, T> tower::Service<T> for Connect<L, C>
where
    T: super::HasPeerIdentity,
    L: HasConfig + Clone,
    C: tower::Service<T, Response = TcpStream>,
    C::Future: Send + 'static,
    C::Error: ::std::error::Error + Send + Sync + 'static,
    C::Error: From<io::Error>,
{
    type Response = Connection;
    type Error = C::Error;
    type Future = ConnectFuture<L, C::Future>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, target: T) -> Self::Future {
        let peer_identity = target.peer_identity();
        debug!(peer.identity = ?peer_identity);
        // TLS is used only when BOTH a local config and a peer identity are
        // present; otherwise the `Conditional` carries the skip reason.
        let tls = self
            .local
            .clone()
            .and_then(|l| peer_identity.map(|n| (n, l)));
        ConnectFuture {
            state: ConnectState::Init {
                future: self.inner.call(target),
                tls,
            },
        }
    }
}

// ===== impl ConnectFuture =====

impl<L, F> Future for ConnectFuture<L, F>
where
    L: HasConfig,
    F: TryFuture<Ok = TcpStream>,
    F::Error: From<io::Error>,
{
    type Output = Result<Connection, F::Error>;

    // Drives Init -> (Handshake ->)? Ready. The loop lets the future make
    // immediate progress after a state transition without re-polling.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        loop {
            match this.state.as_mut().project() {
                ConnectStateProj::Init { future, tls } => {
                    let io = futures::ready!(future.try_poll(cx))?;
                    match tls {
                        Conditional::Some((peer_identity, local_tls)) => {
                            trace!(peer.id = %peer_identity, "initiating TLS");
                            let handshake =
                                tokio_rustls::TlsConnector::from(local_tls.tls_client_config())
                                    .connect(peer_identity.as_dns_name_ref(), io);
                            this.state.set(ConnectState::Handshake(handshake));
                        }
                        Conditional::None(reason) => {
                            trace!(%reason, "skipping TLS");
                            // Plaintext: hand back the raw TCP stream.
                            return Poll::Ready(Ok(Connection::new(io)));
                        }
                    }
                }
                ConnectStateProj::Handshake(fut) => {
                    let io = futures::ready!(fut.poll(cx))?;
                    trace!("established TLS");
                    return Poll::Ready(Ok(Connection::new(io)));
                }
            };
        }
    }
}

impl HasConfig for identity::CrtKey {
    fn tls_client_config(&self) -> Arc<Config> {
        identity::CrtKey::tls_client_config(self)
    }
}

impl HasConfig for identity::TrustAnchors {
    fn tls_client_config(&self) -> Arc<Config> {
        identity::TrustAnchors::tls_client_config(self)
    }
}
use anyhow::{Context, Error, Result};
use serde::Serialize;
use sha3::Digest;
use stark_hash::{HashChain, StarkHash};

use crate::core::ClassHash;
use crate::sequencer::request::contract::EntryPointType;

/// Computes the starknet class hash for given class definition json blob.
///
/// The structure of the blob is not strictly defined, so it lives in privacy under `json` module
/// of this module. The class hash has [official documentation][starknet-doc] and [cairo-lang
/// has an implementation][cairo-compute] which is half-python and half-[cairo][cairo-contract].
///
/// Outline of the hashing is:
///
/// 1. class definition is serialized with python's [`sort_keys=True` option][py-sortkeys], then
///    a truncated Keccak256 hash is calculated of the serialized json
/// 2. a [hash chain][`HashChain`] construction is used to process in order the contract
///    entry points, builtins, the truncated keccak hash and bytecodes
/// 3. each of the hashchains is hash chained together to produce a final class hash
///
/// Hash chain construction is explained at the [official documentation][starknet-doc], but its
/// text explanation is much more complex than the actual implementation in `HashChain`, which
/// you can find from source file of this function.
///
/// [starknet-doc]: https://starknet.io/documentation/contracts/#contract_hash
/// [cairo-compute]: https://github.com/starkware-libs/cairo-lang/blob/64a7f6aed9757d3d8d6c28bd972df73272b0cb0a/src/starkware/starknet/core/os/contract_hash.py
/// [cairo-contract]: https://github.com/starkware-libs/cairo-lang/blob/64a7f6aed9757d3d8d6c28bd972df73272b0cb0a/src/starkware/starknet/core/os/contracts.cairo#L76-L118
/// [py-sortkeys]: https://github.com/starkware-libs/cairo-lang/blob/64a7f6aed9757d3d8d6c28bd972df73272b0cb0a/src/starkware/starknet/core/os/contract_hash.py#L58-L71
pub fn compute_class_hash(contract_definition_dump: &[u8]) -> Result<ClassHash> {
    let contract_definition =
        serde_json::from_slice::<json::ContractDefinition<'_>>(contract_definition_dump)
            .context("Failed to parse contract_definition")?;

    compute_class_hash0(contract_definition).context("Compute class hash")
}

/// Sibling of [`compute_class_hash`] that additionally returns the ABI and bytecode
/// parts as json bytes.
pub fn extract_abi_code_hash(
    contract_definition_dump: &[u8],
) -> Result<(Vec<u8>, Vec<u8>, ClassHash)> {
    let contract_definition =
        serde_json::from_slice::<json::ContractDefinition<'_>>(contract_definition_dump)
            .context("Failed to parse contract_definition")?;

    // just in case we'd accidentally modify these in the compute_class_hash0
    let abi = serde_json::to_vec(&contract_definition.abi)
        .context("Serialize contract_definition.abi")?;
    let code = serde_json::to_vec(&contract_definition.program.data)
        .context("Serialize contract_definition.program.data")?;

    let hash = compute_class_hash0(contract_definition).context("Compute class hash")?;

    Ok((abi, code, hash))
}

/// Extract JSON representation of program and entry points from the contract definition.
pub(crate) fn extract_program_and_entry_points_by_type(
    contract_definition_dump: &[u8],
) -> Result<(serde_json::Value, serde_json::Value)> {
    // A minimal, loosely-typed view of the definition: only the two fields we
    // want to extract, both kept as raw JSON values.
    #[derive(serde::Deserialize)]
    struct ContractDefinition {
        pub program: serde_json::Value,
        pub entry_points_by_type: serde_json::Value,
    }

    let contract_definition =
        serde_json::from_slice::<ContractDefinition>(contract_definition_dump)
            .context("Failed to parse contract_definition")?;

    Ok((
        contract_definition.program,
        contract_definition.entry_points_by_type,
    ))
}

/// Core of the class hash computation; consumes the parsed definition and folds its parts
/// into the final [`ClassHash`]. Order of every `update` below is significant.
fn compute_class_hash0(mut contract_definition: json::ContractDefinition<'_>) -> Result<ClassHash> {
    use EntryPointType::*;

    // the other modification is handled by skipping if the attributes vec is empty
    contract_definition.program.debug_info = None;

    // Cairo 0.8 added "accessible_scopes" and "flow_tracking_data" attribute fields, which were
    // not present in older contracts. They present as null / empty for older contracts and should
    // not be included in the hash calculation in these cases.
    //
    // We therefore check and remove them from the definition before calculating the hash.
    contract_definition
        .program
        .attributes
        .iter_mut()
        .try_for_each(|attr| -> anyhow::Result<()> {
            let vals = attr
                .as_object_mut()
                .context("Program attribute was not an object")?;

            match vals.get_mut("accessible_scopes") {
                Some(serde_json::Value::Array(array)) => {
                    if array.is_empty() {
                        vals.remove("accessible_scopes");
                    }
                }
                Some(_other) => {
                    anyhow::bail!(
                        r#"A program's attribute["accessible_scopes"] was not an array type."#
                    );
                }
                None => {}
            }

            // We don't know what this type is supposed to be, but if it's missing it is null.
            if let Some(serde_json::Value::Null) = vals.get_mut("flow_tracking_data") {
                vals.remove("flow_tracking_data");
            }

            Ok(())
        })?;

    // Keccak over the python-style, key-sorted serialization of the (pruned) definition.
    let truncated_keccak = {
        let mut ser =
            serde_json::Serializer::with_formatter(KeccakWriter::default(), PythonDefaultFormatter);
        contract_definition
            .serialize(&mut ser)
            .context("Serializing contract_definition for Keccak256")?;

        let KeccakWriter(hash) = ser.into_inner();
        truncated_keccak(<[u8; 32]>::from(hash.finalize()))
    };

    // what follows is defined over at the contract.cairo
    const API_VERSION: StarkHash = StarkHash::ZERO;

    let mut outer = HashChain::default();

    // This wasn't in the docs, but similarly to contract_state hash, we start with this 0, so this
    // will yield outer == H(0, 0); However, dissimilarly to contract_state hash, we do include the
    // number of items in this class_hash.
    outer.update(API_VERSION);

    // It is important to process the different entrypoint hashchains in correct order.
    // Each of the entrypoint lists gets updated into the `outer` hashchain.
    //
    // This implementation doesn't preparse the strings, which makes it a bit more noisy. Late
    // parsing is made in an attempt to lean on the one big string allocation we've already got,
    // but these three hash chains could be constructed at deserialization time.
    [External, L1Handler, Constructor]
        .iter()
        .map(|key| {
            contract_definition
                .entry_points_by_type
                .get(key)
                .unwrap_or(&Vec::new())
                .iter()
                // flatten each entry point to get a list of (selector, offset, selector, offset, ...)
                .flat_map(|x| [x.selector.0, x.offset.0].into_iter())
                .fold(HashChain::default(), |mut hc, next| {
                    hc.update(next);
                    hc
                })
        })
        .for_each(|x| outer.update(x.finalize()));

    // Builtin names are hashed as big-endian byte strings interpreted as field elements.
    let builtins = contract_definition
        .program
        .builtins
        .iter()
        .enumerate()
        .map(|(i, s)| (i, s.as_bytes()))
        .map(|(i, s)| {
            StarkHash::from_be_slice(s).with_context(|| format!("Invalid builtin at index {i}"))
        })
        .try_fold(HashChain::default(), |mut hc, next| {
            hc.update(next?);
            Result::<_, Error>::Ok(hc)
        })
        .context("Failed to process contract_definition.program.builtins")?;

    outer.update(builtins.finalize());

    outer.update(truncated_keccak);

    // Bytecode words arrive as hex strings and are parsed lazily here.
    let bytecodes = contract_definition
        .program
        .data
        .iter()
        .enumerate()
        .map(|(i, s)| {
            StarkHash::from_hex_str(s).with_context(|| format!("Invalid bytecode at index {i}"))
        })
        .try_fold(HashChain::default(), |mut hc, next| {
            hc.update(next?);
            Result::<_, Error>::Ok(hc)
        })
        .context("Failed to process contract_definition.program.data")?;

    outer.update(bytecodes.finalize());

    Ok(ClassHash(outer.finalize()))
}

/// Masks a Keccak256 digest down to 250 bits so it fits in a field element.
///
/// See:
/// <https://github.com/starkware-libs/cairo-lang/blob/64a7f6aed9757d3d8d6c28bd972df73272b0cb0a/src/starkware/starknet/public/abi.py#L21-L26>
pub(crate) fn truncated_keccak(mut plain: [u8; 32]) -> StarkHash {
    // python code masks with (2**250 - 1) which starts 0x03 and is followed by 31 0xff in be
    // truncation is needed not to overflow the field element.
    plain[0] &= 0x03;
    StarkHash::from_be_bytes(plain).expect("cannot overflow: smaller than modulus")
}

/// `std::io::Write` adapter for Keccak256; we don't need the serialized version in
/// compute_class_hash, but we need the truncated_keccak hash.
///
/// When debugging mismatching hashes, it might be useful to check the length of each before trying
/// to find the wrongly serialized spot. Example length > 500kB.
#[derive(Default)]
struct KeccakWriter(sha3::Keccak256);

impl std::io::Write for KeccakWriter {
    // Feed every serialized byte straight into the running Keccak256 state.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.0.update(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        // noop is fine, we'll finalize after the write phase
        Ok(())
    }
}

/// Starkware doesn't use compact formatting for JSON but default python formatting.
/// This is required to hash to the same value after sorted serialization.
struct PythonDefaultFormatter;

impl serde_json::ser::Formatter for PythonDefaultFormatter {
    // python's default json separators are (", ", ": "); the following three
    // overrides reproduce exactly that.
    fn begin_array_value<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        if first {
            Ok(())
        } else {
            writer.write_all(b", ")
        }
    }

    fn begin_object_key<W>(&mut self, writer: &mut W, first: bool) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        if first {
            Ok(())
        } else {
            writer.write_all(b", ")
        }
    }

    fn begin_object_value<W>(&mut self, writer: &mut W) -> std::io::Result<()>
    where
        W: ?Sized + std::io::Write,
    {
        writer.write_all(b": ")
    }
}

mod json {
    use std::borrow::Cow;
    use std::collections::{BTreeMap, HashMap};

    use crate::sequencer::request::contract::{EntryPointType, SelectorAndOffset};

    /// Our version of the cairo contract definition used to deserialize and re-serialize a
    /// modified version for a hash of the contract definition.
    ///
    /// The implementation uses `serde_json::Value` extensively for the unknown/undefined
    /// structure, and the correctness of this implementation depends on the following features of
    /// serde_json:
    ///
    /// - feature `raw_value` has to be enabled for the thrown away `program.debug_info`
    /// - feature `preserve_order` has to be disabled, as we want everything sorted
    /// - feature `arbitrary_precision` has to be enabled, as there are big integers in the input
    ///
    /// It would be much more efficient to have a serde_json::Value which would only hold borrowed
    /// types.
    #[derive(serde::Deserialize, serde::Serialize)]
    #[serde(deny_unknown_fields)]
    pub struct ContractDefinition<'a> {
        /// Contract ABI, which has no schema definition.
        pub abi: serde_json::Value,
        /// Main program definition.
        #[serde(borrow)]
        pub program: Program<'a>,
        /// The contract entry points.
        ///
        /// These are left out of the re-serialized version with the ordering requirement to a
        /// Keccak256 hash.
        #[serde(skip_serializing)]
        pub entry_points_by_type: HashMap<EntryPointType, Vec<SelectorAndOffset>>,
    }

    // It's important that this is ordered alphabetically because the fields need to be in
    // sorted order for the keccak hashed representation.
    #[derive(serde::Deserialize, serde::Serialize)]
    #[serde(deny_unknown_fields)]
    pub struct Program<'a> {
        #[serde(skip_serializing_if = "Vec::is_empty", default)]
        pub attributes: Vec<serde_json::Value>,

        #[serde(borrow)]
        pub builtins: Vec<Cow<'a, str>>,

        #[serde(borrow)]
        pub data: Vec<Cow<'a, str>>,

        #[serde(borrow)]
        pub debug_info: Option<&'a serde_json::value::RawValue>,

        // Important that this is ordered by the numeric keys, not lexicographically
        pub hints: BTreeMap<u64, Vec<serde_json::Value>>,

        pub identifiers: serde_json::Value,

        #[serde(borrow)]
        pub main_scope: Cow<'a, str>,

        // Unlike most other integers, this one is hex string. We don't need to interpret it,
        // it just needs to be part of the hashed output.
        #[serde(borrow)]
        pub prime: Cow<'a, str>,

        pub reference_manager: serde_json::Value,
    }

    #[cfg(test)]
    mod roundtrip_tests {
        // FIXME: we should have many test cases utilizing this.

        /// Asserts that a value survives parse -> python-style serialize byte-for-byte.
        #[allow(unused)]
        fn roundtrips<'a, T>(input: &'a str)
        where
            T: serde::Deserialize<'a> + serde::Serialize,
        {
            use super::super::PythonDefaultFormatter;

            let parsed: T = serde_json::from_str(input).unwrap();
            let mut ser =
                serde_json::Serializer::with_formatter(Vec::new(), PythonDefaultFormatter);
            parsed.serialize(&mut ser).unwrap();
            let bytes = ser.into_inner();
            let output = std::str::from_utf8(&bytes).expect("serde does this unchecked");

            // these need to be byte for byte equal because we hash this
            assert_eq!(input, output);
        }
    }

    #[cfg(test)]
    mod test_vectors {
        #[tokio::test]
        async fn first() {
            // this test is a bit on the slow side because of the download and because of the long
            // processing time in dev builds. expected --release speed is 9 contracts/s.
            let expected = crate::starkhash!(
                "0031da92cf5f54bcb81b447e219e2b791b23f3052d12b6c9abd04ff2e5626576"
            );

            // this is quite big payload, ~500kB
            let resp = reqwest::get("https://external.integration.starknet.io/feeder_gateway/get_full_contract?blockNumber=latest&contractAddress=0x4ae0618c330c59559a59a27d143dd1c07cd74cf4e5e5a7cd85d53c6bf0e89dc")
                .await
                .unwrap();

            let payload = resp.text().await.expect("response wasn't a string");

            // for bad urls the response looks like:
            // 500
            // {"code": "StarknetErrorCode.UNINITIALIZED_CONTRACT", "message": "Contract with address 2116724861677265616176388745625154424116334641142188761834194304782006389228 is not deployed."}

            let hash = super::super::compute_class_hash(payload.as_bytes()).unwrap();

            assert_eq!(hash.0, expected);
        }

        #[test]
        fn second() {
            let contract_definition = zstd::decode_all(
                // opening up a file requires a path relative to the test running
                &include_bytes!("../../fixtures/contract_definition.json.zst")[..],
            )
            .unwrap();

            let hash = super::super::compute_class_hash(&contract_definition).unwrap();

            assert_eq!(
                hash.0,
                crate::starkhash!(
                    "050b2148c0d782914e0b12a1a32abe5e398930b7e914f82c65cb7afce0a0ab9b"
                )
            );
        }

        #[tokio::test]
        async fn genesis_contract() {
            use crate::sequencer::ClientApi;
            let contract = crate::starkhash!(
                "0546BA9763D33DC59A070C0D87D94F2DCAFA82C4A93B5E2BF5AE458B0013A9D3"
            );
            let contract = crate::core::ContractAddress(contract);

            let chain = crate::core::Chain::Goerli;
            let sequencer = crate::sequencer::Client::new(chain).unwrap();

            let contract_definition = sequencer
                .full_contract(contract)
                .await
                .expect("Download contract from sequencer");

            let _ = super::super::compute_class_hash(&contract_definition)
                .expect("Extract and compute hash");
        }

        #[tokio::test]
        async fn cairo_0_8() {
            // Cairo 0.8 update broke our class hash calculation by adding new attribute fields (which
            // we now need to ignore if empty).
            use super::super::extract_abi_code_hash;
            use crate::core::{ClassHash, ContractAddress};
            use crate::sequencer::{self, ClientApi};
            use crate::starkhash;

            // Known contract which triggered a hash mismatch failure.
            let address = ContractAddress(starkhash!(
                "0400D86342F474F14AAE562587F30855E127AD661F31793C49414228B54516EC"
            ));

            let expected = ClassHash(starkhash!(
                "056b96c1d1bbfa01af44b465763d1b71150fa00c6c9d54c3947f57e979ff68c3"
            ));

            let sequencer = sequencer::Client::new(crate::core::Chain::Goerli).unwrap();

            let contract_definition = sequencer.full_contract(address).await.unwrap();

            let extract = tokio::task::spawn_blocking(move || -> anyhow::Result<_> {
                let (abi, bytecode, hash) = extract_abi_code_hash(&contract_definition)?;
                Ok((contract_definition, abi, bytecode, hash))
            });

            let (_, _, _, calculate_hash) = extract.await.unwrap().unwrap();

            assert_eq!(calculate_hash, expected);
        }
    }

    #[cfg(test)]
    mod test_serde_features {
        #[test]
        fn serde_json_value_sorts_maps() {
            // this property is leaned on and the default implementation of serde_json works like
            // this. serde_json has a feature called "preserve_order" which could get enabled by
            // accident, and it would destroy the ability to compute_class_hash.
            let input = r#"{"foo": 1, "bar": 2}"#;
            let parsed = serde_json::from_str::<serde_json::Value>(input).unwrap();
            let output = serde_json::to_string(&parsed).unwrap();

            assert_eq!(output, r#"{"bar":2,"foo":1}"#);
        }

        #[test]
        fn serde_json_has_arbitrary_precision() {
            // the json has 251-bit ints, python handles them out of box, serde_json requires
            // feature "arbitrary_precision".

            // this is 2**256 - 1
            let input = r#"{"foo":115792089237316195423570985008687907853269984665640564039457584007913129639935}"#;

            let output =
                serde_json::to_string(&serde_json::from_str::<serde_json::Value>(input).unwrap())
                    .unwrap();

            assert_eq!(input, output);
        }

        #[test]
        fn serde_json_has_raw_value() {
            // raw value is needed for others but here for completeness; this shouldn't compile if
            // the feature wasn't enabled.

            #[derive(serde::Deserialize, serde::Serialize)]
            struct Program<'a> {
                #[serde(borrow)]
                debug_info: Option<&'a serde_json::value::RawValue>,
            }

            let mut input = serde_json::from_str::<Program<'_>>(
                r#"{"debug_info": {"long": {"tree": { "which": ["we dont", "care", "about", 0] }}}}"#,
            ).unwrap();

            input.debug_info = None;

            let output = serde_json::to_string(&input).unwrap();
            assert_eq!(output, r#"{"debug_info":null}"#);
        }
    }
}

#[cfg(test)]
mod tests {
    #[test]
    fn truncated_keccak_matches_pythonic() {
        use super::truncated_keccak;
        use crate::starkhash;
        use sha3::{Digest, Keccak256};
        let all_set = Keccak256::digest(&[0xffu8; 32]);
        assert!(all_set[0] > 0xf);
        let truncated = truncated_keccak(all_set.into());
        assert_eq!(
            truncated,
            starkhash!("01c584056064687e149968cbab758a3376d22aedc6a55823d1b3ecbee81b8fb9")
        );
    }
}
//! Helpers for decibel <-> linear gain conversion and [0, 1] range mapping.

/// Converts a decibel value into a linear amplitude factor (0 dB -> 1.0).
#[allow(non_snake_case)]
pub fn dB_to_linear(dB: f32) -> f32 {
    10.0_f32.powf(dB / 20.)
}

/// Converts a linear amplitude factor into decibels.
/// A tiny epsilon keeps `log10` finite when the input is zero.
#[allow(non_snake_case)]
pub fn linear_to_dB(linear: f32) -> f32 {
    let safe_input = linear + 1e-30;
    20. * safe_input.log10()
}

/// Maps `value` from `[min, max]` onto `[0, 1]`, clipping out-of-range inputs first.
pub fn normalize(value: f32, max: f32, min: f32) -> f32 {
    let bounded = if value < min {
        min
    } else if value > max {
        max
    } else {
        value
    };
    (bounded - min) / (max - min)
}

/// Maps `value` from `[0, 1]` back onto `[min, max]`, clipping out-of-range inputs first.
pub fn denormalize(value: f32, max: f32, min: f32) -> f32 {
    let bounded = if value < 0.0 {
        0.0
    } else if value > 1.0 {
        1.0
    } else {
        value
    };
    bounded * (max - min) + min
}
use thrift::protocol::{TCompactInputProtocol, TCompactOutputProtocol};
use thrift::transport::{TFramedReadTransport, TFramedWriteTransport, TIoChannel, TTcpChannel};

use rpc_if::calc::{Work, Operation, CalcSyncClient, TCalcSyncClient};

use std::env;

/// Entry point: takes the server address as the first CLI argument,
/// defaulting to `127.0.0.1:9090`.
fn main() -> thrift::Result<()> {
    // `unwrap_or_else` avoids allocating the default string when an argument
    // was supplied (clippy `or_fun_call`).
    let address = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:9090".to_string());
    run(address)
}

/// Opens a framed, compact-protocol Thrift connection to `address` and performs
/// a single `calc` RPC (10 + 20), printing the server's answer.
fn run(address: String) -> thrift::Result<()> {
    let mut channel = TTcpChannel::new();
    channel.open(&address)?;

    // Split the bidirectional TCP channel into independent read/write halves.
    let (i_chan, o_chan) = channel.split()?;

    let i_protocol = TCompactInputProtocol::new(TFramedReadTransport::new(i_chan));
    let o_protocol = TCompactOutputProtocol::new(TFramedWriteTransport::new(o_chan));

    let mut client = CalcSyncClient::new(i_protocol, o_protocol);

    let res = client.calc(Work { a: Some(10), b: Some(20), op: Some(Operation::Add) })?;
    // Fixed user-facing typo: "Reponse" -> "Response".
    println!("Response: {}", res);

    Ok(())
}
use std::{io::{self, Read}, net::Shutdown, sync::mpsc::{self, Receiver}, thread, time::{Duration, Instant}};

use io::ErrorKind;
use mio::{Events, Interest, net::TcpStream};

use crate::{client::{tui::Tui, udp_connection::UdpConnectionState}, common::{debug_message::DebugMessageType, message_type::{InterthreadMessage, MsgType, msg_types}}};

use super::{ANNOUNCE_DELAY, CALL_DECAY, ConnectionManager, KEEP_ALIVE_DELAY, KEEP_ALIVE_DELAY_MIDCALL, RECONNECT_DELAY, RENDEZVOUS, STATS_UPDATE_DELAY, UDP_SOCKET, WAKER};

impl ConnectionManager {
    /// Main client loop: polls mio for socket readiness, ages out stale call
    /// requests, sends keep-alives / reliable messages, drains interthread
    /// messages from `r` and pushes statistics to the UI. Runs until a
    /// `Quit` message flips `running` to false.
    pub fn event_loop(&mut self, r: &mut Receiver<InterthreadMessage>) {
        let mut running = true;
        while running {
            let mut events = Events::with_capacity(1024);

            // Lazily start the audio subsystem once at least one upgraded,
            // peer-associated UDP connection exists.
            if !self.audio.started && self.udp_connections.iter().any(|c| c.upgraded && c.associated_peer.is_some()) {
                self.audio.init();
            }

            // Calculate the next timeout
            let mut durations = vec![];
            self.get_next_timeouts(&mut durations);

            // durations is sorted, so first() is the nearest deadline; None blocks forever.
            self.poll.poll(&mut events, durations.first().cloned()).unwrap();

            // Remove old call requests
            self.calls_in_progress.retain(|(_, time)| {
                return time.elapsed() < CALL_DECAY;
            });

            // Send keep alive messages
            self.send_keep_alive_messages();

            // Send reliable messages
            self.send_reliable_messages();

            // Handle interthread messages
            self.handle_interthread_messages(r, &mut running);

            // Handle IO events
            self.handle_io_events(&events);

            // Send UI updates
            self.send_ui_updates();
        }
    }

    /// Sends periodic traffic on each UDP connection depending on its state:
    /// keep-alives while connected / mid-call (at state-specific intervals),
    /// announce packets while unannounced, nothing while pending.
    fn send_keep_alive_messages(&mut self) {
        for conn in &mut self.udp_connections {
            match conn.state {
                UdpConnectionState::MidCall | UdpConnectionState::Connected => {
                    let delay = match conn.state {
                        UdpConnectionState::MidCall => KEEP_ALIVE_DELAY_MIDCALL,
                        UdpConnectionState::Connected => KEEP_ALIVE_DELAY,
                        _ => unreachable!()
                    };
                    // `None | _` is equivalent to a plain catch-all: send when
                    // nothing was sent yet or the delay has elapsed.
                    match conn.last_message_sent {
                        Some(time) if time.elapsed() < delay => {}
                        None | _ => {
                            match conn.associated_peer.clone() {
                                Some(public_key) => {
                                    conn.send_raw_message(MsgType::KeepAlive, &(), false, None); //TODO: Error handling
                                    Tui::debug_message(&format!("Sent keep alive message to ({})", public_key), DebugMessageType::Log, &self.ui_s);
                                }
                                None => {
                                    conn.send_raw_message(MsgType::KeepAlive, &(), false, None); //TODO: Error handling
                                    Tui::debug_message("Sent keep alive message to the rendezvous server", DebugMessageType::Log, &self.ui_s);
                                }
                            }
                        }
                    }
                }
                UdpConnectionState::Unannounced => {
                    match conn.last_announce {
                        Some(time) if time.elapsed() < ANNOUNCE_DELAY => {}
                        None | _ => {
                            let announce = msg_types::AnnouncePublic { public_key: self.encryption.get_public_key() };
                            conn.send_raw_message(MsgType::Announce, &announce, false, None);
                            conn.last_announce = Some(Instant::now());
                        }
                    }
                }
                UdpConnectionState::Pending => {}
            };
        }
    }

    /// Re-sends any unacknowledged reliable messages on connected UDP links.
    fn send_reliable_messages(&mut self) {
        for conn in &mut self.udp_connections {
            match conn.state {
                UdpConnectionState::Connected => {
                    conn.resend_reliable_messages();
                },
                _ => {}
            };
        }
    }

    /// Drains the interthread channel without blocking and dispatches each
    /// message (chat, audio control, call signalling, quit, ...). Sets
    /// `*running = false` and returns on `Quit`.
    fn handle_interthread_messages(&mut self, r: &mut Receiver<InterthreadMessage>, running: &mut bool) {
        loop {
            match r.try_recv() {
                Ok(data) => {
                    match data {
                        InterthreadMessage::SendChatMessage(p, msg, custom_id) => match self.send_udp_message(Some(p), MsgType::ChatMessage, &msg_types::ChatMessage {msg,}, true, Some(custom_id)) {
                            Ok(_) => {}
                            Err(e) => Tui::debug_message(&format!("Error while trying to send a chat message: {}", e.to_string()), DebugMessageType::Error, &self.ui_s)
                        },
                        InterthreadMessage::OpusPacketReady(data) => {
                            // Broadcast the encoded audio packet to every upgraded peer connection.
                            for conn in &mut self.udp_connections {
                                if conn.upgraded && conn.associated_peer.is_some() {
                                    conn.send_udp_message(MsgType::OpusPacket, &data, false, None) // TODO: Indexing packets
                                }
                            }
                        }
                        InterthreadMessage::AudioDataReadyToBeProcessed(data) => self.audio.process_and_send_packet(data),
                        InterthreadMessage::OnChatMessage(p, msg) => Tui::on_chat_message(&self.ui_s, p, msg),
                        InterthreadMessage::ConnectToServer() => {
                            self.rendezvous_socket = TcpStream::connect(self.rendezvous_ip).unwrap();
                            self.poll.registry().register(&mut self.rendezvous_socket, RENDEZVOUS, Interest::READABLE).unwrap();
                            Tui::debug_message("Trying to connect to server", DebugMessageType::Log, &self.ui_s);
                        }
                        InterthreadMessage::CallAccepted(p) => {
                            // Accept: reply over TCP, then move the caller's UDP
                            // connection into the hole-punching (MidCall) state.
                            let msg = msg_types::CallResponse { call: msg_types::Call { callee: self.encryption.get_public_key().clone(), caller: Some(p.clone()), udp_address: None }, response: true };
                            self.send_tcp_message(MsgType::CallResponse, &msg).unwrap();

                            let conn = self.udp_connections.iter_mut().find(|c| c.associated_peer.is_some() && c.associated_peer.clone().unwrap() == p).unwrap();
                            conn.state = UdpConnectionState::MidCall;

                            let peer = self.peers.iter_mut().find(|peer| peer.public_key == p).unwrap();
                            peer.udp_addr = Some(conn.address);

                            Tui::debug_message(&format!("Accepted call from peer ({};{}), starting the punch through protocol", p, conn.address), DebugMessageType::Log, &self.ui_s);
                        }
                        InterthreadMessage::CallDenied(p) => {
                            // Deny: reply over TCP and drop the pending UDP connection entirely.
                            let msg = msg_types::CallResponse { call: msg_types::Call { callee: self.encryption.get_public_key().clone(), caller: Some(p.clone()), udp_address: None }, response: false };
                            self.send_tcp_message(MsgType::CallResponse, &msg).unwrap();

                            let i = self.udp_connections.iter().position(|c| c.associated_peer.is_some() && c.associated_peer.clone().unwrap() == p).unwrap();
                            let conn = self.udp_connections.remove(i);

                            Tui::debug_message(&format!("Denied call from peer ({};{})", p, conn.address), DebugMessageType::Log, &self.ui_s);
                        }
                        InterthreadMessage::Call(p) => {
                            // Outgoing call: skip peers that are already connected
                            // or already have a pending call request.
                            let peer = self.peers.iter().find(|peer| peer.public_key == p).unwrap();
                            if peer.udp_addr.is_some() {
                                Tui::debug_message(&format!("Tried to call a peer which is already connected {}", p), DebugMessageType::Warning, &self.ui_s);
                                continue;
                            }

                            let call = msg_types::Call { callee: p.clone(), caller: None, udp_address: None };
                            match self.calls_in_progress.iter().find(|(c, _)| c == &call) {
                                Some(_) => Tui::debug_message(&format!("Tried to call a peer which has already been called: {}", p), DebugMessageType::Warning, &self.ui_s),
                                None => {
                                    Tui::debug_message(&format!("Calling peer: {}", p), DebugMessageType::Log, &self.ui_s);
                                    self.calls_in_progress.push((call.clone(), Instant::now()));
                                    self.send_tcp_message(MsgType::Call, &call).unwrap(); //TODO: Error handling
                                }
                            }
                        }
                        InterthreadMessage::AudioChangeInputDevice(d) => self.audio.change_input_device(d),
                        InterthreadMessage::AudioChangeOutputDevice(d) => self.audio.change_output_device(d),
                        InterthreadMessage::AudioChangePreferredKbits(kbits) => self.audio.change_preferred_kbits(kbits),
                        InterthreadMessage::AudioChangeMuteState(muted) => self.audio.change_mute_state(muted),
                        InterthreadMessage::AudioChangeDenoiserState(denoiser_state) => self.audio.change_denoiser_state(denoiser_state),
                        InterthreadMessage::Quit() => {
                            // Best-effort shutdown; the result is deliberately ignored.
                            match self.rendezvous_socket.shutdown(Shutdown::Both) {
                                _ => {}
                            }
                            *running = false;
                            return;
                        },
                        _ => unreachable!()
                    }
                }
                Err(mpsc::TryRecvError::Disconnected) => break,
                Err(mpsc::TryRecvError::Empty) => break
            }
        }
    }

    /// Reads from whichever socket mio reported ready. Each token is drained in
    /// a loop until the socket returns `WouldBlock` (edge-triggered readiness).
    fn handle_io_events(&mut self, events: &Events) {
        for event in events.iter() {
            match event.token() {
                token => {
                    loop {
                        match token {
                            WAKER => break,
                            RENDEZVOUS => {
                                // Read the one-byte message type header first;
                                // the body is parsed by read_tcp_message.
                                let mut msg_type = [0;1];
                                match self.rendezvous_socket.read(&mut msg_type) {
                                    Ok(0) => {
                                        Tui::debug_message("Disconnected from rendezvous server", DebugMessageType::Warning, &self.ui_s);
                                        break;
                                    }
                                    Ok(_) => {
                                        self.read_tcp_message(msg_type[0], token);
                                    }
                                    Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                                        // Socket is not ready anymore, stop reading
                                        break;
                                    }
                                    Err(e) if e.kind() == ErrorKind::ConnectionReset || e.kind() == ErrorKind::NotConnected => {
                                        match e.kind() {
                                            ErrorKind::ConnectionReset => Tui::debug_message(&format!("Disconnected from rendezvous server, reconnecting in {}", RECONNECT_DELAY.as_secs()), DebugMessageType::Warning, &self.ui_s),
                                            ErrorKind::NotConnected => Tui::debug_message(&format!("Reconnecting failed to rendezvous server, retrying in {}", RECONNECT_DELAY.as_secs()), DebugMessageType::Warning, &self.ui_s),
                                            _ => {}
                                        }
                                        self.poll.registry().deregister(&mut self.rendezvous_socket).unwrap();
                                        self.try_server_reconnect();
                                        break;
                                    },
                                    e => panic!("err={:?}", e), // Unexpected error
                                }
                            },
                            UDP_SOCKET => {
                                // 65536 bytes covers the maximum UDP datagram size.
                                let mut buf = [0; 65536];
                                match self.udp_socket.recv_from(&mut buf) {
                                    Ok(r) => {
                                        let (read, addr) = r;
                                        self.read_udp_message(read, addr, &buf);
                                    }
                                    Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                                        // Socket is not ready anymore, stop reading
                                        break;
                                    }
                                    Err(ref e) if e.kind() == io::ErrorKind::ConnectionReset => {
                                        Tui::debug_message("Couldn't read from udp socket (ConnectionReset) ", DebugMessageType::Error, &self.ui_s);
                                    }
                                    e => println!("err={:?}", e), // Unexpected error
                                }
                            },
                            _ => unreachable!()
                        }
                    }
                }
            }
        }
    }

    /// Pushes per-peer connection statistics to the UI, rate-limited to once
    /// every STATS_UPDATE_DELAY.
    fn send_ui_updates(&mut self) {
        if self.last_stats_update.elapsed() > STATS_UPDATE_DELAY {
            let mut stats = vec![];
            for c in &mut self.udp_connections {
                if let Some(p) = &c.associated_peer {
                    stats.push((p.clone(), c.statistics.clone()));
                }
            }
            self.ui_s.send(InterthreadMessage::ConnectionStatistics(stats)).unwrap();
            self.last_stats_update = Instant::now();
        }
    }

    /// Lists the next timeouts, and also sorts the list, so the first one is always the smallest
    fn get_next_timeouts(&mut self, durations: &mut Vec<Duration>) {
        for conn in &mut self.udp_connections {
            match conn.next_resendable() {
                Some(d) => durations.push(d),
                None => {}
            }
            durations.push(conn.next_keep_alive());
        }
        // NOTE(review): `checked_duration_since(self.last_stats_update)` compares the
        // deadline against the same instant it was derived from, so this always
        // yields STATS_UPDATE_DELAY; it looks like `Instant::now()` was intended —
        // confirm before changing.
        let next_stats_update = (self.last_stats_update + STATS_UPDATE_DELAY).checked_duration_since(self.last_stats_update).unwrap_or(Duration::from_secs(0));
        durations.push(next_stats_update);
        durations.sort_by(|a,b| a.cmp(b));
    }

    /// Schedules a delayed reconnect attempt: a helper thread sleeps for
    /// RECONNECT_DELAY, then asks the event loop to reconnect via a
    /// ConnectToServer message.
    fn try_server_reconnect(&mut self) {
        let cm_s = self.cm_s.clone();
        thread::spawn(move || {
            thread::sleep(RECONNECT_DELAY);
            cm_s.send(InterthreadMessage::ConnectToServer()).unwrap();
        });
    }
}
pub mod math;
extern crate num_traits;

// Unit tests for the `math` module. Each sub-module covers one function;
// invalid-argument cases are expected to panic.
#[cfg(test)]
mod tests {
    mod test_sum {
        #[test]
        fn test_sum() {
            assert_eq!(crate::math::sum_one_to_n(10), 55);
        }

        #[test]
        fn test_sum_one() {
            assert_eq!(crate::math::sum_one_to_n(1), 1);
        }

        #[test]
        #[should_panic]
        fn test_sum_minus() {
            let minus: i32 = -1;
            // negative input is expected to panic
            crate::math::sum_one_to_n(minus);
        }

        #[test]
        #[should_panic]
        fn test_sum_zero() {
            let zero: i32 = 0;
            // zero is expected to panic
            crate::math::sum_one_to_n(zero);
        }
    }

    mod test_gcd {
        #[test]
        fn test_gcd() {
            assert_eq!(12, crate::math::gcd(12, 24));
            assert_eq!(3, crate::math::gcd(12, 21));
            assert_eq!(1, crate::math::gcd(12, 1));
        }

        #[test]
        #[should_panic]
        fn test_arg_zero() {
            crate::math::gcd(12, 0);
        }

        #[test]
        #[should_panic]
        fn test_arg_minus() {
            crate::math::gcd(-2, -3);
        }
    }

    mod test_lcm {
        #[test]
        fn test_lcm() {
            assert_eq!(24, crate::math::lcm(12, 24));
            assert_eq!(84, crate::math::lcm(12, 21));
            assert_eq!(12, crate::math::lcm(12, 1));
        }

        #[test]
        #[should_panic]
        fn test_arg_zero() {
            crate::math::lcm(12, 0);
        }

        #[test]
        #[should_panic]
        fn test_arg_minus() {
            crate::math::lcm(-2, -3);
        }
    }

    mod test_fractal {
        #[test]
        fn test_fractal() {
            assert_eq!(720, crate::math::factorial(6));
        }

        #[test]
        fn test_arg_zero() {
            // 0! is defined as 1
            assert_eq!(1, crate::math::factorial(0));
        }

        #[test]
        #[should_panic]
        fn test_arg_minus() {
            crate::math::factorial(-2);
        }
    }

    mod test_pow {
        #[test]
        fn test_pow() {
            assert_eq!(4, crate::math::pow(2, 2));
        }

        #[test]
        fn test_arg_zero() {
            // anything to the 0th power is 1
            assert_eq!(1, crate::math::pow(3, 0));
        }

        #[test]
        fn test_arg_minus() {
            // negative base with an even exponent yields a positive result
            assert_eq!(4, crate::math::pow(-2, 2));
        }
    }
}
#![feature(proc_macro_hygiene, decl_macro)]

#[macro_use] extern crate rocket;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;

use rocket_contrib::json::Json;
use rocket_contrib::json::JsonValue;

mod person;
use person::{Person};

/// POST /person — echoes the posted person back with a hard-coded id (11);
/// a stand-in for real persistence.
#[post("/", data = "<person>")]
fn create(mut person: Json<Person>) -> Json<Person> {
    person.id = Some(11);
    person
}

/// GET /persons — returns a static placeholder list.
#[get("/")]
fn read() -> JsonValue {
    json!([
        "person 1",
        "person 2"
    ])
}

/// PUT /person/<id> — echoes the posted person unchanged.
// NOTE(review): `id` is never applied to `person` (and triggers an
// unused-variable warning) — presumably the body's id should be replaced
// with the path id; confirm the intended semantics.
#[put("/<id>", data = "<person>")]
fn update(id: i32, person: Json<Person>) -> Json<Person> {
    person
}

/// DELETE /person/<id> — always reports success.
// NOTE(review): `id` is unused here too; nothing is actually deleted.
#[delete("/<id>")]
fn delete(id: i32) -> JsonValue {
    json!({"status": "ok"})
}

/// Mounts the CRUD routes (note: `read` lives under `/persons`, the rest
/// under `/person`) and starts the Rocket server.
fn main() {
    rocket::ignite()
        .mount("/person", routes![create, update, delete])
        .mount("/persons", routes![read])
        .launch();
}
extern crate rustbox;
extern crate rustify;

/// Actions the application can perform in response to user input or player events.
#[derive(Debug, PartialEq)]
pub enum Action {
    Select,
    PlayNextTrack,
    QueueTrack,
    TogglePlayback,
    FilterList(String),
    SearchTrack(String),
    MoveBottom,
    MoveDown,
    MoveTop,
    MoveUp,
    Back,
    Quit,
    Noop,
}

/// Maps an optional player event to the next action to take.
///
/// Returns [`Action::Noop`] when there is no event.
pub fn next_action(rustify_event: Option<rustify::Event>) -> Action {
    // Idiomatic `match` instead of the previous `is_some()` + `unwrap()` pair.
    match rustify_event {
        Some(event) => next_rustify_event(event),
        None => Action::Noop,
    }
}

/// Maps a player event to an action; currently only end-of-track triggers one.
fn next_rustify_event(event: rustify::Event) -> Action {
    match event {
        rustify::Event::EndOfTrack => Action::PlayNextTrack,
        _ => Action::Noop,
    }
}
use crate::backend::input::KeyState;
use std::{
    cell::RefCell,
    default::Default,
    io::{Error as IoError, Write},
    os::unix::io::AsRawFd,
    rc::Rc,
};
use tempfile::tempfile;
use wayland_server::{
    protocol::{
        wl_keyboard::{KeyState as WlKeyState, KeymapFormat, Request, WlKeyboard},
        wl_surface::WlSurface,
    },
    Client, NewResource,
};
use xkbcommon::xkb;
pub use xkbcommon::xkb::{keysyms, Keysym};

/// Represents the current state of the keyboard modifiers
///
/// Each field of this struct represents a modifier and is `true` if this modifier is active.
///
/// For some modifiers, this means that the key is currently pressed, others are toggled
/// (like caps lock).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct ModifiersState {
    /// The "control" key
    pub ctrl: bool,
    /// The "alt" key
    pub alt: bool,
    /// The "shift" key
    pub shift: bool,
    /// The "Caps lock" key
    pub caps_lock: bool,
    /// The "logo" key
    ///
    /// Also known as the "windows" key on most keyboards
    pub logo: bool,
    /// The "Num lock" key
    pub num_lock: bool,
}

impl ModifiersState {
    // All modifiers start out inactive.
    fn new() -> ModifiersState {
        ModifiersState {
            ctrl: false,
            alt: false,
            shift: false,
            caps_lock: false,
            logo: false,
            num_lock: false,
        }
    }

    // Refreshes every flag from the effective modifier state of the given
    // xkb state (covers both pressed and latched/locked modifiers).
    fn update_with(&mut self, state: &xkb::State) {
        self.ctrl = state.mod_name_is_active(&xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE);
        self.alt = state.mod_name_is_active(&xkb::MOD_NAME_ALT, xkb::STATE_MODS_EFFECTIVE);
        self.shift = state.mod_name_is_active(&xkb::MOD_NAME_SHIFT, xkb::STATE_MODS_EFFECTIVE);
        self.caps_lock = state.mod_name_is_active(&xkb::MOD_NAME_CAPS, xkb::STATE_MODS_EFFECTIVE);
        self.logo = state.mod_name_is_active(&xkb::MOD_NAME_LOGO, xkb::STATE_MODS_EFFECTIVE);
        self.num_lock = state.mod_name_is_active(&xkb::MOD_NAME_NUM, xkb::STATE_MODS_EFFECTIVE);
    }
}

/// Configuration for xkbcommon.
/// /// For the fields that are not set ("" or None, as set in the `Default` impl), xkbcommon will use /// the values from the environment variables `XKB_DEFAULT_RULES`, `XKB_DEFAULT_MODEL`, /// `XKB_DEFAULT_LAYOUT`, `XKB_DEFAULT_VARIANT` and `XKB_DEFAULT_OPTIONS`. /// /// For details, see the [documentation at xkbcommon.org][docs]. /// /// [docs]: https://xkbcommon.org/doc/current/structxkb__rule__names.html #[derive(Clone, Debug)] pub struct XkbConfig<'a> { /// The rules file to use. /// /// The rules file describes how to interpret the values of the model, layout, variant and /// options fields. pub rules: &'a str, /// The keyboard model by which to interpret keycodes and LEDs. pub model: &'a str, /// A comma separated list of layouts (languages) to include in the keymap. pub layout: &'a str, /// A comma separated list of variants, one per layout, which may modify or augment the /// respective layout in various ways. pub variant: &'a str, /// A comma separated list of options, through which the user specifies non-layout related /// preferences, like which key combinations are used for switching layouts, or which key is the /// Compose key. 
pub options: Option<String>, } impl<'a> Default for XkbConfig<'a> { fn default() -> Self { Self { rules: "", model: "", layout: "", variant: "", options: None, } } } struct KbdInternal { known_kbds: Vec<WlKeyboard>, focus: Option<WlSurface>, pressed_keys: Vec<u32>, mods_state: ModifiersState, keymap: xkb::Keymap, state: xkb::State, repeat_rate: i32, repeat_delay: i32, focus_hook: Box<dyn FnMut(Option<&WlSurface>)>, } // This is OK because all parts of `xkb` will remain on the // same thread unsafe impl Send for KbdInternal {} impl KbdInternal { fn new( xkb_config: XkbConfig<'_>, repeat_rate: i32, repeat_delay: i32, focus_hook: Box<dyn FnMut(Option<&WlSurface>)>, ) -> Result<KbdInternal, ()> { // we create a new contex for each keyboard because libxkbcommon is actually NOT threadsafe // so confining it inside the KbdInternal allows us to use Rusts mutability rules to make // sure nothing goes wrong. // // FIXME: This is an issue with the xkbcommon-rs crate that does not reflect this // non-threadsafety properly. let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS); let keymap = xkb::Keymap::new_from_names( &context, &xkb_config.rules, &xkb_config.model, &xkb_config.layout, &xkb_config.variant, xkb_config.options, xkb::KEYMAP_COMPILE_NO_FLAGS, ) .ok_or(())?; let state = xkb::State::new(&keymap); Ok(KbdInternal { known_kbds: Vec::new(), focus: None, pressed_keys: Vec::new(), mods_state: ModifiersState::new(), keymap, state, repeat_rate, repeat_delay, focus_hook, }) } // return true if modifier state has changed fn key_input(&mut self, keycode: u32, state: KeyState) -> bool { // track pressed keys as xkbcommon does not seem to expose it :( let direction = match state { KeyState::Pressed => { self.pressed_keys.push(keycode); xkb::KeyDirection::Down } KeyState::Released => { self.pressed_keys.retain(|&k| k != keycode); xkb::KeyDirection::Up } }; // update state // Offset the keycode by 8, as the evdev XKB rules reflect X's // broken keycode system, which starts at 8. 
let state_components = self.state.update_key(keycode + 8, direction); if state_components != 0 { self.mods_state.update_with(&self.state); true } else { false } } fn serialize_modifiers(&self) -> (u32, u32, u32, u32) { let mods_depressed = self.state.serialize_mods(xkb::STATE_MODS_DEPRESSED); let mods_latched = self.state.serialize_mods(xkb::STATE_MODS_LATCHED); let mods_locked = self.state.serialize_mods(xkb::STATE_MODS_LOCKED); let layout_locked = self.state.serialize_layout(xkb::STATE_LAYOUT_LOCKED); (mods_depressed, mods_latched, mods_locked, layout_locked) } fn serialize_pressed_keys(&self) -> Vec<u8> { let serialized = unsafe { ::std::slice::from_raw_parts( self.pressed_keys.as_ptr() as *const u8, self.pressed_keys.len() * 4, ) }; serialized.into() } fn with_focused_kbds<F>(&self, mut f: F) where F: FnMut(&WlKeyboard, &WlSurface), { if let Some(ref surface) = self.focus { for kbd in &self.known_kbds { if kbd.as_ref().same_client_as(surface.as_ref()) { f(kbd, surface); } } } } } /// Errors that can be encountered when creating a keyboard handler #[derive(Debug)] pub enum Error { /// libxkbcommon could not load the specified keymap BadKeymap, /// Smithay could not create a tempfile to share the keymap with clients IoError(IoError), } /// Create a keyboard handler from a set of RMLVO rules pub(crate) fn create_keyboard_handler<F>( xkb_config: XkbConfig<'_>, repeat_delay: i32, repeat_rate: i32, logger: &::slog::Logger, focus_hook: F, ) -> Result<KeyboardHandle, Error> where F: FnMut(Option<&WlSurface>) + 'static, { let log = logger.new(o!("smithay_module" => "xkbcommon_handler")); info!(log, "Initializing a xkbcommon handler with keymap query"; "rules" => xkb_config.rules, "model" => xkb_config.model, "layout" => xkb_config.layout, "variant" => xkb_config.variant, "options" => &xkb_config.options ); let internal = KbdInternal::new(xkb_config, repeat_rate, repeat_delay, Box::new(focus_hook)).map_err(|_| { debug!(log, "Loading keymap failed"); Error::BadKeymap })?; 
info!(log, "Loaded Keymap"; "name" => internal.keymap.layouts().next()); let keymap = internal.keymap.get_as_string(xkb::KEYMAP_FORMAT_TEXT_V1); Ok(KeyboardHandle { arc: Rc::new(KbdRc { internal: RefCell::new(internal), keymap, logger: log, }), }) } struct KbdRc { internal: RefCell<KbdInternal>, keymap: String, logger: ::slog::Logger, } /// An handle to a keyboard handler /// /// It can be cloned and all clones manipulate the same internal state. /// /// This handle gives you 2 main ways to interact with the keyboard handling: /// /// - set the current focus for this keyboard: designing the surface that will receive the key inputs /// using the [`KeyboardHandle::set_focus`] method. /// - process key inputs from the input backend, allowing them to be caught at the compositor-level /// or forwarded to the client. See the documentation of the [`KeyboardHandle::input`] method for /// details. #[derive(Clone)] pub struct KeyboardHandle { arc: Rc<KbdRc>, } impl KeyboardHandle { /// Handle a keystroke /// /// All keystrokes from the input backend should be fed _in order_ to this method of the /// keyboard handler. It will internally track the state of the keymap. /// /// The `filter` argument is expected to be a closure which will peek at the generated input /// as interpreted by the keymap before it is forwarded to the focused client. If this closure /// returns false, the input will not be sent to the client. This mechanism can be used to /// implement compositor-level key bindings for example. /// /// The module [`wayland::seat::keysyms`](::wayland::seat::keysyms) exposes definitions of all possible keysyms /// to be compared against. This includes non-character keysyms, such as XF86 special keys. 
    pub fn input<F>(&self, keycode: u32, state: KeyState, serial: u32, time: u32, filter: F)
    where
        F: FnOnce(&ModifiersState, Keysym) -> bool,
    {
        trace!(self.arc.logger, "Handling keystroke"; "keycode" => keycode, "state" => format_args!("{:?}", state));
        let mut guard = self.arc.internal.borrow_mut();

        // Offset the keycode by 8, as the evdev XKB rules reflect X's
        // broken keycode system, which starts at 8.
        // NOTE: the keysym is resolved *before* `key_input` feeds this event
        // into the xkb state machine on the next line.
        let sym = guard.state.key_get_one_sym(keycode + 8);
        // `key_input` tracks pressed keys and returns true if this event
        // changed the modifier state.
        let mods_changed = guard.key_input(keycode, state);

        trace!(self.arc.logger, "Calling input filter";
            "mods_state" => format_args!("{:?}", guard.mods_state), "sym" => xkb::keysym_get_name(sym)
        );

        if !filter(&guard.mods_state, sym) {
            // the filter returned false, we do not forward to client
            // (this is how compositor-level keybindings swallow input)
            trace!(self.arc.logger, "Input was intercepted by filter");
            return;
        }

        // forward to client if no keybinding is triggered
        // A wl_keyboard.modifiers event is only sent when the modifier state
        // actually changed with this keystroke.
        let modifiers = if mods_changed {
            Some(guard.serialize_modifiers())
        } else {
            None
        };
        let wl_state = match state {
            KeyState::Pressed => WlKeyState::Pressed,
            KeyState::Released => WlKeyState::Released,
        };
        // Deliver the optional modifier update plus the key event to every
        // known wl_keyboard belonging to the focused surface's client.
        guard.with_focused_kbds(|kbd, _| {
            if let Some((dep, la, lo, gr)) = modifiers {
                kbd.modifiers(serial, dep, la, lo, gr);
            }
            kbd.key(serial, time, keycode, wl_state);
        });
        if guard.focus.is_some() {
            trace!(self.arc.logger, "Input forwarded to client");
        } else {
            trace!(self.arc.logger, "No client currently focused");
        }
    }

    /// Set the current focus of this keyboard
    ///
    /// If the new focus is different from the previous one, any previous focus
    /// will be sent a [`wl_keyboard::Event::Leave`](wayland_server::protocol::wl_keyboard::Event::Leave)
    /// event, and if the new focus is not `None`,
    /// a [`wl_keyboard::Event::Enter`](wayland_server::protocol::wl_keyboard::Event::Enter) event will be sent.
    pub fn set_focus(&self, focus: Option<&WlSurface>, serial: u32) {
        let mut guard = self.arc.internal.borrow_mut();

        // `same` is true only when both old and new focus exist and are the
        // same surface; `None` on either side counts as a focus change.
        let same = guard
            .focus
            .as_ref()
            .and_then(|f| focus.map(|s| s.as_ref().equals(f.as_ref())))
            .unwrap_or(false);

        if !same {
            // unset old focus
            guard.with_focused_kbds(|kbd, s| {
                kbd.leave(serial, &s);
            });

            // set new focus
            guard.focus = focus.cloned();
            let (dep, la, lo, gr) = guard.serialize_modifiers();
            let keys = guard.serialize_pressed_keys();
            // The newly focused client receives the current modifier state
            // and the set of keys already held down at enter time.
            guard.with_focused_kbds(|kbd, surface| {
                kbd.modifiers(serial, dep, la, lo, gr);
                kbd.enter(serial, &surface, keys.clone());
            });
            {
                // Destructure so `focus` and `focus_hook` are borrowed
                // disjointly, allowing the hook to observe the new focus.
                let KbdInternal {
                    ref focus,
                    ref mut focus_hook,
                    ..
                } = *guard;
                focus_hook(focus.as_ref());
            }
            if guard.focus.is_some() {
                trace!(self.arc.logger, "Focus set to new surface");
            } else {
                trace!(self.arc.logger, "Focus unset");
            }
        } else {
            trace!(self.arc.logger, "Focus unchanged");
        }
    }

    /// Check if given client currently has keyboard focus
    pub fn has_focus(&self, client: &Client) -> bool {
        self.arc
            .internal
            .borrow_mut()
            .focus
            .as_ref()
            .and_then(|f| f.as_ref().client())
            .map(|c| c.equals(client))
            .unwrap_or(false)
    }

    /// Register a new keyboard to this handler
    ///
    /// The keymap will automatically be sent to it
    ///
    /// This should be done first, before anything else is done with this keyboard.
    pub(crate) fn new_kbd(&self, kbd: WlKeyboard) {
        trace!(self.arc.logger, "Sending keymap to client");

        // prepare a tempfile with the keymap, to send it to the client
        // (the wl_keyboard.keymap event carries a file descriptor, not bytes)
        let ret = tempfile().and_then(|mut f| {
            f.write_all(self.arc.keymap.as_bytes())?;
            f.flush()?;
            kbd.keymap(
                KeymapFormat::XkbV1,
                f.as_raw_fd(),
                self.arc.keymap.as_bytes().len() as u32,
            );
            Ok(())
        });

        if let Err(e) = ret {
            // Keymap delivery failed: bail out without registering the
            // keyboard, so it never receives key events.
            warn!(self.arc.logger,
                "Failed write keymap to client in a tempfile";
                "err" => format!("{:?}", e)
            );
            return;
        };

        let mut guard = self.arc.internal.borrow_mut();
        if kbd.as_ref().version() >= 4 {
            // repeat_info is only sent to clients binding protocol version >= 4.
            kbd.repeat_info(guard.repeat_rate, guard.repeat_delay);
        }
        guard.known_kbds.push(kbd);
    }

    /// Change the repeat info configured for this keyboard
    pub fn change_repeat_info(&self, rate: i32, delay: i32) {
        let mut guard = self.arc.internal.borrow_mut();
        guard.repeat_delay = delay;
        guard.repeat_rate = rate;
        // Broadcast the new settings to every already-registered keyboard.
        for kbd in &guard.known_kbds {
            kbd.repeat_info(rate, delay);
        }
    }
}

// Implement the wl_keyboard resource: the only request is `Release` (handled
// by the destructor), and an optional destructor unregisters the keyboard
// from the handler's `known_kbds` when the client destroys it.
pub(crate) fn implement_keyboard(
    new_keyboard: NewResource<WlKeyboard>,
    handle: Option<&KeyboardHandle>,
) -> WlKeyboard {
    let destructor = match handle {
        Some(h) => {
            let arc = h.arc.clone();
            Some(move |keyboard: WlKeyboard| {
                arc.internal
                    .borrow_mut()
                    .known_kbds
                    .retain(|k| !k.as_ref().equals(&keyboard.as_ref()))
            })
        }
        None => None,
    };

    new_keyboard.implement_closure(
        |request, _keyboard| {
            match request {
                Request::Release => {
                    // Our destructors already handle it
                }
                _ => unreachable!(),
            }
        },
        destructor,
        (),
    )
}
#![no_main] #![no_std] mod term; use core::marker::PhantomData; use cortex_m_rt::entry; use embedded_hal::digital::v2::OutputPin; use hal::{ delay::Delay, gpio::gpiob::Parts, pac::{CorePeripherals, Peripherals}, prelude::*, }; use panic_semihosting as _; use ssd1963::{GpioReadWrite16BitInterface, Screen}; use stm32f1xx_gpio16bit::RwPortB; use stm32f1xx_hal as hal; use crate::term::{font::ThisFont, vertical_scroller::CopyScroller, Term}; #[entry] fn main() -> ! { let dp = Peripherals::take().unwrap(); let cp = CorePeripherals::take().unwrap(); let mut rcc = dp.RCC.constrain(); let mut flash = dp.FLASH.constrain(); let mut afio = dp.AFIO.constrain(&mut rcc.apb2); let clocks = rcc // .cfgr // .sysclk(36.mhz()) // .hclk(36.mhz()) // .pclk1(36.mhz()) // .pclk2(64.mhz()) // .freeze(&mut flash.acr); .cfgr .use_hse(8.mhz()) .sysclk(72.mhz()) .pclk1(36.mhz()) .pclk2(72.mhz()) .freeze(&mut flash.acr); let mut gpioa = dp.GPIOA.split(&mut rcc.apb2); let Parts { pb0, pb1, pb2, pb3, pb4, pb5, pb6, pb7, pb8, pb9, pb10, pb11, pb12, pb13, pb14, pb15, crl, crh, } = dp.GPIOB.split(&mut rcc.apb2); let (_, pb3, pb4) = afio.mapr.disable_jtag(gpioa.pa15, pb3, pb4); let interface = GpioReadWrite16BitInterface::new( RwPortB::new( pb0, pb1, pb2, pb3, pb4, pb5, pb6, pb7, pb8, pb9, pb10, pb11, pb12, pb13, pb14, pb15, crl, crh, ), gpioa.pa1.into_push_pull_output(&mut gpioa.crl), // DC gpioa.pa2.into_push_pull_output(&mut gpioa.crl), // WR gpioa.pa3.into_push_pull_output(&mut gpioa.crl), // RD ); let mut cs = gpioa.pa4.into_push_pull_output(&mut gpioa.crl); // LCD_CS cs.set_low().unwrap(); // let mut rd = gpioa.pa3.into_push_pull_output(&mut gpioa.crl); // LCD_RD // rd.set_high().unwrap(); // let mut reset = gpioa.pa0.into_push_pull_output(&mut gpioa.crl); // LCD_RESET // reset.set_high().unwrap(); struct Gradient<Lcd: Screen> { line: u16, col: u16, _p: PhantomData<Lcd>, } impl<Lcd: Screen> Gradient<Lcd> { pub fn new() -> Self { Self { line: 0, col: 0, _p: PhantomData, } } } impl<Lcd: Screen> 
Iterator for Gradient<Lcd> { type Item = u16; fn next(&mut self) -> Option<Self::Item> { self.col = if self.col == Lcd::WIDTH - 1 { self.line += 1; if self.line == Lcd::HEIGHT - 1 { return None; } 0 } else { self.col + 1 }; const RED_MAX: u16 = 0b11111; const GREEN_MAX: u16 = 0b111111; const BLUE_MAX: u16 = 0b11111; let red = self.line / ((Lcd::HEIGHT + RED_MAX) / (RED_MAX + 1)) << 11; let green = self.line / ((Lcd::HEIGHT + GREEN_MAX) / (GREEN_MAX + 1)) << 5; let blue = self.line / ((Lcd::HEIGHT + BLUE_MAX) / (BLUE_MAX + 1)) << 0; Some(red | green | blue) } } let mut disp = ssd1963::Ssd1963::new(ssd1963::Lcd800x480, interface, Delay::new(cp.SYST, clocks)).unwrap(); disp.fill_area_color(.., .., 0).unwrap(); // disp.fill_area(.., .., &mut Gradient::<Lcd800x480>::new()).unwrap(); let mut buffer = [0u16; 9000]; let scroller = CopyScroller::new(&mut buffer); // scroller.scroll_area(&mut disp, 0..100, 100..479, 100, -100).unwrap(); // disp.fill_area_color(0..480, 380..=380, 0b11111100000).unwrap(); // let mut x: u16 = 0; // let mut y: u16 = 0; // let mut speed_x: i16 = 1; // let mut speed_y: i16 = 1; // let width: u16 = 100; // let height: u16 = 100; // use core::convert::TryFrom; // disp.fill_area_color(.., .., 0).unwrap(); let mut term = Term::new(&mut disp, &ThisFont, scroller).dimensions(.., 8..); use core::fmt::Write; for i in 0..100 { writeln!(&mut term, "{:3} Hello, world!", i).unwrap(); } // let mut x: u16 = 0; // let mut y: u16 = 0; // let mut speed_x: i16 = 1; // let mut speed_y: i16 = 1; // let width: u16 = 100; // let height: u16 = 100; // use core::convert::TryFrom; // loop { // disp.fill_area_color(x..x + width, y..y + width, 0b1111111111111111).unwrap(); // let mut it = text_to_pixels(&term::font::ThisFont, "Shupaci!!!").map(|b| if b { 0 } else { 0b1111111111111111 }); // disp.fill_area(x + 10..x + 10 + 9 * 8, y + 46..y + 46 + 8, &mut it).unwrap(); // for _ in 0..98 { // disp.delay.delay_us(255u8); // } // disp.delay.delay_us(165u8); // 
disp.fill_area_color(x..x + width, y..y + width, 0).unwrap(); // x = (i16::try_from(x).unwrap() + speed_x).try_into().unwrap(); // y = (i16::try_from(y).unwrap() + speed_y).try_into().unwrap(); // if x == 0 { // speed_x = 1; // } else if x + width == disp.width() { // speed_x = -1; // } // if y == 0 { // speed_y = 1; // } else if y + height == disp.height() { // speed_y = -1; // } // } loop {} }
use rand::Rng; fn main() { // if let a = 1; if a > 0 { println!("{} greater than 0", a); } else if a < 0 { println!("{} less than 0", a); } else { println!("{} equal to 0", a); } // 三元表达式 let b = rand::thread_rng().gen_range(1, 101); let string = if b > 50 { "大" } else { "小" }; println!("string: {}", string); // loop 循环 let mut c = 0; loop { c += 1; if c > 5 { break; } } c = 0; // loop 循环 break 可以返回结果 let result = loop { c += 1; if c == 10 { break c * 2; } }; println!("loop result: {}", result); // while 循环 let mut d = 0; while d < 3{ d +=1; }; println!("while result: {}", d); // for 循环 let a = [10, 20, 30, 40, 50]; for element in a.iter() { println!("for element: {}", element); }; // for range for i in (0..3).rev() { println!("for range: {}", i); } }
use super::{schema::artists, Postgres};
use artell_domain::artist::{Artist, ArtistId, ArtistRepository};
use diesel::prelude::*;
use uuid::Uuid;

/// Postgres-backed implementation of the domain [`ArtistRepository`].
pub struct PgArtistRepository {
    // Handle used to obtain connections for blocking diesel queries
    // (presumably a pool wrapper — see `Postgres`).
    pg: Postgres,
}

impl PgArtistRepository {
    /// Build a repository on top of the given `Postgres` handle.
    pub fn new(pg: Postgres) -> Self {
        PgArtistRepository { pg }
    }
}

#[async_trait]
impl ArtistRepository for PgArtistRepository {
    /// Look up a single artist by primary key.
    ///
    /// Returns `Ok(None)` when no row matches `id`.
    async fn find_by_id(&self, id: Uuid) -> anyhow::Result<Option<Artist>> {
        // Local row type matching the selected column tuple below, in order.
        #[derive(Queryable)]
        struct QueriedArtist {
            id: Uuid,
            name: String,
            email: String,
            status_msg: String,
            description: String,
            instagram: String,
            twitter: String,
        }

        self.pg
            .try_with_conn(move |conn| {
                Ok(artists::table
                    .filter(artists::id.eq(id))
                    .select((
                        artists::id,
                        artists::name,
                        artists::email,
                        artists::status_msg,
                        artists::description,
                        artists::instagram,
                        artists::twitter,
                    ))
                    .first::<QueriedArtist>(&conn)
                    // `.optional()` converts diesel's not-found error into `None`.
                    .optional()?
                    .map(|a| Artist {
                        id: ArtistId(a.id),
                        name: a.name,
                        email: a.email,
                        status_msg: a.status_msg,
                        description: a.description,
                        instagram: a.instagram,
                        twitter: a.twitter,
                    }))
            })
            .await
    }

    /// Insert the artist, or update the existing row on id conflict (upsert).
    async fn save(&self, artist: Artist) -> anyhow::Result<()> {
        // Borrowing insert/update projection of the domain `Artist`;
        // `Copy` lets the same value feed both `.values()` and `.set()`.
        #[derive(Clone, Copy, Insertable, AsChangeset)]
        #[table_name = "artists"]
        struct NewArtist<'a> {
            id: &'a Uuid,
            name: &'a str,
            email: &'a str,
            status_msg: &'a str,
            description: &'a str,
            instagram: &'a str,
            twitter: &'a str,
        }

        self.pg
            .try_with_conn(move |conn| {
                let new_artist = NewArtist {
                    id: artist.id.as_ref(),
                    name: artist.name.as_str(),
                    email: artist.email.as_str(),
                    status_msg: artist.status_msg.as_str(),
                    description: artist.description.as_str(),
                    instagram: artist.instagram.as_str(),
                    twitter: artist.twitter.as_str(),
                };
                // INSERT ... ON CONFLICT (id) DO UPDATE — the same payload is
                // used for the insert values and the conflict update set.
                diesel::insert_into(artists::table)
                    .values(new_artist)
                    .on_conflict(artists::id)
                    .do_update()
                    .set(new_artist)
                    .execute(&conn)?;
                Ok(())
            })
            .await
    }
}
// Magical Bitcoin Library // Written in 2020 by // Alekos Filini <alekos.filini@gmail.com> // // Copyright (c) 2020 Magical Bitcoin // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. //! BDK command line interface //! //! This lib provides [`structopt`] structs and enums that parse CLI options and sub-commands from //! the command line or from a `String` vector that can be used to access features of the [`bdk`] //! library. Functions are also provided to handle subcommands and options and provide results via //! the [`bdk`] lib. //! //! See the [`bdk-cli`] example bin for how to use this lib to create a simple command line //! application that demonstrates [`bdk`] wallet and key management features. //! //! See [`CliOpts`] for global cli options and [`CliSubCommand`] for supported top level sub-commands. //! //! [`structopt`]: https://docs.rs/crate/structopt //! [`bdk`]: https://github.com/bitcoindevkit/bdk //! 
[`bdk-cli`]: https://github.com/bitcoindevkit/bdk-cli/blob/master/src/bdk_cli.rs //! //! # Example //! //! ```no_run //! # #[cfg(feature = "electrum")] //! # { //! # use bdk::bitcoin::Network; //! # use bdk::blockchain::{AnyBlockchain, ConfigurableBlockchain}; //! # use bdk::blockchain::{AnyBlockchainConfig, ElectrumBlockchainConfig}; //! # use bdk_cli::{self, CliOpts, CliSubCommand, WalletOpts, OfflineWalletSubCommand, WalletSubCommand}; //! # use bdk::database::MemoryDatabase; //! # use bdk::Wallet; //! # use std::sync::Arc; //! # use structopt::StructOpt; //! # use std::str::FromStr; //! //! // to get args from cli use: //! // let cli_opts = CliOpts::from_args(); //! //! let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", "--descriptor", //! "wpkh(tpubEBr4i6yk5nf5DAaJpsi9N2pPYBeJ7fZ5Z9rmN4977iYLCGco1VyjB9tvvuvYtfZzjD5A8igzgw3HeWeeKFmanHYqksqZXYXGsw5zjnj7KM9/*)", //! "sync", "--max_addresses", "50"]; //! //! let cli_opts = CliOpts::from_iter(&cli_args); //! let network = cli_opts.network; //! //! if let CliSubCommand::Wallet { //! wallet_opts, //! subcommand: WalletSubCommand::OnlineWalletSubCommand(online_subcommand) //! } = cli_opts.subcommand { //! //! let descriptor = wallet_opts.descriptor.as_str(); //! let change_descriptor = wallet_opts.change_descriptor.as_deref(); //! //! let database = MemoryDatabase::new(); //! //! let config = AnyBlockchainConfig::Electrum(ElectrumBlockchainConfig { //! url: wallet_opts.electrum_opts.server, //! socks5: wallet_opts.proxy_opts.proxy, //! retry: wallet_opts.proxy_opts.retries, //! timeout: None, //! stop_gap: 10 //! }); //! //! let wallet = Wallet::new( //! descriptor, //! change_descriptor, //! network, //! database, //! AnyBlockchain::from_config(&config).unwrap(), //! ).unwrap(); //! //! let result = bdk_cli::handle_online_wallet_subcommand(&wallet, online_subcommand).unwrap(); //! println!("{}", serde_json::to_string_pretty(&result).unwrap()); //! } //! # } //! 
``` pub extern crate bdk; #[macro_use] extern crate serde_json; #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] #[macro_use] extern crate bdk_macros; use std::collections::BTreeMap; use std::str::FromStr; pub use structopt; use structopt::StructOpt; use crate::OfflineWalletSubCommand::*; #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] use crate::OnlineWalletSubCommand::*; use bdk::bitcoin::consensus::encode::{deserialize, serialize, serialize_hex}; #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] use bdk::bitcoin::hashes::hex::FromHex; use bdk::bitcoin::secp256k1::Secp256k1; use bdk::bitcoin::util::bip32::{DerivationPath, ExtendedPrivKey, KeySource}; use bdk::bitcoin::util::psbt::PartiallySignedTransaction; use bdk::bitcoin::{Address, Network, OutPoint, Script, Txid}; #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] use bdk::blockchain::{log_progress, Blockchain}; use bdk::database::BatchDatabase; use bdk::descriptor::Segwitv0; #[cfg(feature = "compiler")] use bdk::descriptor::{Descriptor, Legacy, Miniscript}; use bdk::keys::bip39::{Language, Mnemonic, MnemonicType}; use bdk::keys::DescriptorKey::Secret; use bdk::keys::KeyError::{InvalidNetwork, Message}; use bdk::keys::{DerivableKey, DescriptorKey, ExtendedKey, GeneratableKey, GeneratedKey}; use bdk::miniscript::miniscript; #[cfg(feature = "compiler")] use bdk::miniscript::policy::Concrete; use bdk::wallet::AddressIndex; use bdk::Error; use bdk::SignOptions; use bdk::{FeeRate, KeychainKind, Wallet}; /// Global options /// /// The global options and top level sub-command required for all subsequent [`CliSubCommand`]'s. 
/// /// # Example /// /// ``` /// # #[cfg(any(feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc"))] /// # { /// # use bdk::bitcoin::Network; /// # use structopt::StructOpt; /// # use bdk_cli::{CliOpts, WalletOpts, CliSubCommand, WalletSubCommand}; /// # #[cfg(feature = "electrum")] /// # use bdk_cli::ElectrumOpts; /// # #[cfg(feature = "esplora")] /// # use bdk_cli::EsploraOpts; /// # #[cfg(feature = "rpc")] /// # use bdk_cli::RpcOpts; /// # #[cfg(feature = "compact_filters")] /// # use bdk_cli::CompactFilterOpts; /// # #[cfg(any(feature = "compact_filters", feature = "electrum", feature="esplora"))] /// # use bdk_cli::ProxyOpts; /// # use bdk_cli::OnlineWalletSubCommand::Sync; /// /// let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", /// "--descriptor", "wpkh(tpubEBr4i6yk5nf5DAaJpsi9N2pPYBeJ7fZ5Z9rmN4977iYLCGco1VyjB9tvvuvYtfZzjD5A8igzgw3HeWeeKFmanHYqksqZXYXGsw5zjnj7KM9/44'/1'/0'/0/*)", /// "sync", "--max_addresses", "50"]; /// /// // to get CliOpts from the OS command line args use: /// // let cli_opts = CliOpts::from_args(); /// let cli_opts = CliOpts::from_iter(&cli_args); /// /// let expected_cli_opts = CliOpts { /// network: Network::Testnet, /// subcommand: CliSubCommand::Wallet { /// wallet_opts: WalletOpts { /// wallet: "main".to_string(), /// verbose: false, /// descriptor: "wpkh(tpubEBr4i6yk5nf5DAaJpsi9N2pPYBeJ7fZ5Z9rmN4977iYLCGco1VyjB9tvvuvYtfZzjD5A8igzgw3HeWeeKFmanHYqksqZXYXGsw5zjnj7KM9/44'/1'/0'/0/*)".to_string(), /// change_descriptor: None, /// #[cfg(feature = "electrum")] /// electrum_opts: ElectrumOpts { /// timeout: None, /// server: "ssl://electrum.blockstream.info:60002".to_string(), /// stop_gap: 10 /// }, /// #[cfg(feature = "esplora-ureq")] /// esplora_opts: EsploraOpts { /// server: "https://blockstream.info/testnet/api/".to_string(), /// read_timeout: 5, /// write_timeout: 5, /// stop_gap: 10 /// }, /// #[cfg(feature = "esplora-reqwest")] /// esplora_opts: EsploraOpts { /// server: 
"https://blockstream.info/testnet/api/".to_string(), /// conc: 4, /// stop_gap: 10 /// }, /// #[cfg(feature = "rpc")] /// rpc_opts: RpcOpts{ /// address: "127.0.0.1:18443".to_string(), /// auth: ("user".to_string(), "password".to_string()), /// skip_blocks: None, /// }, /// #[cfg(feature = "compact_filters")] /// compactfilter_opts: CompactFilterOpts{ /// address: vec!["127.0.0.1:18444".to_string()], /// conn_count: 4, /// skip_blocks: 0, /// }, /// #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] /// proxy_opts: ProxyOpts{ /// proxy: None, /// proxy_auth: None, /// retries: 5, /// }, /// }, /// subcommand: WalletSubCommand::OnlineWalletSubCommand(Sync { /// max_addresses: Some(50) /// }), /// }, /// }; /// /// assert_eq!(expected_cli_opts, cli_opts); /// # } /// ``` #[derive(Debug, StructOpt, Clone, PartialEq)] #[structopt(name = "BDK CLI", version = option_env ! ("CARGO_PKG_VERSION").unwrap_or("unknown"), author = option_env ! ("CARGO_PKG_AUTHORS").unwrap_or(""))] pub struct CliOpts { /// Sets the network #[structopt( name = "NETWORK", short = "n", long = "network", default_value = "testnet" )] pub network: Network, /// Top level cli sub-command #[structopt(subcommand)] pub subcommand: CliSubCommand, } /// CLI sub-commands /// /// The top level sub-commands, each may have different required options. For /// instance [`CliSubCommand::Wallet`] requires [`WalletOpts`] with a required descriptor but /// [`CliSubCommand::Key`] sub-command does not. [`CliSubCommand::Repl`] also requires /// [`WalletOpts`] and a descriptor because in this mode both [`WalletSubCommand`] and /// [`KeySubCommand`] sub-commands are available. 
#[derive(Debug, StructOpt, Clone, PartialEq)] #[structopt( rename_all = "snake", long_about = "Top level options and command modes" )] pub enum CliSubCommand { /// Wallet options and sub-commands #[structopt(long_about = "Wallet mode")] Wallet { #[structopt(flatten)] wallet_opts: WalletOpts, #[structopt(subcommand)] subcommand: WalletSubCommand, }, /// Key management sub-commands #[structopt(long_about = "Key management mode")] Key { #[structopt(subcommand)] subcommand: KeySubCommand, }, /// Compile a miniscript policy to an output descriptor #[cfg(feature = "compiler")] #[structopt(long_about = "Miniscript policy compiler")] Compile { /// Sets the spending policy to compile #[structopt(name = "POLICY", required = true, index = 1)] policy: String, /// Sets the script type used to embed the compiled policy #[structopt(name = "TYPE", short = "t", long = "type", default_value = "wsh", possible_values = &["sh","wsh", "sh-wsh"])] script_type: String, }, /// Enter REPL command loop mode #[cfg(feature = "repl")] #[structopt(long_about = "REPL command loop mode")] Repl { #[structopt(flatten)] wallet_opts: WalletOpts, }, } /// Wallet sub-commands /// /// Can use either an online or offline wallet. An [`OnlineWalletSubCommand`] requires a blockchain /// client and network connection and an [`OfflineWalletSubCommand`] does not. #[derive(Debug, StructOpt, Clone, PartialEq)] pub enum WalletSubCommand { #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] #[structopt(flatten)] OnlineWalletSubCommand(OnlineWalletSubCommand), #[structopt(flatten)] OfflineWalletSubCommand(OfflineWalletSubCommand), } /// Wallet options /// /// The wallet options required for all [`CliSubCommand::Wallet`] or [`CliSubCommand::Repl`] /// sub-commands. These options capture wallet descriptor and blockchain client information. The /// blockchain client details are only used for [`OnlineWalletSubCommand`]s. 
///
/// # Example
///
/// ```
/// # use bdk::bitcoin::Network;
/// # use structopt::StructOpt;
/// # use bdk_cli::WalletOpts;
/// # #[cfg(feature = "electrum")]
/// # use bdk_cli::ElectrumOpts;
/// # #[cfg(feature = "esplora")]
/// # use bdk_cli::EsploraOpts;
/// # #[cfg(feature = "compact_filters")]
/// # use bdk_cli::CompactFilterOpts;
/// # #[cfg(feature = "rpc")]
/// # use bdk_cli::RpcOpts;
/// # #[cfg(any(feature = "compact_filters", feature = "electrum", feature="esplora"))]
/// # use bdk_cli::ProxyOpts;
///
/// let cli_args = vec!["wallet",
///     "--descriptor", "wpkh(tpubEBr4i6yk5nf5DAaJpsi9N2pPYBeJ7fZ5Z9rmN4977iYLCGco1VyjB9tvvuvYtfZzjD5A8igzgw3HeWeeKFmanHYqksqZXYXGsw5zjnj7KM9/44'/1'/0'/0/*)"];
///
/// // to get WalletOpt from OS command line args use:
/// // let wallet_opt = WalletOpt::from_args();
///
/// let wallet_opts = WalletOpts::from_iter(&cli_args);
///
/// let expected_wallet_opts = WalletOpts {
///     wallet: "main".to_string(),
///     verbose: false,
///     descriptor: "wpkh(tpubEBr4i6yk5nf5DAaJpsi9N2pPYBeJ7fZ5Z9rmN4977iYLCGco1VyjB9tvvuvYtfZzjD5A8igzgw3HeWeeKFmanHYqksqZXYXGsw5zjnj7KM9/44'/1'/0'/0/*)".to_string(),
///     change_descriptor: None,
///     #[cfg(feature = "electrum")]
///     electrum_opts: ElectrumOpts {
///         timeout: None,
///         server: "ssl://electrum.blockstream.info:60002".to_string(),
///         stop_gap: 10
///     },
///     #[cfg(feature = "esplora-ureq")]
///     esplora_opts: EsploraOpts {
///         server: "https://blockstream.info/testnet/api/".to_string(),
///         read_timeout: 5,
///         write_timeout: 5,
///         stop_gap: 10
///     },
///     #[cfg(feature = "esplora-reqwest")]
///     esplora_opts: EsploraOpts {
///         server: "https://blockstream.info/testnet/api/".to_string(),
///         conc: 4,
///         stop_gap: 10
///     },
///     #[cfg(feature = "compact_filters")]
///     compactfilter_opts: CompactFilterOpts{
///         address: vec!["127.0.0.1:18444".to_string()],
///         conn_count: 4,
///         skip_blocks: 0,
///     },
///     #[cfg(feature = "rpc")]
///     rpc_opts: RpcOpts{
///         address: "127.0.0.1:18443".to_string(),
///         auth: ("user".to_string(), "password".to_string()),
///         skip_blocks: None,
///     },
///     #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))]
///     proxy_opts: ProxyOpts{
///         proxy: None,
///         proxy_auth: None,
///         retries: 5,
///     },
/// };
///
/// assert_eq!(expected_wallet_opts, wallet_opts);
/// ```
#[derive(Debug, StructOpt, Clone, PartialEq)]
pub struct WalletOpts {
    /// Selects the wallet to use
    #[structopt(
        name = "WALLET_NAME",
        short = "w",
        long = "wallet",
        default_value = "main"
    )]
    pub wallet: String,
    /// Adds verbosity, returns PSBT in JSON format alongside serialized, displays expanded objects
    #[structopt(name = "VERBOSE", short = "v", long = "verbose")]
    pub verbose: bool,
    /// Sets the descriptor to use for the external addresses
    #[structopt(name = "DESCRIPTOR", short = "d", long = "descriptor", required = true)]
    pub descriptor: String,
    /// Sets the descriptor to use for internal addresses
    #[structopt(name = "CHANGE_DESCRIPTOR", short = "c", long = "change_descriptor")]
    pub change_descriptor: Option<String>,
    // Backend-specific option groups; exactly the ones matching the enabled
    // blockchain client features are flattened into the wallet CLI.
    #[cfg(feature = "electrum")]
    #[structopt(flatten)]
    pub electrum_opts: ElectrumOpts,
    #[cfg(feature = "esplora")]
    #[structopt(flatten)]
    pub esplora_opts: EsploraOpts,
    #[cfg(feature = "compact_filters")]
    #[structopt(flatten)]
    pub compactfilter_opts: CompactFilterOpts,
    #[cfg(feature = "rpc")]
    #[structopt(flatten)]
    pub rpc_opts: RpcOpts,
    #[cfg(any(feature = "compact_filters", feature = "electrum", feature = "esplora"))]
    #[structopt(flatten)]
    pub proxy_opts: ProxyOpts,
}

/// Proxy Server options
///
/// Only activated for `compact_filters`, `electrum` or `esplora`
#[cfg(any(feature = "compact_filters", feature = "electrum", feature = "esplora"))]
#[derive(Debug, StructOpt, Clone, PartialEq)]
pub struct ProxyOpts {
    /// Sets the SOCKS5 proxy for Blockchain backend
    #[structopt(name = "PROXY_ADDRS:PORT", long = "proxy", short = "p")]
    pub proxy: Option<String>,
    /// Sets the SOCKS5 proxy credential
    #[structopt(name="PROXY_USER:PASSWD", long="proxy_auth", short="a", parse(try_from_str = parse_proxy_auth))]
    pub proxy_auth: Option<(String, String)>,
    /// Sets the SOCKS5 proxy retries for the Electrum client
    #[structopt(
        name = "PROXY_RETRIES",
        short = "r",
        long = "retries",
        default_value = "5"
    )]
    pub retries: u8,
}

/// Compact Filter options
///
/// Compact filter peer information used by [`OnlineWalletSubCommand`]s.
#[cfg(feature = "compact_filters")]
#[derive(Debug, StructOpt, Clone, PartialEq)]
pub struct CompactFilterOpts {
    /// Sets the full node network address
    // Repeatable: Vec<String> lets multiple peers be given.
    #[structopt(
        name = "ADDRESS:PORT",
        short = "n",
        long = "node",
        default_value = "127.0.0.1:18444"
    )]
    pub address: Vec<String>,
    /// Sets the number of parallel node connections
    #[structopt(name = "CONNECTIONS", long = "conn_count", default_value = "4")]
    pub conn_count: usize,
    /// Optionally skip initial `skip_blocks` blocks
    #[structopt(
        name = "SKIP_BLOCKS",
        short = "k",
        long = "skip_blocks",
        default_value = "0"
    )]
    pub skip_blocks: usize,
}

// Bitcoin Core RPC connection options used by [`OnlineWalletSubCommand`]s.
#[cfg(feature = "rpc")]
#[derive(Debug, StructOpt, Clone, PartialEq)]
pub struct RpcOpts {
    /// Sets the full node address for rpc connection
    #[structopt(
        name = "ADDRESS:PORT",
        short = "n",
        long = "node",
        default_value = "127.0.0.1:18443"
    )]
    pub address: String,
    /// Sets the rpc authentication username:password
    // Reuses parse_proxy_auth since the "user:password" format is identical.
    #[structopt(
        name = "USER:PASSWD",
        short = "a",
        long = "auth",
        parse(try_from_str = parse_proxy_auth),
        default_value = "user:password",
    )]
    pub auth: (String, String),
    /// Optionally skip initial `skip_blocks` blocks
    // NOTE(review): this flag is "skip-blocks" (hyphen) while CompactFilterOpts
    // uses "skip_blocks" (underscore) — inconsistent CLI spelling; confirm before unifying.
    #[structopt(name = "SKIP_BLOCKS", short = "s", long = "skip-blocks")]
    pub skip_blocks: Option<u32>,
}

/// Electrum options
///
/// Electrum blockchain client information used by [`OnlineWalletSubCommand`]s.
#[cfg(feature = "electrum")] #[derive(Debug, StructOpt, Clone, PartialEq)] pub struct ElectrumOpts { /// Sets the SOCKS5 proxy timeout for the Electrum client #[structopt(name = "PROXY_TIMEOUT", short = "t", long = "timeout")] pub timeout: Option<u8>, /// Sets the Electrum server to use #[structopt( name = "ELECTRUM_URL", short = "s", long = "server", default_value = "ssl://electrum.blockstream.info:60002" )] pub server: String, /// Stop searching addresses for transactions after finding an unused gap of this length. #[structopt( name = "STOP_GAP", long = "stop_gap", short = "g", default_value = "10" )] pub stop_gap: usize, } /// Esplora options /// /// Esplora blockchain client information used by [`OnlineWalletSubCommand`]s. #[cfg(feature = "esplora-ureq")] #[derive(Debug, StructOpt, Clone, PartialEq)] pub struct EsploraOpts { /// Use the esplora server if given as parameter #[structopt( name = "ESPLORA_URL", short = "s", long = "server", default_value = "https://blockstream.info/testnet/api/" )] pub server: String, /// Socket read timeout #[structopt(name = "READ_TIMEOUT", long = "read_timeout", default_value = "5")] pub read_timeout: u64, /// Socket write timeout #[structopt(name = "WRITE_TIMEOUT", long = "write_timeout", default_value = "5")] pub write_timeout: u64, /// Stop searching addresses for transactions after finding an unused gap of this length. 
#[structopt( name = "STOP_GAP", long = "stop_gap", short = "g", default_value = "10" )] pub stop_gap: usize, } #[cfg(feature = "esplora-reqwest")] #[derive(Debug, StructOpt, Clone, PartialEq)] pub struct EsploraOpts { /// Use the esplora server if given as parameter #[structopt( name = "ESPLORA_URL", short = "s", long = "server", default_value = "https://blockstream.info/testnet/api/" )] pub server: String, /// Number of parallel requests sent to the esplora service (default: 4) #[structopt(name = "CONCURRENCY", long = "conc", default_value = "4")] pub conc: u8, /// Stop searching addresses for transactions after finding an unused gap of this length. #[structopt( name = "STOP_GAP", long = "stop_gap", short = "g", default_value = "10" )] pub stop_gap: usize, } // This is a workaround for `structopt` issue #333, #391, #418; see https://github.com/TeXitoi/structopt/issues/333#issuecomment-712265332 #[cfg_attr(not(doc), allow(missing_docs))] #[cfg_attr( doc, doc = r#" Offline Wallet sub-command [`CliSubCommand::Wallet`] sub-commands that do not require a blockchain client and network connection. These sub-commands use only the provided descriptor or locally cached wallet information. # Example ``` # use bdk_cli::OfflineWalletSubCommand; # use structopt::StructOpt; let address_sub_command = OfflineWalletSubCommand::from_iter(&["wallet", "get_new_address"]); assert!(matches!( address_sub_command, OfflineWalletSubCommand::GetNewAddress )); ``` To capture wallet sub-commands from a string vector without a preceeding binary name you can create a custom struct the includes the `NoBinaryName` clap setting and wraps the WalletSubCommand enum. See also the [`bdk-cli`](https://github.com/bitcoindevkit/bdk-cli/blob/master/src/bdkcli.rs) example app. 
"# )] #[cfg_attr( all(doc, feature = "repl"), doc = r#" # Example ``` # use bdk_cli::OfflineWalletSubCommand; # use structopt::StructOpt; # use clap::AppSettings; #[derive(Debug, StructOpt, Clone, PartialEq)] #[structopt(name = "BDK CLI", setting = AppSettings::NoBinaryName, version = option_env ! ("CARGO_PKG_VERSION").unwrap_or("unknown"), author = option_env ! ("CARGO_PKG_AUTHORS").unwrap_or(""))] struct ReplOpts { /// Wallet sub-command #[structopt(subcommand)] pub subcommand: OfflineWalletSubCommand, } let repl_opts = ReplOpts::from_iter(&["get_new_address"]); assert!(matches!( repl_opts.subcommand, OfflineWalletSubCommand::GetNewAddress )); "# )] #[derive(Debug, StructOpt, Clone, PartialEq)] #[structopt(rename_all = "snake")] pub enum OfflineWalletSubCommand { /// Generates a new external address GetNewAddress, /// Lists the available spendable UTXOs ListUnspent, /// Lists all the incoming and outgoing transactions of the wallet ListTransactions, /// Returns the current wallet balance GetBalance, /// Creates a new unsigned transaction CreateTx { /// Adds a recipient to the transaction #[structopt(name = "ADDRESS:SAT", long = "to", required = true, parse(try_from_str = parse_recipient))] recipients: Vec<(Script, u64)>, /// Sends all the funds (or all the selected utxos). Requires only one recipients of value 0 #[structopt(short = "all", long = "send_all")] send_all: bool, /// Enables Replace-By-Fee (BIP125) #[structopt(short = "rbf", long = "enable_rbf")] enable_rbf: bool, /// Make a PSBT that can be signed by offline signers and hardware wallets. Forces the addition of `non_witness_utxo` and more details to let the signer identify the change output. 
#[structopt(long = "offline_signer")] offline_signer: bool, /// Selects which utxos *must* be spent #[structopt(name = "MUST_SPEND_TXID:VOUT", long = "utxos", parse(try_from_str = parse_outpoint))] utxos: Option<Vec<OutPoint>>, /// Marks a utxo as unspendable #[structopt(name = "CANT_SPEND_TXID:VOUT", long = "unspendable", parse(try_from_str = parse_outpoint))] unspendable: Option<Vec<OutPoint>>, /// Fee rate to use in sat/vbyte #[structopt(name = "SATS_VBYTE", short = "fee", long = "fee_rate")] fee_rate: Option<f32>, /// Selects which policy should be used to satisfy the external descriptor #[structopt(name = "EXT_POLICY", long = "external_policy")] external_policy: Option<String>, /// Selects which policy should be used to satisfy the internal descriptor #[structopt(name = "INT_POLICY", long = "internal_policy")] internal_policy: Option<String>, }, /// Bumps the fees of an RBF transaction BumpFee { /// TXID of the transaction to update #[structopt(name = "TXID", short = "txid", long = "txid")] txid: String, /// Allows the wallet to reduce the amount to the specified address in order to increase fees. #[structopt(name = "SHRINK_ADDRESS", short = "s", long = "shrink")] shrink_address: Option<Address>, /// Make a PSBT that can be signed by offline signers and hardware wallets. Forces the addition of `non_witness_utxo` and more details to let the signer identify the change output. #[structopt(long = "offline_signer")] offline_signer: bool, /// Selects which utxos *must* be added to the tx. 
Unconfirmed utxos cannot be used #[structopt(name = "MUST_SPEND_TXID:VOUT", long = "utxos", parse(try_from_str = parse_outpoint))] utxos: Option<Vec<OutPoint>>, /// Marks an utxo as unspendable, in case more inputs are needed to cover the extra fees #[structopt(name = "CANT_SPEND_TXID:VOUT", long = "unspendable", parse(try_from_str = parse_outpoint))] unspendable: Option<Vec<OutPoint>>, /// The new targeted fee rate in sat/vbyte #[structopt(name = "SATS_VBYTE", short = "fee", long = "fee_rate")] fee_rate: f32, }, /// Returns the available spending policies for the descriptor Policies, /// Returns the public version of the wallet's descriptor(s) PublicDescriptor, /// Signs and tries to finalize a PSBT Sign { /// Sets the PSBT to sign #[structopt(name = "BASE64_PSBT", long = "psbt")] psbt: String, /// Assume the blockchain has reached a specific height. This affects the transaction finalization, if there are timelocks in the descriptor #[structopt(name = "HEIGHT", long = "assume_height")] assume_height: Option<u32>, }, /// Extracts a raw transaction from a PSBT ExtractPsbt { /// Sets the PSBT to extract #[structopt(name = "BASE64_PSBT", long = "psbt")] psbt: String, }, /// Finalizes a PSBT FinalizePsbt { /// Sets the PSBT to finalize #[structopt(name = "BASE64_PSBT", long = "psbt")] psbt: String, /// Assume the blockchain has reached a specific height #[structopt(name = "HEIGHT", long = "assume_height")] assume_height: Option<u32>, }, /// Combines multiple PSBTs into one CombinePsbt { /// Add one PSBT to combine. This option can be repeated multiple times, one for each PSBT #[structopt(name = "BASE64_PSBT", long = "psbt", required = true)] psbt: Vec<String>, }, } #[cfg_attr(not(doc), allow(missing_docs))] #[cfg_attr( doc, doc = r#" Online Wallet sub-command [`CliSubCommand::Wallet`] sub-commands that require a blockchain client and network connection. 
These sub-commands use a provided descriptor, locally cached wallet information, and require a blockchain client and network connection. "# )] #[derive(Debug, StructOpt, Clone, PartialEq)] #[structopt(rename_all = "snake")] #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] pub enum OnlineWalletSubCommand { /// Syncs with the chosen blockchain server Sync { /// max addresses to consider #[structopt(short = "v", long = "max_addresses")] max_addresses: Option<u32>, }, /// Broadcasts a transaction to the network. Takes either a raw transaction or a PSBT to extract Broadcast { /// Sets the PSBT to sign #[structopt( name = "BASE64_PSBT", long = "psbt", required_unless = "RAWTX", conflicts_with = "RAWTX" )] psbt: Option<String>, /// Sets the raw transaction to broadcast #[structopt( name = "RAWTX", long = "tx", required_unless = "BASE64_PSBT", conflicts_with = "BASE64_PSBT" )] tx: Option<String>, }, } fn parse_recipient(s: &str) -> Result<(Script, u64), String> { let parts: Vec<_> = s.split(':').collect(); if parts.len() != 2 { return Err("Invalid format".to_string()); } let addr = Address::from_str(parts[0]); if let Err(e) = addr { return Err(format!("{:?}", e)); } let val = u64::from_str(parts[1]); if let Err(e) = val { return Err(format!("{:?}", e)); } Ok((addr.unwrap().script_pubkey(), val.unwrap())) } #[cfg(any( feature = "electrum", feature = "compact_filters", feature = "esplora", feature = "rpc" ))] fn parse_proxy_auth(s: &str) -> Result<(String, String), String> { let parts: Vec<_> = s.split(':').collect(); if parts.len() != 2 { return Err("Invalid format".to_string()); } let user = parts[0].to_string(); let passwd = parts[1].to_string(); Ok((user, passwd)) } fn parse_outpoint(s: &str) -> Result<OutPoint, String> { OutPoint::from_str(s).map_err(|e| format!("{:?}", e)) } /// Execute an offline wallet sub-command /// /// Offline wallet sub-commands are described in [`OfflineWalletSubCommand`]. 
// Dispatches one OfflineWalletSubCommand against `wallet` and returns the
// result as JSON. `T` (the blockchain type) is unconstrained here because
// offline sub-commands never touch the network.
pub fn handle_offline_wallet_subcommand<T, D>(
    wallet: &Wallet<T, D>,
    wallet_opts: &WalletOpts,
    offline_subcommand: OfflineWalletSubCommand,
) -> Result<serde_json::Value, Error>
where
    D: BatchDatabase,
{
    match offline_subcommand {
        GetNewAddress => Ok(json!({"address": wallet.get_address(AddressIndex::New)?.address})),
        ListUnspent => Ok(serde_json::to_value(&wallet.list_unspent()?)?),
        ListTransactions => Ok(serde_json::to_value(
            &wallet.list_transactions(wallet_opts.verbose)?,
        )?),
        GetBalance => Ok(json!({"satoshi": wallet.get_balance()?})),
        CreateTx {
            recipients,
            send_all,
            enable_rbf,
            offline_signer,
            utxos,
            unspendable,
            fee_rate,
            external_policy,
            internal_policy,
        } => {
            let mut tx_builder = wallet.build_tx();
            if send_all {
                // --send_all drains everything to the first recipient's script;
                // any amount given with --to is ignored in this mode.
                // NOTE(review): indexes recipients[0] — panics if --to was absent;
                // structopt marks --to required, so this relies on CLI validation.
                tx_builder.drain_wallet().drain_to(recipients[0].0.clone());
            } else {
                tx_builder.set_recipients(recipients);
            }
            if enable_rbf {
                tx_builder.enable_rbf();
            }
            if offline_signer {
                tx_builder.include_output_redeem_witness_script();
            }
            if let Some(fee_rate) = fee_rate {
                tx_builder.fee_rate(FeeRate::from_sat_per_vb(fee_rate));
            }
            if let Some(utxos) = utxos {
                // --utxos implies manual coin selection only.
                tx_builder.add_utxos(&utxos[..])?.manually_selected_only();
            }
            if let Some(unspendable) = unspendable {
                tx_builder.unspendable(unspendable);
            }
            // Policy paths are passed as JSON maps of policy-node id -> child
            // indexes, one per keychain.
            let policies = vec![
                external_policy.map(|p| (p, KeychainKind::External)),
                internal_policy.map(|p| (p, KeychainKind::Internal)),
            ];
            for (policy, keychain) in policies.into_iter().flatten() {
                let policy = serde_json::from_str::<BTreeMap<String, Vec<usize>>>(&policy)
                    .map_err(|s| Error::Generic(s.to_string()))?;
                tx_builder.policy_path(policy, keychain);
            }
            let (psbt, details) = tx_builder.finish()?;
            if wallet_opts.verbose {
                // Verbose mode additionally serializes the PSBT structure as JSON.
                Ok(
                    json!({"psbt": base64::encode(&serialize(&psbt)),"details": details, "serialized_psbt": psbt}),
                )
            } else {
                Ok(json!({"psbt": base64::encode(&serialize(&psbt)),"details": details}))
            }
        }
        BumpFee {
            txid,
            shrink_address,
            offline_signer,
            utxos,
            unspendable,
            fee_rate,
        } => {
            let txid = Txid::from_str(txid.as_str()).map_err(|s| Error::Generic(s.to_string()))?;

            let mut tx_builder = wallet.build_fee_bump(txid)?;
            tx_builder.fee_rate(FeeRate::from_sat_per_vb(fee_rate));

            if let Some(address) = shrink_address {
                // The extra fee is taken from this recipient's output.
                let script_pubkey = address.script_pubkey();
                tx_builder.allow_shrinking(script_pubkey)?;
            }

            if offline_signer {
                tx_builder.include_output_redeem_witness_script();
            }

            if let Some(utxos) = utxos {
                tx_builder.add_utxos(&utxos[..])?;
            }

            if let Some(unspendable) = unspendable {
                tx_builder.unspendable(unspendable);
            }

            let (psbt, details) = tx_builder.finish()?;
            Ok(json!({"psbt": base64::encode(&serialize(&psbt)),"details": details,}))
        }
        Policies => Ok(json!({
            "external": wallet.policies(KeychainKind::External)?,
            "internal": wallet.policies(KeychainKind::Internal)?,
        })),
        PublicDescriptor => Ok(json!({
            "external": wallet.public_descriptor(KeychainKind::External)?.map(|d| d.to_string()),
            "internal": wallet.public_descriptor(KeychainKind::Internal)?.map(|d| d.to_string()),
        })),
        Sign {
            psbt,
            assume_height,
        } => {
            // NOTE(review): unwrap on user-supplied base64/PSBT input — malformed
            // input panics instead of returning Error; consider map_err.
            let psbt = base64::decode(&psbt).unwrap();
            let mut psbt: PartiallySignedTransaction = deserialize(&psbt).unwrap();
            let signopt = SignOptions {
                assume_height,
                ..Default::default()
            };
            let finalized = wallet.sign(&mut psbt, signopt)?;
            if wallet_opts.verbose {
                Ok(
                    json!({"psbt": base64::encode(&serialize(&psbt)),"is_finalized": finalized, "serialized_psbt": psbt}),
                )
            } else {
                Ok(json!({"psbt": base64::encode(&serialize(&psbt)),"is_finalized": finalized,}))
            }
        }
        ExtractPsbt { psbt } => {
            // NOTE(review): same unwrap-on-user-input pattern as Sign.
            let psbt = base64::decode(&psbt).unwrap();
            let psbt: PartiallySignedTransaction = deserialize(&psbt).unwrap();
            Ok(json!({"raw_tx": serialize_hex(&psbt.extract_tx()),}))
        }
        FinalizePsbt {
            psbt,
            assume_height,
        } => {
            let psbt = base64::decode(&psbt).unwrap();
            let mut psbt: PartiallySignedTransaction = deserialize(&psbt).unwrap();

            let signopt = SignOptions {
                assume_height,
                ..Default::default()
            };
            // Finalize only; does not add signatures.
            let finalized = wallet.finalize_psbt(&mut psbt, signopt)?;

            if wallet_opts.verbose {
                Ok(
                    json!({ "psbt": base64::encode(&serialize(&psbt)),"is_finalized": finalized, "serialized_psbt": psbt}),
                )
            } else {
                Ok(json!({ "psbt": base64::encode(&serialize(&psbt)),"is_finalized": finalized,}))
            }
        }
        CombinePsbt { psbt } => {
            let mut psbts = psbt
                .iter()
                .map(|s| {
                    // NOTE(review): unwrap on each user-supplied PSBT string.
                    let psbt = base64::decode(&s).unwrap();
                    let psbt: PartiallySignedTransaction = deserialize(&psbt).unwrap();
                    psbt
                })
                .collect::<Vec<_>>();
            // pop().unwrap() is safe only because structopt marks --psbt required,
            // guaranteeing at least one element.
            let init_psbt = psbts.pop().unwrap();
            // Fold the remaining PSBTs into the first via merge(), failing on
            // the first incompatible pair.
            let final_psbt = psbts
                .into_iter()
                .try_fold::<_, _, Result<PartiallySignedTransaction, Error>>(
                    init_psbt,
                    |mut acc, x| {
                        acc.merge(x)?;
                        Ok(acc)
                    },
                )?;

            Ok(json!({ "psbt": base64::encode(&serialize(&final_psbt)) }))
        }
    }
}

/// Execute an online wallet sub-command
///
/// Online wallet sub-commands are described in [`OnlineWalletSubCommand`]. See [`crate`] for
/// example usage.
#[cfg(any(
    feature = "electrum",
    feature = "esplora",
    feature = "compact_filters",
    feature = "rpc"
))]
#[maybe_async]
pub fn handle_online_wallet_subcommand<C, D>(
    wallet: &Wallet<C, D>,
    online_subcommand: OnlineWalletSubCommand,
) -> Result<serde_json::Value, Error>
where
    C: Blockchain,
    D: BatchDatabase,
{
    match online_subcommand {
        Sync { max_addresses } => {
            maybe_await!(wallet.sync(log_progress(), max_addresses))?;
            Ok(json!({}))
        }
        Broadcast { psbt, tx } => {
            let tx = match (psbt, tx) {
                (Some(psbt), None) => {
                    // NOTE(review): unwrap on user-supplied base64/PSBT input.
                    let psbt = base64::decode(&psbt).unwrap();
                    let psbt: PartiallySignedTransaction = deserialize(&psbt).unwrap();
                    psbt.extract_tx()
                }
                (None, Some(tx)) => deserialize(&Vec::<u8>::from_hex(&tx).unwrap()).unwrap(),
                // These two arms should be unreachable: structopt's
                // required_unless/conflicts_with on --psbt/--tx reject both
                // combinations before we get here.
                (Some(_), Some(_)) => panic!("Both `psbt` and `tx` options not allowed"),
                (None, None) => panic!("Missing `psbt` and `tx` option"),
            };
            let txid = maybe_await!(wallet.broadcast(tx))?;
            Ok(json!({ "txid": txid }))
        }
    }
}

#[cfg_attr(not(doc), allow(missing_docs))]
#[cfg_attr(
    doc,
    doc = r#"
Key sub-command

Provides basic key operations that are not related to a specific wallet such as generating a
new random master extended key or restoring a master extended key from mnemonic words.

These sub-commands are **EXPERIMENTAL** and should only be used for testing. Do not use this
feature to create keys that secure actual funds on the Bitcoin mainnet.
"#
)]
#[derive(Debug, StructOpt, Clone, PartialEq)]
#[structopt(rename_all = "snake")]
pub enum KeySubCommand {
    /// Generates new random seed mnemonic phrase and corresponding master extended key
    Generate {
        /// Entropy level based on number of random seed mnemonic words
        #[structopt(
            name = "WORD_COUNT",
            short = "e",
            long = "entropy",
            default_value = "24",
            possible_values = &["12","24"],
        )]
        word_count: usize,
        /// Seed password
        #[structopt(name = "PASSWORD", short = "p", long = "password")]
        password: Option<String>,
    },
    /// Restore a master extended key from seed backup mnemonic words
    Restore {
        /// Seed mnemonic words, must be quoted (eg. "word1 word2 ...")
        #[structopt(name = "MNEMONIC", short = "m", long = "mnemonic")]
        mnemonic: String,
        /// Seed password
        #[structopt(name = "PASSWORD", short = "p", long = "password")]
        password: Option<String>,
    },
    /// Derive a child key pair from a master extended key and a derivation path string (eg. "m/84'/1'/0'/0" or "m/84h/1h/0h/0")
    Derive {
        /// Extended private key to derive from
        #[structopt(name = "XPRV", short = "x", long = "xprv")]
        xprv: ExtendedPrivKey,
        /// Path to use to derive extended public key from extended private key
        #[structopt(name = "PATH", short = "p", long = "path")]
        path: DerivationPath,
    },
}

/// Execute a key sub-command
///
/// Key sub-commands are described in [`KeySubCommand`].
pub fn handle_key_subcommand( network: Network, subcommand: KeySubCommand, ) -> Result<serde_json::Value, Error> { let secp = Secp256k1::new(); match subcommand { KeySubCommand::Generate { word_count, password, } => { let mnemonic_type = match word_count { 12 => MnemonicType::Words12, _ => MnemonicType::Words24, }; let mnemonic: GeneratedKey<_, miniscript::BareCtx> = Mnemonic::generate((mnemonic_type, Language::English)).unwrap(); //.map_err(|e| KeyError::from(e.unwrap()))?; let mnemonic = mnemonic.into_key(); let xkey: ExtendedKey = (mnemonic.clone(), password).into_extended_key()?; let xprv = xkey.into_xprv(network).unwrap(); let fingerprint = xprv.fingerprint(&secp); Ok( json!({ "mnemonic": mnemonic.phrase(), "xprv": xprv.to_string(), "fingerprint": fingerprint.to_string() }), ) } KeySubCommand::Restore { mnemonic, password } => { let mnemonic = Mnemonic::from_phrase(mnemonic.as_ref(), Language::English).unwrap(); // .map_err(|e| { // KeyError::from(e.downcast::<bdk::keys::bip39::ErrorKind>().unwrap()) // })?; let xkey: ExtendedKey = (mnemonic, password).into_extended_key()?; let xprv = xkey.into_xprv(network).unwrap(); let fingerprint = xprv.fingerprint(&secp); Ok(json!({ "xprv": xprv.to_string(), "fingerprint": fingerprint.to_string() })) } KeySubCommand::Derive { xprv, path } => { if xprv.network != network { return Err(Error::Key(InvalidNetwork)); } let derived_xprv = &xprv.derive_priv(&secp, &path)?; let origin: KeySource = (xprv.fingerprint(&secp), path); let derived_xprv_desc_key: DescriptorKey<Segwitv0> = derived_xprv.into_descriptor_key(Some(origin), DerivationPath::default())?; if let Secret(desc_seckey, _, _) = derived_xprv_desc_key { let desc_pubkey = desc_seckey.as_public(&secp).unwrap(); Ok(json!({"xpub": desc_pubkey.to_string(), "xprv": desc_seckey.to_string()})) } else { Err(Error::Key(Message("Invalid key variant".to_string()))) } } } } /// Execute the miniscript compiler sub-command /// /// Compiler options are described in 
[`CliSubCommand::Compile`]. #[cfg(feature = "compiler")] pub fn handle_compile_subcommand( _network: Network, policy: String, script_type: String, ) -> Result<serde_json::Value, Error> { let policy = Concrete::<String>::from_str(policy.as_str())?; let legacy_policy: Miniscript<String, Legacy> = policy .compile() .map_err(|e| Error::Generic(e.to_string()))?; let segwit_policy: Miniscript<String, Segwitv0> = policy .compile() .map_err(|e| Error::Generic(e.to_string()))?; let descriptor = match script_type.as_str() { "sh" => Descriptor::new_sh(legacy_policy), "wsh" => Descriptor::new_wsh(segwit_policy), "sh-wsh" => Descriptor::new_sh_wsh(segwit_policy), _ => panic!("Invalid type"), } .map_err(Error::Miniscript)?; Ok(json!({"descriptor": descriptor.to_string()})) } #[cfg(test)] mod test { use super::{CliOpts, WalletOpts}; #[cfg(feature = "compiler")] use crate::handle_compile_subcommand; #[cfg(feature = "compact_filters")] use crate::CompactFilterOpts; #[cfg(feature = "electrum")] use crate::ElectrumOpts; #[cfg(feature = "esplora")] use crate::EsploraOpts; use crate::OfflineWalletSubCommand::{BumpFee, CreateTx, GetNewAddress}; #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] use crate::OnlineWalletSubCommand::{Broadcast, Sync}; #[cfg(any(feature = "compact_filters", feature = "electrum", feature = "esplora"))] use crate::ProxyOpts; #[cfg(feature = "rpc")] use crate::RpcOpts; use crate::{handle_key_subcommand, CliSubCommand, KeySubCommand, WalletSubCommand}; use bdk::bitcoin::util::bip32::{DerivationPath, ExtendedPrivKey}; use bdk::bitcoin::{Address, Network, OutPoint}; use bdk::miniscript::bitcoin::network::constants::Network::Testnet; use std::str::FromStr; use structopt::StructOpt; #[test] fn test_parse_wallet_get_new_address() { let cli_args = vec!["bdk-cli", "--network", "bitcoin", "wallet", "--descriptor", 
"wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Bitcoin, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), stop_gap: 10, }, #[cfg(feature = "esplora-ureq")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), read_timeout: 5, write_timeout: 5, stop_gap: 10, }, #[cfg(feature = "esplora-reqwest")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), conc: 4, stop_gap: 10, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], conn_count: 4, skip_blocks: 0, }, #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "127.0.0.1:18443".to_string(), auth: ("user".to_string(), "password".to_string()), skip_blocks: None, }, }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "electrum")] #[test] fn test_parse_wallet_electrum() { let cli_args = vec!["bdk-cli", "--network", 
"testnet", "wallet", "--proxy", "127.0.0.1:9150", "--retries", "3", "--timeout", "10", "--descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "--server","ssl://electrum.blockstream.info:50002", "--stop_gap", "20", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), electrum_opts: ElectrumOpts { timeout: Some(10), server: "ssl://electrum.blockstream.info:50002".to_string(), stop_gap: 20 }, proxy_opts: ProxyOpts{ proxy: Some("127.0.0.1:9150".to_string()), proxy_auth: None, retries: 3, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "127.0.0.1:18443".to_string(), auth: ("user".to_string(), "password".to_string()), skip_blocks: None, } }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "esplora-ureq")] #[test] fn test_parse_wallet_esplora() { let cli_args = vec!["bdk-cli", "--network", "bitcoin", "wallet", "--descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "--server", "https://blockstream.info/api/", "--read_timeout", "10", 
"--write_timeout", "10", "--stop_gap", "20", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Bitcoin, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), esplora_opts: EsploraOpts { server: "https://blockstream.info/api/".to_string(), read_timeout: 10, write_timeout: 10, stop_gap: 20 }, proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, } }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "esplora-reqwest")] #[test] fn test_parse_wallet_esplora() { let cli_args = vec!["bdk-cli", "--network", "bitcoin", "wallet", "--descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "--server", "https://blockstream.info/api/", "--conc", "10", "--stop_gap", "20", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Bitcoin, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), esplora_opts: EsploraOpts { 
server: "https://blockstream.info/api/".to_string(), conc: 10, stop_gap: 20 }, proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, } }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "rpc")] #[test] fn test_parse_wallet_rpc() { let cli_args = vec!["bdk-cli", "--network", "bitcoin", "wallet", "--descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "--node", "125.67.89.101:56678", "--auth", "user:password", "--skip-blocks", "5", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Bitcoin, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), }, #[cfg(feature = "esplora")] esplora_opts: EsploraOpts { server: "https://blockstream.info/api/".to_string(), concurrency: 5, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], skip_blocks: 0, conn_count: 4, }, #[cfg(any(feature="compact_filters", feature="electrum"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "125.67.89.101:56678".to_string(), auth: ("user".to_string(), "password".to_string()), 
skip_blocks: Some(5), }, }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "compact_filters")] #[test] fn test_parse_wallet_compact_filters() { let cli_args = vec!["bdk-cli", "--network", "bitcoin", "wallet", "--descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "--proxy", "127.0.0.1:9005", "--proxy_auth", "random_user:random_passwd", "--node", "127.0.0.1:18444", "127.2.3.1:19695", "--conn_count", "4", "--skip_blocks", "5", "get_new_address"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Bitcoin, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(xpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string(), "127.2.3.1:19695".to_string()], conn_count: 4, skip_blocks: 5, }, proxy_opts: ProxyOpts{ proxy: Some("127.0.0.1:9005".to_string()), proxy_auth: Some(("random_user".to_string(), "random_passwd".to_string())), retries: 5, } }, subcommand: WalletSubCommand::OfflineWalletSubCommand(GetNewAddress), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] #[test] fn test_parse_wallet_sync() { let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", "--descriptor", 
"wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "sync", "--max_addresses", "50"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: None, #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), stop_gap: 10, }, #[cfg(feature = "esplora-ureq")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), read_timeout: 5, write_timeout: 5, stop_gap: 10, }, #[cfg(feature = "esplora-reqwest")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), conc: 4, stop_gap: 10, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], conn_count: 4, skip_blocks: 0, }, #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "127.0.0.1:18443".to_string(), auth: ("user".to_string(), "password".to_string()), skip_blocks: None, }, }, subcommand: WalletSubCommand::OnlineWalletSubCommand(Sync { max_addresses: Some(50) }), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[test] fn test_parse_wallet_create_tx() { let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", "--descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", 
"wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "create_tx", "--to", "n2Z3YNXtceeJhFkTknVaNjT1mnCGWesykJ:123456","mjDZ34icH4V2k9GmC8niCrhzVuR3z8Mgkf:78910", "--utxos","87345e46bfd702d24d54890cc094d08a005f773b27c8f965dfe0eb1e23eef88e:1", "--utxos","87345e46bfd702d24d54890cc094d08a005f773b27c8f965dfe0eb1e23eef88e:2"]; let cli_opts = CliOpts::from_iter(&cli_args); let script1 = Address::from_str("n2Z3YNXtceeJhFkTknVaNjT1mnCGWesykJ") .unwrap() .script_pubkey(); let script2 = Address::from_str("mjDZ34icH4V2k9GmC8niCrhzVuR3z8Mgkf") .unwrap() .script_pubkey(); let outpoint1 = OutPoint::from_str( "87345e46bfd702d24d54890cc094d08a005f773b27c8f965dfe0eb1e23eef88e:1", ) .unwrap(); let outpoint2 = OutPoint::from_str( "87345e46bfd702d24d54890cc094d08a005f773b27c8f965dfe0eb1e23eef88e:2", ) .unwrap(); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), stop_gap: 10, }, #[cfg(feature = "esplora-ureq")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), read_timeout: 5, write_timeout: 5, stop_gap: 10, }, #[cfg(feature = "esplora-reqwest")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), conc: 4, stop_gap: 10, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], conn_count: 4, skip_blocks: 0, }, 
#[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "127.0.0.1:18443".to_string(), auth: ("user".to_string(), "password".to_string()), skip_blocks: None, }, }, subcommand: WalletSubCommand::OfflineWalletSubCommand(CreateTx { recipients: vec![(script1, 123456), (script2, 78910)], send_all: false, enable_rbf: false, offline_signer: false, utxos: Some(vec!(outpoint1, outpoint2)), unspendable: None, fee_rate: None, external_policy: None, internal_policy: None, }), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[test] fn test_parse_wallet_bump_fee() { let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", "--descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "--change_descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)", "bump_fee", "--fee_rate", "6.1", "--txid","35aab0d0213f8996f9e236a28630319b93109754819e8abf48a0835708d33506", "--shrink","tb1ql7w62elx9ucw4pj5lgw4l028hmuw80sndtntxt"]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: Some("wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/1/*)".to_string()), #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), stop_gap: 10, }, #[cfg(feature = "esplora-ureq")] esplora_opts: EsploraOpts { server: 
"https://blockstream.info/testnet/api/".to_string(), read_timeout: 5, write_timeout: 5, stop_gap: 10, }, #[cfg(feature = "esplora-reqwest")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), conc: 4, stop_gap: 10, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], conn_count: 4, skip_blocks: 0, }, #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, } }, subcommand: WalletSubCommand::OfflineWalletSubCommand(BumpFee { txid: "35aab0d0213f8996f9e236a28630319b93109754819e8abf48a0835708d33506".to_string(), shrink_address: Some(Address::from_str("tb1ql7w62elx9ucw4pj5lgw4l028hmuw80sndtntxt").unwrap()), offline_signer: false, utxos: None, unspendable: None, fee_rate: 6.1 }), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(any( feature = "electrum", feature = "esplora", feature = "compact_filters", feature = "rpc" ))] #[test] fn test_parse_wallet_broadcast() { let cli_args = vec!["bdk-cli", "--network", "testnet", "wallet", "--descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "broadcast", "--psbt", "cHNidP8BAEICAAAAASWhGE1AhvtO+2GjJHopssFmgfbq+WweHd8zN/DeaqmDAAAAAAD/////AQAAAAAAAAAABmoEAAECAwAAAAAAAAA="]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Wallet { wallet_opts: WalletOpts { wallet: "main".to_string(), verbose: false, descriptor: "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)".to_string(), change_descriptor: None, #[cfg(feature = "electrum")] electrum_opts: ElectrumOpts { timeout: None, server: "ssl://electrum.blockstream.info:60002".to_string(), stop_gap: 10, }, #[cfg(feature = "esplora-ureq")] esplora_opts: 
EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), read_timeout: 5, write_timeout: 5, stop_gap: 10, }, #[cfg(feature = "esplora-reqwest")] esplora_opts: EsploraOpts { server: "https://blockstream.info/testnet/api/".to_string(), conc: 4, stop_gap: 10, }, #[cfg(feature = "compact_filters")] compactfilter_opts: CompactFilterOpts{ address: vec!["127.0.0.1:18444".to_string()], conn_count: 4, skip_blocks: 0, }, #[cfg(any(feature="compact_filters", feature="electrum", feature="esplora"))] proxy_opts: ProxyOpts{ proxy: None, proxy_auth: None, retries: 5, }, #[cfg(feature = "rpc")] rpc_opts: RpcOpts { address: "127.0.0.1:18443".to_string(), auth: ("user".to_string(), "password".to_string()), skip_blocks: None, }, }, subcommand: WalletSubCommand::OnlineWalletSubCommand(Broadcast { psbt: Some("cHNidP8BAEICAAAAASWhGE1AhvtO+2GjJHopssFmgfbq+WweHd8zN/DeaqmDAAAAAAD/////AQAAAAAAAAAABmoEAAECAwAAAAAAAAA=".to_string()), tx: None }), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[test] fn test_parse_wrong_network() { let cli_args = vec!["repl", "--network", "badnet", "wallet", "--descriptor", "wpkh(tpubDEnoLuPdBep9bzw5LoGYpsxUQYheRQ9gcgrJhJEcdKFB9cWQRyYmkCyRoTqeD4tJYiVVgt6A3rN6rWn9RYhR9sBsGxji29LYWHuKKbdb1ev/0/*)", "sync", "--max_addresses", "50"]; let cli_opts = CliOpts::from_iter_safe(&cli_args); assert!(cli_opts.is_err()); } #[test] fn test_key_generate() { let network = Testnet; let key_generate_cmd = KeySubCommand::Generate { word_count: 12, password: Some("test123".to_string()), }; let result = handle_key_subcommand(network, key_generate_cmd).unwrap(); let result_obj = result.as_object().unwrap(); let mnemonic = result_obj.get("mnemonic").unwrap().as_str().unwrap(); let mnemonic: Vec<&str> = mnemonic.split(' ').collect(); let xprv = result_obj.get("xprv").unwrap().as_str().unwrap(); assert_eq!(mnemonic.len(), 12); assert_eq!(&xprv[0..4], "tprv"); } #[test] fn test_key_restore() { let network = Testnet; let key_generate_cmd = KeySubCommand::Restore { 
mnemonic: "payment battle unit sword token broccoli era violin purse trip blood hire" .to_string(), password: Some("test123".to_string()), }; let result = handle_key_subcommand(network, key_generate_cmd).unwrap(); let result_obj = result.as_object().unwrap(); let fingerprint = result_obj.get("fingerprint").unwrap().as_str().unwrap(); let xprv = result_obj.get("xprv").unwrap().as_str().unwrap(); assert_eq!(&fingerprint, &"828af366"); assert_eq!(&xprv, &"tprv8ZgxMBicQKsPd18TeiFknZKqaZFwpdX9tvvKh8eeHSSPBQi5g9xPHztBg411o78G8XkrhQb6Q1cVvBJ1a9xuFHpmWgvQsvkJkNxBjfGoqhK"); } #[test] fn test_key_derive() { let network = Testnet; let key_generate_cmd = KeySubCommand::Derive { xprv: ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPfQjJy8ge2cvBfDjLxJSkvNLVQiw7BQ5gTjKadG2rrcQB5zjcdaaUTz5EDNJaS77q4DzjqjogQBfMsaXFFNP3UqoBnwt2kyT").unwrap(), path: DerivationPath::from_str("m/84'/1'/0'/0").unwrap(), }; let result = handle_key_subcommand(network, key_generate_cmd).unwrap(); let result_obj = result.as_object().unwrap(); let xpub = result_obj.get("xpub").unwrap().as_str().unwrap(); let xprv = result_obj.get("xprv").unwrap().as_str().unwrap(); assert_eq!(&xpub, &"[566844c5/84'/1'/0'/0]tpubDFeqiDkfwR1tAhPxsXSZMfEmfpDhwhLyhLKZgmeBvuBkZQusoWeL62oGg2oTNGcENeKdwuGepAB85eMvyLemabYe9PSqv6cr5mFXktHc3Ka/*"); assert_eq!(&xprv, &"[566844c5/84'/1'/0'/0]tprv8ixoZoiRo3LDHENAysmxxFaf6nhmnNA582inQFbtWdPMivf7B7pjuYBQVuLC5bkM7tJZEDbfoivENsGZPBnQg1n52Kuc1P8X2Ei3XJuJX7c/*"); } #[cfg(feature = "compiler")] #[test] fn test_parse_compile() { let cli_args = vec![ "bdk-cli", "compile", "thresh(3,pk(Alice),pk(Bob),pk(Carol),older(2))", "--type", "sh-wsh", ]; let cli_opts = CliOpts::from_iter(&cli_args); let expected_cli_opts = CliOpts { network: Network::Testnet, subcommand: CliSubCommand::Compile { policy: "thresh(3,pk(Alice),pk(Bob),pk(Carol),older(2))".to_string(), script_type: "sh-wsh".to_string(), }, }; assert_eq!(expected_cli_opts, cli_opts); } #[cfg(feature = "compiler")] #[test] fn test_compile() { let result 
= handle_compile_subcommand( Network::Testnet, "thresh(3,pk(Alice),pk(Bob),pk(Carol),older(2))".to_string(), "sh-wsh".to_string(), ) .unwrap(); let result_obj = result.as_object().unwrap(); let descriptor = result_obj.get("descriptor").unwrap().as_str().unwrap(); assert_eq!( &descriptor, &"sh(wsh(thresh(3,pk(Alice),s:pk(Bob),s:pk(Carol),sdv:older(2))))#l4qaawgv" ); } }
extern crate ion_core;

use self::ion_core::engine::App;

/// Sandbox entry point: build the application, run its lifecycle, shut down.
fn main() {
    // Assemble the application through its builder; construction failure
    // is unrecoverable here, so unwrap and abort.
    let application = App::builder()
        .project_name("ionSandbox")
        .project_developer("ionProject")
        .build()
        .unwrap();

    // Standard lifecycle: initialise, enter the main loop, then tear down.
    application.init();
    application.run();
    application.exit();
}
/// Demonstrates a generic struct: instantiates `Point` with `i32`
/// coordinates, sums them through the borrowing accessors, and prints
/// the result.
pub fn gen1() {
    let point = Point { x: 5, y: 2 };
    let total = *point.x() + *point.y();
    println!("generic 1 {}", total);
}

/// A 2-D point whose two coordinates share one generic type `T`.
struct Point<T> {
    x: T,
    y: T,
}

impl<T> Point<T> {
    /// Borrow the x coordinate.
    fn x(&self) -> &T {
        &self.x
    }

    /// Borrow the y coordinate.
    fn y(&self) -> &T {
        &self.y
    }
}
pub(crate) mod query_plan_tree;

use self::query_plan_tree::QueryPlanTree;

/// Query plan from which an executor can do its work deterministically.
// NOTE(review): the `new` derive presumably comes from the `derive_new`
// crate (generates a `QueryPlan::new(plan_tree)` constructor) — confirm the
// crate dependency.
#[derive(Clone, PartialEq, Debug, new)]
pub(crate) struct QueryPlan {
    // Root of the tree of plan nodes the executor walks.
    pub(crate) plan_tree: QueryPlanTree,
    // TODO evaluated cost, etc...
    // See PostgreSQL's plan structure: <https://github.com/postgres/postgres/blob/master/src/include/nodes/plannodes.h#L110>
}
// Demonstration of Rust's visibility modifiers: private-by-default, `pub`,
// `pub(in path)`, `pub(self)`, `pub(super)`, and `pub(crate)`.
mod my_mod {
    // Items default to private visibility.
    fn private_function() {
        println!("called `my_mod::private_function()`");
    }

    // Use the `pub` modifier to override the default visibility.
    pub fn function() {
        println!("called `my_mod::function()`");
    }

    // Items can access other items in the same module,
    // even when private.
    pub fn indirect_access() {
        print!("called `my_mod::indirect_access()`, that\n> ");
        private_function();
    }

    // Modules can also be nested.
    pub mod nested {
        pub fn function() {
            println!("called `my_mod::nested::function()`");
        }

        #[allow(dead_code)]
        fn private_function() {
            // Fixed: this message previously printed `my_mod::nester::…`,
            // which did not match this item's actual path.
            println!("called `my_mod::nested::private_function()`");
        }

        // Functions declared using `pub(in path)` syntax are only visible
        // within the given path. `path` must be a parent or ancestor module.
        pub(in crate::my_mod) fn public_function_in_my_mod() {
            print!("called `my_mod::nested::public_function_in_my_mod()`, that\n> ");
            public_function_in_nested();
        }

        // Functions declared using `pub(self)` syntax are only visible
        // within the current module, which is the same as leaving them private.
        pub(self) fn public_function_in_nested() {
            println!("called `my_mod::nested::public_function_in_nested()`");
        }

        // Functions declared using `pub(super)` syntax are only visible
        // within the parent module.
        pub(super) fn public_function_in_super_mod() {
            println!("called `my_mod::nested::public_function_in_super_mod()`");
        }
    }

    pub fn call_public_function_in_my_mod() {
        print!("called `my_mod::call_public_function_in_my_mod()`, that\n> ");
        nested::public_function_in_my_mod();
        print!("> ");
        nested::public_function_in_super_mod();
    }

    // `pub(crate)` makes functions visible only within the current crate.
    pub(crate) fn public_function_in_crate() {
        println!("called `my_mod::public_function_in_crate()`");
    }

    // Nested modules follow the same rules for visibility.
    mod private_nested {
        #[allow(dead_code)]
        pub fn function() {
            println!("called `my_mod::private_nested::function()`");
        }

        // Private parent items will still restrict the visibility of a
        // child item, even if it is declared as visible within a bigger scope.
        #[allow(dead_code)]
        pub(crate) fn restricted_function() {
            println!("called `my_mod::private_nested::restricted_function()`");
        }
    }
}

fn function() {
    println!("called `function()`");
}

fn main() {
    // Modules allow disambiguation between items that have the same name.
    function();
    my_mod::function();

    my_mod::indirect_access();
    my_mod::nested::function();
    my_mod::call_public_function_in_my_mod();
    my_mod::public_function_in_crate();

    // `pub(in path)` items can only be called from within the module
    // specified in the path, so this would not compile:
    // my_mod::nested::public_function_in_my_mod();

    // Can't call private functions from outside their module either.
    // etc.
}
// Integration tests for diesel's `.only()` (PostgreSQL `SELECT … FROM ONLY`),
// which restricts a query to the named table and excludes tables that
// inherit from it. Requires a live PostgreSQL connection supplied by the
// test helpers (`connection`, `create_table`, `drop_table_cascade`).
use crate::schema::*;
use crate::schema_dsl::*;
use diesel::dsl::*;
use diesel::pg::expression::dsl::OnlyDsl;
use diesel::*;

// Local table definition used by these tests; the physical table is
// (re)created inside each test.
table! {
    users (id) {
        id -> Int8,
        name -> Text,
        table_nr -> Int8,
    }
}

// Insert/query helper mapped onto the `users` table above.
#[derive(Debug, PartialEq, Eq, Queryable, Clone, Insertable, AsChangeset, Selectable)]
#[diesel(table_name = users)]
pub struct NewUser {
    pub name: String,
}

// `.only()` must exclude rows that live in a child (inheriting) table:
// counting through the plain table sees the inherited row, counting through
// `.only()` does not.
#[test]
fn select_from_only_with_inherited_table() {
    let connection = &mut connection();
    drop_table_cascade(connection, "users");
    create_table(
        "users",
        (
            integer("id").primary_key().auto_increment(),
            string("name").not_null(),
            integer("table_nr").not_null().default("1"),
        ),
    )
    .execute(connection)
    .unwrap();
    // `users2` inherits from `users`; its CHECK pins table_nr = 2 so rows
    // are distinguishable from parent-table rows.
    connection
        .execute("CREATE TABLE users2 (check (table_nr = 2)) inherits (users)")
        .unwrap();
    connection
        .execute("INSERT INTO users2 (name, table_nr) VALUES ('hello', 2)")
        .unwrap();
    // There is now only one entry in the users2 table, none in the users table.
    let n_users = users::table
        .select(count(users::id))
        .first::<i64>(connection)
        .unwrap();
    assert_eq!(n_users, 1);
    let n_users_in_main_table = users::table
        .only()
        .select(count(users::id))
        .first::<i64>(connection)
        .unwrap();
    assert_eq!(n_users_in_main_table, 0);
}

// `.only()` must compose with the rest of the query DSL.
#[test]
fn select_from_only_filtering() {
    // Test that it's possible to call `.only().filter(..)`
    let connection = &mut connection();
    drop_table_cascade(connection, "users");
    create_table(
        "users",
        (
            integer("id").primary_key().auto_increment(),
            string("name").not_null(),
        ),
    )
    .execute(connection)
    .unwrap();
    diesel::insert_into(users::table)
        .values(users::name.eq("hello"))
        .execute(connection)
        .unwrap();
    diesel::insert_into(users::table)
        .values(users::name.eq("world"))
        .execute(connection)
        .unwrap();
    let results = users::table
        .only()
        .filter(users::name.eq("world"))
        .select(users::name)
        .load::<String>(connection)
        .unwrap();
    assert_eq!(results.len(), 1);
}
use crate::PortRangeList;
use contracts::requires;

/// Iterator that hands out source ports for pings, walking the configured
/// port ranges in ascending order and wrapping back to the first range when
/// the last one is exhausted.
pub struct PingPortPicker {
    // How many ports are still allowed to be yielded; `None` = unlimited.
    remaining_ping_count: Option<u32>,
    // Validated, ascending-sorted list of inclusive port ranges.
    port_ranges: PortRangeList,
    // The port the next call to `next()` will return.
    next_port: u16,
    // Index into `port_ranges.ranges` of the range `next_port` belongs to.
    next_port_range_index: usize,
}

impl PingPortPicker {
    /// Creates a picker over `port_ranges`.
    ///
    /// * `ping_count` — optional cap on how many ports the iterator yields.
    /// * `port_ranges` — must be non-empty; every range must have non-zero,
    ///   ordered bounds (enforced by the `#[requires]` contracts below).
    /// * `skip_port_count` — number of leading ports to discard, e.g. to
    ///   resume a previous run.
    ///
    /// NOTE(review): skipping is implemented by calling `next()`, so each
    /// skipped port also decrements `remaining_ping_count` — confirm that
    /// the ping budget is meant to include skipped ports.
    // NOTE(review): `#[allow(unreachable_code)]` looks like a workaround for
    // code generated by the `contracts` macro — confirm it is still needed.
    #[allow(unreachable_code)]
    #[requires(port_ranges.ranges.len() > 0)]
    #[requires(port_ranges.ranges.iter().filter(|r| r.start() == &0 || r.end() == &0 || r.start() > r.end()).count() == 0)]
    pub fn new(ping_count: Option<u32>, mut port_ranges: PortRangeList, skip_port_count: u32) -> PingPortPicker {
        // Sort ranges by their start so iteration is in ascending order.
        port_ranges.ranges.sort_by(|a, b| a.start().cmp(b.start()));

        // Start from the lowest port of the lowest range.
        let next_port = *port_ranges.ranges[0].start();
        let mut port_picker = PingPortPicker {
            remaining_ping_count: ping_count,
            port_ranges,
            next_port,
            next_port_range_index: 0,
        };

        // Discard the first `skip_port_count` ports (see NOTE above).
        for _ in 0..skip_port_count {
            port_picker.next();
        }

        return port_picker;
    }

    /// Yields the next port, or `None` once the ping budget is exhausted.
    fn fetch_next_available_port(&mut self) -> Option<u16> {
        match self.remaining_ping_count {
            // Budget used up: stop the iterator.
            Some(remaining_ping_count) if remaining_ping_count == 0 => return None,
            // Budget present: consume one ping.
            Some(remaining_ping_count) => self.remaining_ping_count = Some(remaining_ping_count - 1),
            // No budget configured: yield forever.
            None => (),
        }

        return Some(self.fetch_next_available_port_from_port_ranges());
    }

    /// Returns the current port and advances `next_port`, moving to the next
    /// range when the current one is exhausted and wrapping to the first
    /// range after the last.
    fn fetch_next_available_port_from_port_ranges(&mut self) -> u16 {
        let port = self.next_port;

        self.next_port = if self.next_port >= *(self.port_ranges.ranges[self.next_port_range_index].end()) {
            // Current range exhausted: advance, wrapping around at the end.
            self.next_port_range_index += 1;
            if self.next_port_range_index >= self.port_ranges.ranges.len() {
                self.next_port_range_index = 0;
            }
            *self.port_ranges.ranges[self.next_port_range_index].start()
        } else {
            self.next_port + 1
        };

        return port;
    }
}

impl Iterator for PingPortPicker {
    type Item = u16;

    fn next(&mut self) -> Option<Self::Item> {
        return self.fetch_next_available_port();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // A single one-port range repeats that port for every ping.
    #[test]
    fn ping_port_picker_should_work_with_port_range_1() {
        assert_eq!(
            vec![1024, 1024, 1024],
            PingPortPicker::new(Some(3), PortRangeList { ranges: vec![(1024..=1024)] }, 0).collect::<Vec<u16>>()
        );
    }

    // The ping count caps the iterator before the range is exhausted.
    #[test]
    fn ping_port_picker_should_work_with_limited_ping_count() {
        assert_eq!(
            vec![1024, 1025],
            PingPortPicker::new(Some(2), PortRangeList { ranges: vec![(1024..=1027)] }, 0).collect::<Vec<u16>>()
        );
    }

    // When the ping count exceeds the range, iteration wraps to the start.
    #[test]
    fn ping_port_picker_should_work_with_ping_count_larger_than_range() {
        assert_eq!(
            vec![1024, 1025, 1026, 1027, 1024, 1025],
            PingPortPicker::new(Some(6), PortRangeList { ranges: vec![(1024..=1027)] }, 0).collect::<Vec<u16>>()
        );
    }

    // Contract violation: range starting at port 0.
    #[test]
    #[should_panic]
    fn ping_port_picker_should_panic_on_zero_min_port() {
        PingPortPicker::new(Some(3), PortRangeList { ranges: vec![(0..=1024)] }, 0);
    }

    // Contract violation: range ending at port 0.
    #[test]
    #[should_panic]
    fn ping_port_picker_should_panic_on_zero_max_port() {
        PingPortPicker::new(Some(3), PortRangeList { ranges: vec![(1024..=0)] }, 0);
    }

    // Contract violation: inverted range bounds.
    #[test]
    #[should_panic]
    fn ping_port_picker_should_panic_when_min_port_is_larger_than_max_port() {
        PingPortPicker::new(Some(3), PortRangeList { ranges: vec![(1028..=1024)] }, 0);
    }

    // Multiple single-port ranges are walked in order, then wrapped.
    #[test]
    fn ping_port_picker_should_work_with_port_list() {
        assert_eq!(
            vec![1024, 1025, 1026, 1024, 1025],
            PingPortPicker::new(Some(5), PortRangeList { ranges: vec![(1024..=1024), (1025..=1025), (1026..=1026)] }, 0).collect::<Vec<u16>>()
        );
    }

    // Contract violation: the range list must not be empty.
    #[test]
    #[should_panic]
    fn ping_port_picker_should_panic_when_port_list_is_empty() {
        PingPortPicker::new(Some(3), PortRangeList { ranges: vec![] }, 0);
    }
}
use parse::ParseError;
use rand::{self, Rand, Rng, SeedableRng, XorShiftRng};
use std::cell::RefCell;
use std::str::FromStr;

// Per-thread, fast (non-cryptographic) RNG shared by the helpers below.
// Starts unseeded/deterministic until `reseed` is called.
thread_local! {
    static WEAK_RNG: RefCell<XorShiftRng> = RefCell::new(
        XorShiftRng::new_unseeded()
    );
}

/// 128-bit seed for the thread-local XorShift RNG.
#[derive(Clone, Copy, Debug)]
pub struct Seed([u32; 4]);

impl Seed {
    /// Builds a seed from the OS-backed thread RNG.
    // NOTE(review): each word is the wrapping sum of two `next_u32` calls —
    // presumably just extra mixing; a sum carries no more entropy than one
    // draw. Also, `XorShiftRng` rejects an all-zero seed, which this could
    // (astronomically rarely) produce — confirm both points.
    pub fn random() -> Self {
        let mut rng = rand::thread_rng();
        Seed(
            [
                rng.next_u32().wrapping_add(rng.next_u32()),
                rng.next_u32().wrapping_add(rng.next_u32()),
                rng.next_u32().wrapping_add(rng.next_u32()),
                rng.next_u32().wrapping_add(rng.next_u32()),
            ],
        )
    }
}

impl FromStr for Seed {
    type Err = ParseError;

    /// Parses a seed from text like `[1, 2, 3, 4]`: brackets, spaces and
    /// commas act as separators; the first four parseable integers are
    /// taken. Fails with `ParseError` if fewer than four are found.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        let mut tokens = input
            .split(|c| c == '[' || c == ']' || c == ' ' || c == ',')
            .filter_map(|s| s.parse().ok());
        let mut result = [0; 4];
        for result in &mut result {
            *result = tokens.next().ok_or(ParseError)?;
        }
        Ok(Seed(result))
    }
}

/// Set the seed used for the random number generator.
pub fn reseed(seed: Seed) {
    with_rng(|rng| rng.reseed(seed.0))
}

/// Random value from the thread-local weak RNG.
pub fn gen<T: Rand>() -> T {
    with_rng(|rng| rng.gen())
}

/// Sample values from an iterator.
// Draws `amount` items uniformly without replacement (fewer if the iterator
// is shorter), using the thread-local weak RNG.
#[allow(unused)]
pub fn sample<T, I>(iterable: I, amount: usize) -> Vec<T>
where
    I: IntoIterator<Item = T>,
{
    with_rng(|rng| rand::sample(rng, iterable, amount))
}

/// Generate random boolean with the given probability that it comes up true.
pub fn gen_bool_with_probability(p: f64) -> bool {
    gen::<f64>() <= p
}

// Runs `f` with mutable access to this thread's weak RNG.
fn with_rng<F: FnOnce(&mut XorShiftRng) -> R, R>(f: F) -> R {
    WEAK_RNG.with(|rng| f(&mut *rng.borrow_mut()))
}
use cosmwasm_std::StdError;
use cw20_base::ContractError as Cw20ContractError;
use thiserror::Error;

/// This contract's error type: a superset of `cw20_base`'s errors plus any
/// custom variants added locally. Display strings come from the `#[error]`
/// attributes via `thiserror`.
#[derive(Error, Debug)]
pub enum ContractError {
    #[error("{0}")]
    Std(#[from] StdError),

    #[error("Unauthorized")]
    Unauthorized {},

    // Add any other custom errors you like here.
    // Look at https://docs.rs/thiserror/1.0.21/thiserror/ for details.
    #[error("No {denom} tokens sent")]
    EmptyBalance { denom: String },

    #[error("Cannot set to own account")]
    CannotSetOwnAccount {},

    #[error("Invalid zero amount")]
    InvalidZeroAmount {},

    #[error("Allowance is expired")]
    Expired {},

    #[error("No allowance for this account")]
    NoAllowance {},

    #[error("Minting cannot exceed the cap")]
    CannotExceedCap {},

    #[error("Logo binary data exceeds 5KB limit")]
    LogoTooBig {},

    #[error("Invalid xml preamble for SVG")]
    InvalidXmlPreamble {},

    #[error("Invalid png header")]
    InvalidPngHeader {},
}

// One-to-one mapping from `cw20_base` errors into the local error type so
// `?` works on cw20-base calls. The match is deliberately exhaustive (no
// catch-all): if cw20-base adds a variant, this fails to compile and forces
// an explicit decision here.
impl From<Cw20ContractError> for ContractError {
    fn from(err: Cw20ContractError) -> Self {
        match err {
            Cw20ContractError::Std(error) => ContractError::Std(error),
            Cw20ContractError::Unauthorized {} => ContractError::Unauthorized {},
            Cw20ContractError::CannotSetOwnAccount {} => ContractError::CannotSetOwnAccount {},
            Cw20ContractError::InvalidZeroAmount {} => ContractError::InvalidZeroAmount {},
            Cw20ContractError::Expired {} => ContractError::Expired {},
            Cw20ContractError::NoAllowance {} => ContractError::NoAllowance {},
            Cw20ContractError::CannotExceedCap {} => ContractError::CannotExceedCap {},
            Cw20ContractError::LogoTooBig {} => ContractError::LogoTooBig {},
            Cw20ContractError::InvalidXmlPreamble {} => ContractError::InvalidXmlPreamble {},
            Cw20ContractError::InvalidPngHeader {} => ContractError::InvalidPngHeader {},
        }
    }
}
// There's a file here. (6.txt) It's been base64'd after being encrypted with // repeating-key XOR. // Decrypt it. // Here's how: // 1. Let KEYSIZE be the guessed length of the key; // try values from 2 to (say) 40. // 2. Write a function to compute the edit distance/Hamming distance between // two strings. The Hamming distance is just the number of differing bits. // The distance between: // this is a test // and // wokka wokka!!! // is 37. Make sure your code agrees before you proceed. // 3. For each KEYSIZE, take the first KEYSIZE worth of bytes, // and the second KEYSIZE worth of bytes, and find the edit distance // between them. Normalize this result by dividing by KEYSIZE. // 4. The KEYSIZE with the smallest normalized edit distance is probably // the key. You could proceed perhaps with the smallest 2-3 KEYSIZE values. // Or take 4 KEYSIZE blocks instead of 2 and average the distances. // 5. Now that you probably know the KEYSIZE: break the ciphertext into blocks // of KEYSIZE length. // 6. Now transpose the blocks: make a block that is the first byte of every // block, and a block that is the second byte of every block, and so on. // 7. Solve each block as if it was single-character XOR. You already have code // to do this. // 8. For each block, the single-byte XOR key that produces the best looking // histogram is the repeating-key XOR key byte for that block. Put them // together and you have the key. // This code is going to turn out to be surprisingly useful later on. // Breaking repeating-key XOR ("Vigenere") statistically is obviously an // academic exercise, a "Crypto 101" thing. But more people "know how" to break // it than can actually break it, and a similar technique breaks something much // more important. 
#[macro_use]
extern crate log;
extern crate cryptopalslib;

use std::cmp;
use std::cmp::Ordering;
use std::str;
#[cfg(not(test))]
use std::env;
#[cfg(not(test))]
use std::io::prelude::*;
#[cfg(not(test))]
use std::io::BufReader;
#[cfg(not(test))]
use std::fs::File;
#[cfg(not(test))]
use std::path::Path;

/// CLI entry point: expects the path of a base64-encoded ciphertext file as
/// the first argument, then prints the recovered key and plaintext.
#[cfg(not(test))]
fn main() {
    println!("Decoding...");
    if env::args().count() < 2 {
        panic!("Must pass a file to decode")
    }
    let arg = match env::args().nth(1) {
        Some(s) => s,
        None => panic!("No input argument given")
    };
    let (key, output) = break_repeating_key_xor_in_file(&arg);
    println!("key: {:?}", key);
    println!("output: {:?}", output);
}

/// Reads `path` line by line and forwards the lines to the line-based breaker.
/// Panics on any I/O failure (acceptable for this one-shot CLI tool).
#[cfg(not(test))]
fn break_repeating_key_xor_in_file(path: &str) -> (String, String) {
    let path = Path::new(path);
    let file = BufReader::new(File::open(&path).unwrap());
    let lines = file.lines().map(|x| x.unwrap()).collect();
    break_repeating_key_xor_in_lines(lines)
}

/// Decodes the base64 input lines to raw ciphertext bytes and breaks the
/// repeating-key XOR. Returns the `(key, plaintext)` pair.
fn break_repeating_key_xor_in_lines(lines: Vec<String>) -> (String, String) {
    let input = cryptopalslib::convert::base64_lines_to_hex(&lines);
    let nums = cryptopalslib::convert::hex_string_to_decimal_pairs(&input);
    break_repeating_key_xor(nums)
}

/// Core attack: tries candidate key lengths (best-ranked first), solves each
/// byte column as a single-byte XOR, and re-interleaves the decoded columns
/// into readable plaintext. Returns `("", "")` if no candidate succeeds.
fn break_repeating_key_xor(bytes: Vec<u8>) -> (String, String) {
    debug!("{:?}", bytes.len());
    let results = rank_keylengths(&bytes);
    debug!("{:?}", results);
    // loop over the vector of keysizes
    for &(_, keysize) in results.iter() {
        debug!("{:?}", keysize);
        // find the best key and the corresponding decoded column for
        // each byte of the keysize.
        let (best_keys, decoded_columns) = find_best_key_and_columns(keysize, &bytes);
        // if we've got something in the output string list,
        // we might have figured out the key, so merge the strings
        // into something readable
        if decoded_columns.len() != 0 {
            let mut output_string = String::new();
            let string_len = decoded_columns[0].len();
            // Transpose back: take character `index` from every column in turn.
            for index in 0..string_len {
                for str_index in 0..decoded_columns.len() {
                    if index < decoded_columns[str_index].len() {
                        // NOTE(review): `chars().nth(index)` rescans the column
                        // from the start each time, making this merge O(n^2);
                        // iterating each column's `chars()` once would be linear.
                        output_string.push(decoded_columns[str_index].chars().nth(index).unwrap());
                    }
                }
            }
            // Prefer a printable UTF-8 key; fall back to the raw byte debug form.
            let key = match str::from_utf8(&best_keys) {
                Ok(v) => v.to_string(),
                Err(_) => {
                    format!("{:?}", best_keys)
                }
            };
            return (key.to_string(), output_string);
        }
    }
    ("".to_string(), "".to_string())
}

/// For a given `keysize`, solves every byte column (bytes at positions
/// congruent to `index` mod `keysize`) as a single-byte XOR.
/// Returns the per-column key bytes and the decoded column strings; both are
/// empty when any column fails to produce valid-looking text.
fn find_best_key_and_columns(keysize: usize, bytes: &Vec<u8>) -> (Vec<u8>, Vec<String>) {
    let mut decoded_columns = vec!();
    let mut best_keys = vec!();
    for index in 0..keysize {
        // Select the bytes belonging to column `index` of the key.
        let filtered_bytes: Vec<_> = bytes.iter().enumerate()
            .filter(|&(x, _)| {
                if index > x {
                    return false
                } else {
                    return (x - index) % keysize == 0
                }
            })
            .map(|(_, &y)| y)
            .collect();
        let (_, best_key, string) = cryptopalslib::xor::score_and_xor(filtered_bytes);
        // if we get an empty string back, we don't have any valid text,
        // so throw out this keysize
        if string == "" {
            best_keys = vec!();
            decoded_columns = vec!();
            break;
        }
        best_keys.push(best_key);
        decoded_columns.push(string);
    }
    (best_keys, decoded_columns)
}

/// Ranks candidate key lengths 2..min(40, len/3) by the averaged, keysize-
/// normalized Hamming distance of the first three keysize-sized blocks.
/// Smaller normalized distance means a more likely key length; the returned
/// vector is sorted best-first.
fn rank_keylengths(bytes: &Vec<u8>) -> Vec<(f32, usize)> {
    let mut results: Vec<(f32, usize)> = Vec::new();
    for keysize in 2..cmp::min(40, (bytes.len() / 3)) {
        // take first keysize of bytes
        let first = &bytes[0..keysize];
        // take second keysize of bytes
        let second = &bytes[keysize..keysize*2];
        // take third keysize of bytes
        let third = &bytes[keysize*2..keysize*3];
        // average the hamming distance between them
        let distance = (cryptopalslib::score::hamming_distance(first, second) +
            cryptopalslib::score::hamming_distance(second, third) +
            cryptopalslib::score::hamming_distance(first, third)) / 3;
        // normalize the average distance by the keysize
        let normalized = distance as f32 / keysize as f32;
        results.push((normalized, keysize));
    }
    // sort the vector from least to greatest average distance.
    results.sort_by(|&(x1, _), &(x2, _)| x1.partial_cmp(&x2).unwrap_or(Ordering::Equal));
    results
}

#[cfg(test)]
mod set1challenge6 {
    #[test]
    fn decode() {
        // taken from https://picoctf.com/crypto_mats/index.html
        let input = vec!("mIdwJYSyjmxxt7uZfnGVv4F6OIS/mDU4ifqffTTHvIp2Jceug3Qly/qeeyWOtstzMI6oh2xxlb+IcD+TtpI5cY6uy2IwlPqbZz6Fu4l5KMeYmXwlhrOFMiLHuI5mJcexjmUlx6mOdiOCrsU1BY+zmDU4lPqJcDKGr5hwcYi8y2E5gvqYcDKVv4hscZSvmWc+krSPfD+A+op5Pceug3BxhrmffCeOroJwIse5imcjjr+PNT6J+oNwI4L6j2AjjrSMNQaIqIdxcbC7mTUFkLXLYjCU+oRzcZGzn3Q9x7OGZT6Vrop7MoL6n3pxiK+ZNT+GroJ6P4a2y2Y0hK+ZfCWe+op7Ncevh2E4irufcHGRs4hhPpWjxQ==".to_string());
        let (_, output) = super::break_repeating_key_xor_in_lines(input);
        assert_eq!(output, "Bletchey Park rejoices in the fact that, until fairly recently, it was probably Britain's best kept secret. This is because of the secrecy surrounding all the activities carried on here during World War Two was of vital importance to our national security and ultimate victory.");
    }
}
// error-pattern:fail use std; import std::map; import std::uint; fn main() { let count = @mutable 0u; let hash = bind fn (_s: [@str], count: @mutable uint) -> uint { *count += 1u; if *count == 10u { fail; } else { ret *count; } } (_, count); fn eq(s: [@str], t: [@str]) -> bool { ret s == t; } let map = map::mk_hashmap(hash, eq); let arr = []; for each i in uint::range(0u, 10u) { arr += [@"key stuff"]; map.insert(arr, arr + [@"value stuff"]); } }
use std::{borrow::Cow, cmp::Ordering::Equal, sync::Arc};

use eyre::Report;
use rosu_v2::prelude::{GameMode, OsuError};

use crate::{
    commands::osu::UserArgs,
    custom_client::SnipeCountryPlayer as SCP,
    database::OsuData,
    embeds::{CountrySnipeListEmbed, EmbedData},
    pagination::{CountrySnipeListPagination, Pagination},
    util::{
        constants::{common_literals::SORT, HUISMETBENEN_ISSUE, OSU_API_ISSUE},
        numbers, CountryCode, CowUtils, MessageExt,
    },
    Args, BotResult, CommandData, Context,
};

#[command]
#[short_desc("Sort the country's #1 leaderboard")]
#[long_desc(
    "Sort the country's #1 leaderboard.\n\
    To specify a country, you must provide its acronym e.g. `be` \
    or alternatively you can provide `global`.\n\
    To specify an order, you must provide `sort=...` with any of these values:\n\
    - `count` to sort by #1 count\n \
    - `pp` to sort by average pp of #1 scores\n \
    - `stars` to sort by average star rating of #1 scores\n \
    - `weighted` to sort by pp gained only from #1 scores\n\
    If no ordering is specified, it defaults to `count`.\n\
    If no country is specified either, I will take the country of the linked user.\n\
    All data originates from [Mr Helix](https://osu.ppy.sh/users/2330619)'s \
    website [huismetbenen](https://snipe.huismetbenen.nl/)."
)]
#[usage("[country acronym] [sort=count/pp/stars/weighted]")]
#[example("global sort=stars", "fr sort=weighted", "sort=pp")]
#[aliases("csl", "countrysnipeleaderboard", "cslb")]
#[bucket("snipe")]
// Prefix-command entry point: parses the message arguments, then delegates to
// `_countrysnipelist`; slash-command invocations are routed to `slash_snipe`.
async fn countrysnipelist(ctx: Arc<Context>, data: CommandData) -> BotResult<()> {
    match data {
        CommandData::Message { msg, mut args, num } => {
            match CountryListArgs::args(&ctx, &mut args) {
                Ok(list_args) => {
                    _countrysnipelist(ctx, CommandData::Message { msg, args, num }, list_args).await
                }
                Err(content) => msg.error(&ctx, content).await,
            }
        }
        CommandData::Interaction { command } => super::slash_snipe(ctx, *command).await,
    }
}

// Shared implementation for both prefix and slash invocations: resolves the
// target country, fetches its #1-score players from huismetbenen, sorts them
// by the requested criterion, and replies with a paginated embed.
pub(super) async fn _countrysnipelist(
    ctx: Arc<Context>,
    data: CommandData<'_>,
    args: CountryListArgs,
) -> BotResult<()> {
    let author_id = data.author()?.id;

    // Retrieve author's osu user to check if they're in the list
    let osu_user = match ctx
        .psql()
        .get_user_osu(author_id)
        .await
        .map(|osu| osu.map(OsuData::into_username))
    {
        Ok(Some(name)) => {
            let user_args = UserArgs::new(name.as_str(), GameMode::STD);

            match ctx.redis().osu_user(&user_args).await {
                Ok(user) => Some(user),
                Err(OsuError::NotFound) => {
                    let content = format!("User `{name}` was not found");

                    return data.error(&ctx, content).await;
                }
                Err(why) => {
                    let _ = data.error(&ctx, OSU_API_ISSUE).await;

                    return Err(why.into());
                }
            }
        }
        Ok(None) => None,
        Err(why) => {
            // Database failure is non-fatal here: fall back to "not linked".
            // NOTE(review): this message looks like it meant to interpolate
            // the id (e.g. `{author_id}`) — confirm and fix upstream.
            let wrap = "failed to get UserConfig for user author_id";
            warn!("{:?}", Report::new(why).wrap_err(wrap));

            None
        }
    };

    let CountryListArgs { country, sort } = args;

    // Explicit country argument wins; otherwise fall back to the linked
    // user's country, and bail with a helpful message when neither exists.
    let country_code = match country {
        Some(country) => country,
        None => match osu_user {
            Some(ref user) => {
                if ctx.contains_country(user.country_code.as_str()) {
                    user.country_code.as_str().into()
                } else {
                    let content = format!(
                        "`{}`'s country {} is not supported :(",
                        user.username, user.country
                    );

                    return data.error(&ctx, content).await;
                }
            }
            None => {
                let content = "Since you're not linked, you must specify a country (code)";

                return data.error(&ctx, content).await;
            }
        },
    };

    // Request players
    let mut players = match ctx.clients.custom.get_snipe_country(&country_code).await {
        Ok(players) => players,
        Err(why) => {
            let _ = data.error(&ctx, HUISMETBENEN_ISSUE).await;

            return Err(why.into());
        }
    };

    // Sort players (descending in the chosen metric; float comparisons fall
    // back to Equal on NaN instead of panicking)
    let sorter = match sort {
        SnipeOrder::Count => |p1: &SCP, p2: &SCP| p2.count_first.cmp(&p1.count_first),
        SnipeOrder::Pp => |p1: &SCP, p2: &SCP| p2.avg_pp.partial_cmp(&p1.avg_pp).unwrap_or(Equal),
        SnipeOrder::Stars => {
            |p1: &SCP, p2: &SCP| p2.avg_sr.partial_cmp(&p1.avg_sr).unwrap_or(Equal)
        }
        SnipeOrder::WeightedPp => |p1: &SCP, p2: &SCP| p2.pp.partial_cmp(&p1.pp).unwrap_or(Equal),
    };

    players.sort_unstable_by(sorter);

    // Try to find author in list
    let author_idx = osu_user.and_then(|user| {
        players
            .iter()
            .position(|player| player.username == user.username)
    });

    // Enumerate players (1-based ranks for display)
    let players: Vec<_> = players
        .into_iter()
        .enumerate()
        .map(|(idx, player)| (idx + 1, player))
        .collect();

    // Prepare embed (10 players per page)
    let pages = numbers::div_euclid(10, players.len());
    let init_players = players.iter().take(10);

    let country = ctx
        .get_country(country_code.as_str())
        .map(|name| (name, country_code));

    let embed_data =
        CountrySnipeListEmbed::new(country.as_ref(), sort, init_players, author_idx, (1, pages));

    // Creating the embed
    let builder = embed_data.into_builder().build().into();
    let response = data.create_message(&ctx, builder).await?.model().await?;

    // Pagination (runs detached; 60 is the pagination timeout passed to start)
    let pagination = CountrySnipeListPagination::new(response, players, country, sort, author_idx);

    let owner = author_id;

    tokio::spawn(async move {
        if let Err(err) = pagination.start(&ctx, owner, 60).await {
            warn!("{:?}", Report::new(err));
        }
    });

    Ok(())
}

/// Criterion by which the country's #1 leaderboard is ordered.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum SnipeOrder {
    Count,
    Pp,
    Stars,
    WeightedPp,
}

impl Default for SnipeOrder {
    // Matches the documented default of `sort=count`.
    fn default() -> Self {
        Self::Count
    }
}

/// Parsed arguments for the countrysnipelist command.
pub(super) struct CountryListArgs {
    // `None` means "use the linked user's country".
    pub country: Option<CountryCode>,
    pub sort: SnipeOrder,
}

impl CountryListArgs {
    // Parses up to two message arguments: an optional country (acronym, full
    // name, or `global`/`world`) and an optional `sort=` option. Returns a
    // user-facing error message on any unparsable argument.
    fn args(ctx: &Context, args: &mut Args<'_>) -> Result<Self, Cow<'static, str>> {
        let mut country = None;
        let mut sort = None;

        for arg in args.take(2).map(CowUtils::cow_to_ascii_lowercase) {
            // `key=value` style option (the '=' must not be the first char)
            if let Some(idx) = arg.find('=').filter(|&i| i > 0) {
                let key = &arg[..idx];
                let value = arg[idx + 1..].trim_end();

                match key {
                    SORT => {
                        sort = match value {
                            "count" => Some(SnipeOrder::Count),
                            "pp" => Some(SnipeOrder::Pp),
                            "stars" => Some(SnipeOrder::Stars),
                            "weighted" | "weightedpp" => Some(SnipeOrder::WeightedPp),
                            _ => {
                                let content = "Failed to parse `sort`. \
                                    Must be either `count`, `pp`, `stars`, or `weighted`.";

                                return Err(content.into());
                            }
                        };
                    }
                    _ => {
                        let content =
                            format!("Unrecognized option `{key}`.\nAvailable options are: `sort`.");

                        return Err(content.into());
                    }
                }
            } else if matches!(arg.as_ref(), "global" | "world") {
                country = Some("global".into());
            } else if arg.len() == 2 && arg.is_ascii() {
                // Two-letter ASCII input is treated as a country acronym.
                let code = arg.to_uppercase();

                if !ctx.contains_country(&code) {
                    let content = format!("The country acronym `{code}` is not supported :(");

                    return Err(content.into());
                }

                country = Some(code.into())
            } else if let Some(code) = CountryCode::from_name(arg.as_ref()) {
                // Full country name, resolved to its code.
                if !code.snipe_supported(ctx) {
                    let content = format!("The country `{code}` is not supported :(");

                    return Err(content.into());
                }

                country = Some(code);
            } else {
                let content = format!(
                    "Failed to parse `{arg}`.\n\
                    It must be either a valid country, a two ASCII character country code or \
                    `sort=count/pp/stars/weighted`"
                );

                return Err(content.into());
            }
        }

        let sort = sort.unwrap_or_default();

        Ok(Self { country, sort })
    }
}
use crate::fast_buf::{ConsumeBuf, FastBuf};
use crate::{AsyncRead, AsyncWrite};
use futures_util::ready;
use std::io;
use std::io::Read;
use std::mem;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;

#[derive(Debug)]
/// Our own BufReader.
///
/// The use AsyncBufRead in poll_for_crlfcrlf requires poll_fill_buf to
/// always fill from the underlying reader, also when there is content
/// buffered already.
pub struct BufIo<R> {
    // Underlying async stream.
    inner: R,
    // Read buffer; `pos..buf.len()` is the still-unconsumed portion.
    buf: FastBuf,
    // Read cursor into `buf`.
    pos: usize,
    // True while the last read poll returned Pending.
    pending_rx: bool,
    // True while the last write/flush poll returned Pending.
    pending_tx: bool,
    // Remainder of a half-written buffer, drained by poll_finish_pending_write.
    write_buf: Option<ConsumeBuf>,
    // A flush was requested but has not completed yet.
    need_flush: bool,
}

impl<R> BufIo<R>
where
    R: AsyncRead + AsyncWrite + Unpin,
{
    /// Creates a new `BufIo` with a read buffer of the given capacity.
    pub fn with_capacity(capacity: usize, inner: R) -> Self {
        BufIo {
            inner,
            buf: FastBuf::with_capacity(capacity),
            pos: 0,
            pending_rx: false,
            pending_tx: false,
            write_buf: None,
            need_flush: false,
        }
    }

    /// Grows the read buffer so it can hold at least `capacity` bytes.
    pub fn ensure_read_capacity(&mut self, capacity: usize) {
        self.buf.ensure_capacity(capacity);
    }

    /// Whether the last write-side poll returned Pending.
    pub fn pending_tx(&self) -> bool {
        self.pending_tx
    }

    /// Whether the last read-side poll returned Pending.
    pub fn pending_rx(&self) -> bool {
        self.pending_rx
    }
}

impl<R> BufIo<R>
where
    R: AsyncWrite + Unpin,
{
    /// Drives any previously half-written buffer and/or deferred flush to
    /// completion. Must complete before any new write is attempted.
    pub fn poll_finish_pending_write(
        self: Pin<&mut Self>,
        cx: &mut Context,
    ) -> Poll<io::Result<()>> {
        let this = self.get_mut();

        if let Some(buf) = &mut this.write_buf {
            loop {
                if buf.is_empty() {
                    break;
                }

                // we got stuff left to send
                let amount = match Pin::new(&mut this.inner).poll_write(cx, &buf[..]) {
                    Poll::Pending => {
                        trace!("poll_write: Pending");
                        this.pending_tx = true;
                        return Poll::Pending;
                    }
                    Poll::Ready(v) => {
                        trace!("poll_write: {:?}", v);
                        this.pending_tx = false;
                        v?
                    }
                };

                buf.consume(amount);
            }

            this.write_buf = None;
        }

        if this.need_flush {
            match Pin::new(&mut this.inner).poll_flush(cx) {
                Poll::Pending => {
                    trace!("poll_flush: Pending");
                    this.pending_tx = true;
                    return Poll::Pending;
                }
                Poll::Ready(v) => {
                    trace!("poll_write: {:?}", v);
                    this.pending_tx = false;
                    v?
                }
            }

            this.need_flush = false;
        }

        Ok(()).into()
    }

    /// Check that a poll_write definitely won't reject with Pending before accepting
    pub fn can_poll_write(&self) -> bool {
        self.write_buf.is_none() && !self.need_flush && !self.pending_tx
    }

    /// Write all or none of a buffer.
    ///
    /// This poll_write variant write the entire buf to the underlying writer or nothing,
    /// potentially using an internal buffer for half written responses when hitting Pending.
    pub fn poll_write_all(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut Option<&[u8]>,
        flush: bool,
    ) -> Poll<io::Result<()>> {
        let this = self.get_mut();

        // Any pending writes must be dealt with first.
        ready!(Pin::new(&mut *this).poll_finish_pending_write(cx))?;

        assert!(this.write_buf.is_none());
        assert!(!this.need_flush);

        // Take ownership of the incoming buf. If we can't write it entirely we will
        // Allocate into a ConsumeBuf for the remainder.
        let buf = if let Some(buf) = buf.take() {
            buf
        } else {
            return Ok(()).into();
        };

        let mut pos = 0;

        loop {
            if pos == buf.len() {
                break;
            }

            match Pin::new(&mut this.inner).poll_write(cx, &buf[pos..]) {
                Poll::Pending => {
                    trace!("poll_write: Pending");
                    // Half sent buffer, the rest is for poll_finish_pending_write.
                    this.pending_tx = true;
                    this.write_buf = Some(ConsumeBuf::new((&buf[pos..]).to_vec()));
                    this.need_flush = flush;
                    return Poll::Pending;
                }
                Poll::Ready(Err(e)) => {
                    trace!("poll_write err: {:?}", e);
                    this.pending_tx = false;
                    return Err(e).into();
                }
                Poll::Ready(Ok(amount)) => {
                    trace!("poll_write sent: {}", amount);
                    this.pending_tx = false;
                    pos += amount;
                }
            }
        }

        if flush {
            match Pin::new(&mut this.inner).poll_flush(cx) {
                Poll::Pending => {
                    trace!("poll_flush: Pending");
                    // Do this in poll_finish_pending_write later.
                    this.pending_tx = true;
                    this.need_flush = true;
                    return Poll::Pending;
                }
                Poll::Ready(v) => {
                    trace!("poll_flush: {:?}", v);
                    this.pending_tx = false;
                    v?
                }
            }
        }

        Ok(()).into()
    }
}

impl<R> BufIo<R>
where
    R: AsyncRead + Unpin,
{
    /// Returns the currently buffered (unconsumed) bytes, reading from the
    /// underlying stream when the buffer is empty or `force_append` is set.
    pub fn poll_fill_buf(
        self: Pin<&mut Self>,
        cx: &mut Context,
        force_append: bool,
    ) -> Poll<io::Result<&[u8]>> {
        let this = self.get_mut();

        let cur_len = this.buf.len();

        if cur_len == 0 || force_append {
            // when this reference is dropped, the buffer size is reset back.
            // this also extends the buffer with additional capacity if needed.
            let mut bref = this.buf.borrow();

            let read_into = &mut bref[cur_len..];

            match Pin::new(&mut this.inner).poll_read(cx, read_into) {
                Poll::Pending => {
                    trace!("poll_read: Pending");
                    this.pending_rx = true;
                    return Poll::Pending;
                }
                Poll::Ready(Err(e)) => {
                    trace!("poll_read err: {:?}", e);
                    this.pending_rx = false;
                    return Err(e).into();
                }
                Poll::Ready(Ok(amount)) => {
                    trace!("poll_read amount: {}", amount);
                    this.pending_rx = false;
                    // If poll_read is correct, we really have written amount bytes
                    // into the buf and this is safe.
                    unsafe {
                        bref.extend(amount);
                    }
                }
            }
        }

        let buf = &this.buf[this.pos..];

        Ok(buf).into()
    }

    pub fn can_take_read_buf(&self) -> bool {
        // can not take a partially read buf
        self.pos == 0
    }

    /// Moves the entire read buffer out, replacing it with a fresh one of the
    /// same capacity. Only valid when nothing has been partially consumed.
    pub fn take_read_buf(&mut self) -> Vec<u8> {
        let replace = FastBuf::with_capacity(self.buf.capacity());
        let buf = mem::replace(&mut self.buf, replace);
        self.pos = 0;
        buf.into_vec()
    }

    /// Marks `amount` buffered bytes as consumed; resets the buffer when all
    /// buffered content has been used up.
    pub fn consume(self: Pin<&mut Self>, amount: usize) {
        let this = self.get_mut();

        let new_pos = this.pos + amount;

        // can't consume more than we have.
        assert!(new_pos <= this.buf.len());

        if new_pos == this.buf.len() {
            // all was consumed, reset back to start.
            this.pos = 0;
            this.buf.empty();
        } else {
            this.pos = new_pos;
        }
    }

    /// Reads into `buf`, serving from the internal buffer first and only
    /// hitting the underlying stream once the buffer is drained.
    pub fn poll_read_buf(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        let this = self.get_mut();

        let has_amount = this.buf.len() - this.pos;

        if has_amount > 0 {
            let amt = (&this.buf[this.pos..]).read(buf)?;
            trace!("poll_read_buf from buffer: {}", amt);
            this.pos += amt;

            // reset if all is used up.
            if this.pos == this.buf.len() {
                this.pos = 0;
                this.buf.empty();
            }

            return Ok(amt).into();
        }

        // once inner buffer is used up, read directly from underlying.
        match Pin::new(&mut this.inner).poll_read(cx, buf) {
            Poll::Pending => {
                trace!("poll_read: Pending");
                this.pending_rx = true;
                Poll::Pending
            }
            r => {
                trace!("poll_read: {:?}", r);
                this.pending_rx = false;
                r
            }
        }
    }
}

// *********** BOILERPLATE BELOW ******************************

impl<R> AsyncRead for BufIo<R>
where
    R: AsyncRead + Unpin,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        let this = self.get_mut();
        Pin::new(this).poll_read_buf(cx, buf)
    }
}

impl<R> AsyncWrite for BufIo<R>
where
    R: AsyncWrite + Unpin,
{
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
        let this = self.get_mut();

        // Finish any half-written buffer before accepting new bytes.
        ready!(Pin::new(&mut *this).poll_finish_pending_write(cx))?;

        match Pin::new(&mut this.inner).poll_write(cx, buf) {
            Poll::Pending => {
                trace!("poll_write: Pending");
                this.pending_tx = true;
                Poll::Pending
            }
            r => {
                trace!("poll_write: {:?}", r);
                this.pending_tx = false;
                r
            }
        }
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        let this = self.get_mut();

        ready!(Pin::new(&mut *this).poll_finish_pending_write(cx))?;

        match Pin::new(&mut this.inner).poll_flush(cx) {
            Poll::Pending => {
                trace!("poll_flush: Pending");
                this.pending_tx = true;
                Poll::Pending
            }
            r => {
                trace!("poll_write: {:?}", r);
                this.pending_tx = false;
                r
            }
        }
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        let this = self.get_mut();

        ready!(Pin::new(&mut *this).poll_finish_pending_write(cx))?;

        match Pin::new(&mut this.inner).poll_close(cx) {
            Poll::Pending => {
                trace!("poll_close: Pending");
                this.pending_tx = true;
                Poll::Pending
            }
            r => {
                trace!("poll_close: {:?}", r);
                this.pending_tx = false;
                r
            }
        }
    }
}
use ark_bls12_381::Fr;
use ark_ff::{One, Zero};
use ark_poly::{
    polynomial::univariate::DensePolynomial as Polynomial, EvaluationDomain, Polynomial as poly,
    UVPolynomial,
};

// The quotient polynomial will encode the four checks for the multiset equality argument
// These checks are:
// 1) Z(X) evaluated at the first root of unity is 1
// 2) Z(X) is correct accumulated. z_(Xg) * g(X) = (1+beta)^n * z(X) * f(X)
// 3) The last element of h_1(x) is equal to the first element of h_2(x)
// 4) Z(x) evaluated at the last root of unity is 1
//
// We can denote check 1 and check 4 as point checks because they are checking the evaluation of Z(x) at a specific point
// We can denote check 3 as an interval check because it checks whether h_1 and h_2 combined form 's' without any gaps. See paper for more details on 's'
// We can denote check 2 as the term check
//
// Notice that the term check equation will determine the degree of the quotient polynomial
// We can compute it by adding the degrees of Z(x), f(x) and t(x).
// deg(Z(x)) = n because it has n + 1 elements
// deg(f(x)) = n although it has n elements, we must zero pad to ensure that f(x) evaluated on the n+1'th element is zero
// deg(t(x)) = n because we define it to have n + 1 elements.
// Summing the degrees gives us n + n + n = 3n
// However, similar to [GWC19](PLONK) we must divide by the vanishing polynomial
// So the degree of the quotient polynomial Q(x) is 3n - n = 2n
// Significance: Adding this protocol into PLONK will not "blow up" the degree of the quotient polynomial
// Where "blow up" denotes increasing the overall degree past 4n for standard plonk

/// Assembles all four checks into one polynomial and divides by the vanishing
/// polynomial of `domain`, returning `(quotient, remainder)`. The remainder
/// is zero exactly when every check holds.
pub fn compute<E: EvaluationDomain<Fr>>(
    domain: &E,
    z_poly: &Polynomial<Fr>,
    f_poly: &Polynomial<Fr>,
    t_poly: &Polynomial<Fr>,
    h_1_poly: &Polynomial<Fr>,
    h_2_poly: &Polynomial<Fr>,
    beta: Fr,
    gamma: Fr,
) -> (Polynomial<Fr>, Polynomial<Fr>) {
    // 1. Compute Point check polynomial
    let point_check = compute_point_checks(z_poly, domain);
    //2. Compute interval check polynomial
    let interval_check = compute_interval_check(h_1_poly, h_2_poly, domain);
    //3. Compute term check polynomial
    let term_check = compute_term_check(
        domain, z_poly, f_poly, t_poly, h_1_poly, h_2_poly, beta, gamma,
    );
    // Compute quotient polynomial
    let sum = &(&interval_check + &point_check) + &term_check;
    sum.divide_by_vanishing_poly(*domain).unwrap()
}

/// Batches checks (1) and (4): (Z(X) - 1) must vanish at the first and last
/// roots of unity.
fn compute_point_checks<E: EvaluationDomain<Fr>>(
    z_poly: &Polynomial<Fr>,
    domain: &E,
) -> Polynomial<Fr> {
    // Compute lagrange polynomials
    let l1_poly = compute_n_lagrange_poly(domain, 0);
    let ln_poly = compute_n_lagrange_poly(domain, domain.size() - 1);

    // Compute Z'(X) = Z(x) - 1
    let z_prime_poly = z_poly - &Polynomial::from_coefficients_vec(vec![Fr::one()]);

    // We can batch the two point checks into one with the following: (Z(X)-1)[L_1(x) + L_n(x)]
    //
    let l_poly = &l1_poly + &ln_poly;

    &z_prime_poly * &l_poly
}

/// Check (3): L_n(x) * (h_1(x) - h_2(x * g)) must vanish, i.e. the last
/// element of h_1 equals the first element of h_2.
fn compute_interval_check<E: EvaluationDomain<Fr>>(
    h_1_poly: &Polynomial<Fr>,
    h_2_poly: &Polynomial<Fr>,
    domain: &E,
) -> Polynomial<Fr> {
    // Increase domain size by two
    let domain_2n: E = EvaluationDomain::new(2 * domain.size()).unwrap();

    // Compute last lagrange polynomial in evaluation form
    let ln_evals = compute_n_lagrange_evaluations(domain.size(), domain.size() - 1);
    let ln_2n_evals = domain_2n.fft(&domain.ifft(&ln_evals));

    // Convert h_1 and h_2 to evaluation form
    let h_1_evals = domain_2n.fft(&h_1_poly);
    let mut h_2_evals = domain_2n.fft(&h_2_poly);

    // We need h_2(x * g) so push 2 extra elements into the domain
    // (index i+2 on the 2n-domain corresponds to the next n-domain root).
    h_2_evals.push(h_2_evals[0]);
    h_2_evals.push(h_2_evals[1]);

    // Compute [L_n(x)](h_1(x) - h_2(x * g))
    let i_evals: Vec<_> = (0..domain_2n.size())
        .into_iter()
        .map(|i| {
            let ln_i = ln_2n_evals[i];
            let h_1_i = h_1_evals[i];
            let h_2_i_next = h_2_evals[i + 2];
            ln_i * (h_1_i - h_2_i_next)
        })
        .collect();

    // Convert the evaluations for our point check to coefficient form
    let i_poly = Polynomial::from_coefficients_vec(domain_2n.ifft(&i_evals));

    i_poly
}

/// Check (2), the term check, split into the Z(X) part minus the Z(Xg) part.
pub fn compute_term_check<E: EvaluationDomain<Fr>>(
    domain: &E,
    z_poly: &Polynomial<Fr>,
    f_poly: &Polynomial<Fr>,
    t_poly: &Polynomial<Fr>,
    h_1_poly: &Polynomial<Fr>,
    h_2_poly: &Polynomial<Fr>,
    beta: Fr,
    gamma: Fr,
) -> Polynomial<Fr> {
    // The equation for this is quite big. Similar to PLONK, we can split the point check into two.
    // The first part will compute the grand product Z(X) term
    // The second part will compute the grand product Z(Xg) term

    // First Part
    let part_a = compute_term_check_a(domain, z_poly, f_poly, t_poly, beta, gamma);

    // Second part
    let part_b = compute_term_check_b(domain, z_poly, h_1_poly, h_2_poly, beta, gamma);

    &part_a - &part_b
}

// This computes the grand product term for Z(X) or F(\beta, \gamma)
fn compute_term_check_a<E: EvaluationDomain<Fr>>(
    domain: &E,
    z_poly: &Polynomial<Fr>,
    f_poly: &Polynomial<Fr>,
    t_poly: &Polynomial<Fr>,
    beta: Fr,
    gamma: Fr,
) -> Polynomial<Fr> {
    // Increase the domain size by 4
    let domain_4n: &E = &EvaluationDomain::new(4 * domain.size()).unwrap();

    // Convert all polynomials into evaluation form
    let z_evals = domain_4n.fft(&z_poly);
    let f_evals = domain_4n.fft(f_poly);
    let mut t_evals = domain_4n.fft(t_poly);

    // Add four terms to the t(x) evaluations as we need to compute t(Xg)
    // (index i+4 on the 4n-domain corresponds to the next n-domain root).
    t_evals.push(t_evals[0]);
    t_evals.push(t_evals[1]);
    t_evals.push(t_evals[2]);
    t_evals.push(t_evals[3]);

    let beta_one = Fr::one() + beta;

    // Compute the last element in the domain
    let g_n = domain.elements().last().unwrap();

    let i_evals: Vec<_> = (0..domain_4n.size())
        .into_iter()
        .zip(domain_4n.elements())
        .map(|(i, root_i)| {
            let z_i = z_evals[i];
            let f_i = f_evals[i];
            let t_i = t_evals[i];
            let t_i_next = t_evals[i + 4];

            // Compute X - g^n
            let a = root_i - g_n;

            // Compute Z(X)(1+beta)
            let b = z_i * beta_one;

            // Compute gamma + f(X)
            let c = gamma + f_i;

            // Compute gamma(1+beta) +t(x) + beta * t(Xg)
            let d = (gamma * beta_one) + t_i + (beta * t_i_next);

            a * b * c * d
        })
        .collect();

    // Convert the evaluations for our term check to coefficient form
    let i_poly = Polynomial::from_coefficients_vec(domain_4n.ifft(&i_evals));

    // Sanity check: the (X - g^n) factor forces a zero at the last root.
    assert_eq!(
        i_poly.evaluate(&domain.elements().last().unwrap()),
        Fr::zero()
    );

    i_poly
}

// This computes the grand product term for Z(Xg) or G(\beta, \gamma)
fn compute_term_check_b<E: EvaluationDomain<Fr>>(
    domain: &E,
    z_poly: &Polynomial<Fr>,
    h_1_poly: &Polynomial<Fr>,
    h_2_poly: &Polynomial<Fr>,
    beta: Fr,
    gamma: Fr,
) -> Polynomial<Fr> {
    // Increase the domain size by 4
    let domain_4n: &E = &EvaluationDomain::new(4 * domain.size()).unwrap();

    // Convert all polynomials into evaluation form, then add four terms to each evaluation as we need to compute their evaluations at the next root of unity
    let mut z_evals = domain_4n.fft(z_poly);
    z_evals.push(z_evals[0]);
    z_evals.push(z_evals[1]);
    z_evals.push(z_evals[2]);
    z_evals.push(z_evals[3]);

    let mut h_1_evals = domain_4n.fft(h_1_poly);
    h_1_evals.push(h_1_evals[0]);
    h_1_evals.push(h_1_evals[1]);
    h_1_evals.push(h_1_evals[2]);
    h_1_evals.push(h_1_evals[3]);

    let mut h_2_evals = domain_4n.fft(h_2_poly);
    h_2_evals.push(h_2_evals[0]);
    h_2_evals.push(h_2_evals[1]);
    h_2_evals.push(h_2_evals[2]);
    h_2_evals.push(h_2_evals[3]);

    // Compute (1 + beta)
    let beta_one = Fr::one() + beta;

    // Compute the last element in the domain
    let g_n = domain.elements().last().unwrap();

    let i_evals: Vec<_> = (0..domain_4n.size())
        .into_iter()
        .zip(domain_4n.elements())
        .map(|(i, root_i)| {
            let z_i_next = z_evals[i + 4];
            let h_1_i = h_1_evals[i];
            let h_1_i_next = h_1_evals[i + 4];
            let h_2_i = h_2_evals[i];
            let h_2_i_next = h_2_evals[i + 4];

            // Compute (X - g^n) Z(Xg)
            let a = (root_i - g_n) * z_i_next;

            // Compute [gamma * (1+beta)] + h_1(x) + beta * h_1(Xg)
            let b = (gamma * beta_one) + h_1_i + (beta * h_1_i_next);

            // Compute [gamma * (1+beta)] + h_2(x) + beta * h_2(Xg)
            let c = (gamma * beta_one) + h_2_i + (beta * h_2_i_next);

            a * b * c
        })
        .collect();

    // Convert the evaluations for our term check to coefficient form
    let i_poly = Polynomial::from_coefficients_vec(domain_4n.ifft(&i_evals));

    // Sanity check: the (X - g^n) factor forces a zero at the last root.
    assert_eq!(
        i_poly.evaluate(&domain.elements().last().unwrap()),
        Fr::zero()
    );

    i_poly
}

// Computes the n'th lagrange poly for a particular domain
// Easiest way is to compute the evaluation points, which will be zero at every position except for n
// Then IFFT to get the coefficient form
// Note: n=0 is the first lagrange polynomial and n = domain.size() -1 is the last lagrange polynomial
pub fn compute_n_lagrange_poly<E: EvaluationDomain<Fr>>(domain: &E, n: usize) -> Polynomial<Fr> {
    assert!(n <= domain.size() - 1);
    let mut evaluations = compute_n_lagrange_evaluations(domain.size(), n);
    domain.ifft_in_place(&mut evaluations);
    Polynomial::from_coefficients_vec(evaluations)
}

// Evaluation form of the n'th Lagrange basis polynomial: 1 at position n,
// 0 everywhere else.
fn compute_n_lagrange_evaluations(domain_size: usize, n: usize) -> Vec<Fr> {
    let mut lagrange_evaluations = vec![Fr::zero(); domain_size];
    lagrange_evaluations[n] = Fr::one();
    lagrange_evaluations
}

#[cfg(test)]
mod test {
    use ark_poly::Radix2EvaluationDomain;

    use super::*;
    use crate::multiset::{multiset_equality::*, MultiSet};

    #[test]
    fn test_quotient_poly() {
        // Compute f
        let mut f = MultiSet::new();
        f.push(Fr::from(2u8));
        f.push(Fr::from(3u8));
        f.push(Fr::from(4u8));

        // Compute t
        let mut t = MultiSet::new();
        t.push(Fr::from(2u8));
        t.push(Fr::from(3u8));
        t.push(Fr::from(4u8));
        t.push(Fr::from(5u8));

        // Setup domain
        let domain: Radix2EvaluationDomain<Fr> = EvaluationDomain::new(f.len()).unwrap();

        let beta = Fr::from(10u8);
        let gamma = Fr::from(11u8);

        // Compute h_1 and h_2
        let (h_1, h_2) = compute_h1_h2(&f, &t);

        // Convert h_1 and h_2 to polynomials
        let h_1_poly = h_1.to_polynomial(&domain);
        let h_2_poly = h_2.to_polynomial(&domain);

        // Compute f(x)
        let f_poly = f.to_polynomial(&domain);
        assert_eq!(f_poly.degree(), f.len());

        // Compute t(x)
        let t_poly = t.to_polynomial(&domain);
        assert_eq!(t_poly.degree(), t.len() - 1);

        // Compute Z(x) poly
        let z_evaluations = compute_accumulator_values(&f, &t, &h_1, &h_2, beta, gamma);
        let z_poly = Polynomial::from_coefficients_vec(domain.ifft(&z_evaluations));

        let (_, remainder) = compute(
            &domain, &z_poly, &f_poly, &t_poly, &h_1_poly, &h_2_poly, beta, gamma,
        );

        // All four checks hold, so the quotient division must be exact.
        assert!(remainder.is_zero());
    }
}
use std::fmt;
use std::iter::FusedIterator;

use log::info;

/// Advanced iter is an iterator that is advanced one. It is like Peekable<T> except the peek item
/// is already advanced.
#[derive(Debug, Clone)]
pub struct AdvancedIter<T: Iterator> {
    // Underlying iterator, always one step ahead of what `next()` returns.
    iter: T,
    // The pre-fetched item that `peek()` exposes; None once exhausted.
    peek_item: Option<T::Item>,
    // Position of `peek_item` in the original sequence; None once exhausted.
    peek_pos: Option<usize>,
    // Position of the most recently returned item; None before the first `next()`.
    current_pos: Option<usize>,
}

impl<T: Iterator> AdvancedIter<T> {
    /// Wraps `iter`, immediately pulling its first item so peeking never
    /// needs mutable access.
    pub fn new(mut iter: T) -> AdvancedIter<T> {
        let peek_item = iter.next();
        let current_pos = None;
        let peek_pos = Some(0);
        AdvancedIter {
            iter,
            peek_item,
            peek_pos,
            current_pos,
        }
    }

    /// Position of the peeked (not yet returned) item, if any.
    pub fn peek_pos(&self) -> Option<usize> {
        self.peek_pos
    }

    /// Position of the last item returned by `next()`, if any.
    pub fn current_pos(&self) -> Option<usize> {
        self.current_pos
    }

    /// Borrow of the peeked item (same as `Peekable::peek`, but `&self`).
    pub fn peek_item(&self) -> Option<&<Self as Iterator>::Item> {
        self.peek_item.as_ref()
    }
}

impl<T: Iterator> Iterator for AdvancedIter<T> {
    type Item = T::Item;

    fn next(&mut self) -> Option<Self::Item> {
        // Hand out the pre-fetched item and pull the next one into its place.
        let res = self.peek_item.take();
        self.current_pos = self.peek_pos;
        match self.iter.next() {
            Some(item) => {
                self.peek_pos = Some(self.peek_pos.unwrap() + 1);
                self.peek_item = Some(item);
            }
            None => {
                self.peek_pos = None;
                self.peek_item = None;
            }
        }
        res
    }
}

/// A `peek` that only needs `&self`, unlike `std::iter::Peekable::peek`.
pub trait Peekable: Iterator {
    fn peek(&self) -> Option<&Self::Item>;
}

impl<T: Iterator> Peekable for AdvancedIter<T> {
    fn peek(&self) -> Option<&Self::Item> {
        self.peek_item.as_ref()
    }
}

// NOTE(review): this claims fused behaviour for ANY `T: Iterator`. If a
// non-fused inner iterator yields `Some` again after `None`, `next()` would
// call `self.peek_pos.unwrap()` while it is `None` and panic — consider
// requiring `T: FusedIterator` instead. TODO confirm with callers.
impl<T: Iterator> FusedIterator for AdvancedIter<T> {}

/// Lexer-style helpers on top of a peekable iterator: conditionally consume
/// items that match an expected value or predicate.
pub trait Accept<T: PartialEq + fmt::Debug>: Iterator<Item = T> + Peekable {
    /// Consumes and returns true iff the peeked item equals `valid`.
    fn accept(&mut self, valid: Self::Item) -> bool {
        let peeked = self.peek();
        match peeked {
            Some(c) if c == &valid => {
                info!("char `{:?}` is accepted", c);
                self.next(); // consume the token
                true
            }
            _ => {
                info!("char `{:?}` is not accepted", self.peek());
                false
            }
        }
    }

    // fn accept_return(&mut self, valid: Self::Item) -> Result<Self::Item, Option<&Self::Item>> {
    //     let peeked = self.peek();
    //     match peeked {
    //         Some(c) if c == &valid => {
    //             info!("char `{:?}` is accepted", c);
    //             let next = self.next();
    //             Ok(self.next().expect("BUG: should have some after peek"))
    //         }
    //         item => {
    //             info!("char `{:?}` is not accepted", self.peek());
    //             Err(item)
    //         }
    //     }
    // }

    // fn accept_or<E>(&mut self, valid: Self::Item, err: E) -> Result<(), E>
    // where
    //     E: std::error::Error,
    // {
    //     if self.accept(valid) {
    //         Ok(())
    //     } else {
    //         Err(err)
    //     }
    // }

    /// Accepts while predicate returns true. Does not accept the char the predicate returns
    /// false for.
    fn accept_while(&mut self, predicate: impl Fn(&Self::Item) -> bool) {
        while let Some(c) = self.peek() {
            if !predicate(&c) {
                info!("char `{:?}` is not accepted", c);
                break;
            } else {
                info!("char `{:?}` is accepted", c);
                self.next();
            }
        }
    }
}

// Blanket impl: any peekable iterator over comparable, debuggable items gets
// the `Accept` helpers for free.
impl<T, U> Accept<U> for T
where
    T: Iterator<Item = U> + Peekable,
    U: PartialEq + fmt::Debug,
{
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn advanced_iter_simple() {
        let chars = "hi".chars();
        let mut advanced_iter = AdvancedIter::new(chars);
        assert_eq!(advanced_iter.next(), Some('h'));
    }
}
use nom::error::ErrorKind; use std::io::Error as IoError; use std::io::ErrorKind as IoErrorKind; use std::io::ErrorKind::InvalidInput; pub type ParserResult<'a, T> = Result<(&'a [u8], T), nom::Err<(&'a [u8], ErrorKind)>>; pub fn slice_to_string(slice: &[u8]) -> Result<String, IoError> { if slice.is_empty() { Err(IoError::new(InvalidInput, "slice has length 0")) } else { Ok( String::from_utf8(Vec::from(slice)) .map_err(|_| IoError::new(IoErrorKind::InvalidInput, "Failed to parse utf8 string"))? ) } } pub fn parse_u32(slice: &[u8]) -> Result<u32, IoError> { Ok( ::std::str::from_utf8(slice) .map_err(|_| IoError::new(IoErrorKind::InvalidInput, "Failed to parse utf8 u32 integer"))? .parse() .map_err(|_| IoError::new(IoErrorKind::InvalidInput, "Failed to parse u32 integer"))? ) } pub fn parse_u64(slice: &[u8]) -> Result<u64, IoError> { Ok( ::std::str::from_utf8(slice) .map_err(|_| IoError::new(IoErrorKind::InvalidInput, "Failed to parse utf8 u64 integer"))? .parse() .map_err(|_| IoError::new(IoErrorKind::InvalidInput, "Failed to parse u64 integer"))? ) }
use shader_roy_metal_sl_interface::*;

// --- SDF utility library

/// Boolean subtraction of two SDFs: the region inside `d2` but outside `d1`.
/// Canonical form is `max(-d1, d2)`.
///
/// BUGFIX: the original `-d1.max(d2)` parses as `-(d1.max(d2))` because a
/// method call binds tighter than unary minus, which negated the *union*
/// instead of subtracting.
pub fn subtract(d1: f32, d2: f32) -> f32 {
    (-d1).max(d2)
}

/// Signed distance from point `p` to a sphere at `center` with `radius`.
pub fn sd_sphere(p: Vec3, center: Vec3, radius: f32) -> f32 {
    p.distance(center) - radius
}

/// Signed distance from point `p` to an axis-aligned 2D box at `pos` with
/// half-extents `size`.
///
/// NOTE(review): the canonical 2D box SDF interior term is
/// `d.x.max(d.y).min(0.0)`; `clamped(d.y, 0.0)` is assumed to be this
/// library's equivalent — confirm against `shader_roy_metal_sl_interface`.
pub fn sd_box(p: Vec2, pos: Vec2, size: Vec2) -> f32 {
    let d: Vec2 = (p - pos).abs() - size;
    d.x.clamped(d.y, 0.0) + d.max(0.0).magnitude()
}

// polynomial smooth min (k = 0.1);
/// Cubic polynomial smooth minimum (Inigo Quilez's formulation):
/// `h = max(k - |a - b|, 0); min(a, b) - h^3 / (6 k^2)`.
///
/// BUGFIX: the original wrote `k - (a - b).abs().max(0.0)`, which applies
/// `.max(0.0)` to the always-non-negative `|a - b|` instead of clamping `h`.
/// For `|a - b| > k` that let `h` go negative, so `-h^3` *added* a huge
/// positive term instead of degrading gracefully to `min(a, b)`.
pub fn smin_cubic(a: f32, b: f32, k: f32) -> f32 {
    let h: f32 = (k - (a - b).abs()).max(0.0);
    a.min(b) - h * h * h / (6.0 * k * k)
}

/// SDF union: the closer of the two surfaces.
pub fn op_u(d1: f32, d2: f32) -> f32 {
    d1.min(d2)
}

/// SDF union with smooth blending (fixed smoothing radius k = 0.2).
pub fn op_blend(d1: f32, d2: f32) -> f32 {
    let k: f32 = 0.2;
    smin_cubic(d1, d2, k)
}
/// An enum to represent all characters in the Tagbanwa block. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum Tagbanwa { /// \u{1760}: 'ᝠ' LetterA, /// \u{1761}: 'ᝡ' LetterI, /// \u{1762}: 'ᝢ' LetterU, /// \u{1763}: 'ᝣ' LetterKa, /// \u{1764}: 'ᝤ' LetterGa, /// \u{1765}: 'ᝥ' LetterNga, /// \u{1766}: 'ᝦ' LetterTa, /// \u{1767}: 'ᝧ' LetterDa, /// \u{1768}: 'ᝨ' LetterNa, /// \u{1769}: 'ᝩ' LetterPa, /// \u{176a}: 'ᝪ' LetterBa, /// \u{176b}: 'ᝫ' LetterMa, /// \u{176c}: 'ᝬ' LetterYa, /// \u{176e}: 'ᝮ' LetterLa, /// \u{176f}: 'ᝯ' LetterWa, /// \u{1770}: 'ᝰ' LetterSa, /// \u{1772}: 'ᝲ' VowelSignI, /// \u{1773}: 'ᝳ' VowelSignU, } impl Into<char> for Tagbanwa { fn into(self) -> char { match self { Tagbanwa::LetterA => 'ᝠ', Tagbanwa::LetterI => 'ᝡ', Tagbanwa::LetterU => 'ᝢ', Tagbanwa::LetterKa => 'ᝣ', Tagbanwa::LetterGa => 'ᝤ', Tagbanwa::LetterNga => 'ᝥ', Tagbanwa::LetterTa => 'ᝦ', Tagbanwa::LetterDa => 'ᝧ', Tagbanwa::LetterNa => 'ᝨ', Tagbanwa::LetterPa => 'ᝩ', Tagbanwa::LetterBa => 'ᝪ', Tagbanwa::LetterMa => 'ᝫ', Tagbanwa::LetterYa => 'ᝬ', Tagbanwa::LetterLa => 'ᝮ', Tagbanwa::LetterWa => 'ᝯ', Tagbanwa::LetterSa => 'ᝰ', Tagbanwa::VowelSignI => 'ᝲ', Tagbanwa::VowelSignU => 'ᝳ', } } } impl std::convert::TryFrom<char> for Tagbanwa { type Error = (); fn try_from(c: char) -> Result<Self, Self::Error> { match c { 'ᝠ' => Ok(Tagbanwa::LetterA), 'ᝡ' => Ok(Tagbanwa::LetterI), 'ᝢ' => Ok(Tagbanwa::LetterU), 'ᝣ' => Ok(Tagbanwa::LetterKa), 'ᝤ' => Ok(Tagbanwa::LetterGa), 'ᝥ' => Ok(Tagbanwa::LetterNga), 'ᝦ' => Ok(Tagbanwa::LetterTa), 'ᝧ' => Ok(Tagbanwa::LetterDa), 'ᝨ' => Ok(Tagbanwa::LetterNa), 'ᝩ' => Ok(Tagbanwa::LetterPa), 'ᝪ' => Ok(Tagbanwa::LetterBa), 'ᝫ' => Ok(Tagbanwa::LetterMa), 'ᝬ' => Ok(Tagbanwa::LetterYa), 'ᝮ' => Ok(Tagbanwa::LetterLa), 'ᝯ' => Ok(Tagbanwa::LetterWa), 'ᝰ' => Ok(Tagbanwa::LetterSa), 'ᝲ' => Ok(Tagbanwa::VowelSignI), 'ᝳ' => Ok(Tagbanwa::VowelSignU), _ => Err(()), } } } impl Into<u32> for Tagbanwa { fn into(self) -> u32 { let c: char = self.into(); let hex 
= c .escape_unicode() .to_string() .replace("\\u{", "") .replace("}", ""); u32::from_str_radix(&hex, 16).unwrap() } } impl std::convert::TryFrom<u32> for Tagbanwa { type Error = (); fn try_from(u: u32) -> Result<Self, Self::Error> { if let Ok(c) = char::try_from(u) { Self::try_from(c) } else { Err(()) } } } impl Iterator for Tagbanwa { type Item = Self; fn next(&mut self) -> Option<Self> { let index: u32 = (*self).into(); use std::convert::TryFrom; Self::try_from(index + 1).ok() } } impl Tagbanwa { /// The character with the lowest index in this unicode block pub fn new() -> Self { Tagbanwa::LetterA } /// The character's name, in sentence case pub fn name(&self) -> String { let s = std::format!("Tagbanwa{:#?}", self); string_morph::to_sentence_case(&s) } }
use super::expression::Expression;
use super::reference::LocalReference;
use super::variable::GlobalVariable;
use crate::ast_transform::Transformer;
use crate::scm::Scm;
use crate::source::SourceLocation;
use crate::syntax::Reify;
use crate::utils::{Named, Sourced};

// `Assignment` is a two-variant sum over local (lexically-bound) and global
// variable assignments. The `sum_type!` macro generates the enum plus
// `From`/`Into` impls for each variant, which is what the `.into()` calls in
// `default_transform` rely on.
sum_type! {
    #[derive(Debug, Clone)]
    pub type Assignment(Expression) = LocalAssignment | GlobalAssignment;
}

impl Assignment {
    /// Dispatches the transformation to the concrete variant and re-wraps the
    /// result back into `Assignment` via the macro-generated `From` impls.
    pub fn default_transform(self, visitor: &mut impl Transformer) -> Self {
        use Assignment::*;
        match self {
            LocalAssignment(x) => x.default_transform(visitor).into(),
            GlobalAssignment(x) => x.default_transform(visitor).into(),
        }
    }
}

impl Sourced for Assignment {
    /// Source span of the whole assignment form, borrowed from the variant.
    fn source(&self) -> &SourceLocation {
        use Assignment::*;
        match self {
            LocalAssignment(x) => &x.span,
            GlobalAssignment(x) => &x.span,
        }
    }
}

/// Assignment to a lexically-bound (local) variable: `(set! <local> <form>)`.
#[derive(Debug, Clone)]
pub struct LocalAssignment {
    // Resolved reference to the local variable being assigned.
    pub reference: LocalReference,
    // The expression whose value is stored; boxed to keep the struct small.
    pub form: Box<Expression>,
    pub span: SourceLocation,
}

impl_sourced!(LocalAssignment);

impl LocalAssignment {
    /// Builds a local assignment; accepts anything convertible into a boxed
    /// expression so callers can pass `Expression` directly.
    pub fn new(
        reference: LocalReference,
        form: impl Into<Box<Expression>>,
        span: SourceLocation,
    ) -> Self {
        LocalAssignment {
            reference,
            form: form.into(),
            span,
        }
    }

    /// Transforms the assigned form in place and returns self.
    pub fn default_transform(mut self, visitor: &mut impl Transformer) -> Self {
        *self.form = self.form.transform(visitor);
        self
    }
}

/// Assignment to a global variable: `(set! <global> <form>)`.
#[derive(Debug, Clone)]
pub struct GlobalAssignment {
    // The global variable being assigned.
    pub variable: GlobalVariable,
    // The expression whose value is stored; boxed to keep the struct small.
    pub form: Box<Expression>,
    // Private: accessed through the `Sourced` impl generated below.
    span: SourceLocation,
}

impl_sourced!(GlobalAssignment);

impl GlobalAssignment {
    /// Builds a global assignment; accepts anything convertible into a boxed
    /// expression so callers can pass `Expression` directly.
    pub fn new(
        variable: GlobalVariable,
        form: impl Into<Box<Expression>>,
        span: SourceLocation,
    ) -> Self {
        GlobalAssignment {
            variable,
            form: form.into(),
            span,
        }
    }

    /// Transforms the assigned form in place and returns self.
    pub fn default_transform(mut self, visitor: &mut impl Transformer) -> Self {
        *self.form = self.form.transform(visitor);
        self
    }
}

impl Reify for Assignment {
    /// Reconstructs the surface-syntax s-expression `(set! <name> <form>)`
    /// from the AST node.
    fn reify(&self) -> Scm {
        let var = match self {
            Assignment::LocalAssignment(a) => Scm::Symbol(a.reference.var.name()),
            Assignment::GlobalAssignment(a) => Scm::Symbol(a.variable.name()),
        };
        let val = match self {
            Assignment::LocalAssignment(a) => a.form.reify(),
            Assignment::GlobalAssignment(a) => a.form.reify(),
        };
        Scm::list(vec![Scm::symbol("set!"), var, val])
    }
}
use std::io;

/// Converts whole degrees Celsius to whole degrees Fahrenheit.
/// The intermediate math is done in f32 and truncated toward zero,
/// exactly like the original.
fn celsius_to_farenheit(deg: i32) -> i32 {
    ((deg as f32 * 1.8) + 32.0) as i32
}

/// Converts whole degrees Fahrenheit to whole degrees Celsius,
/// truncating toward zero.
fn farenheit_to_celsius(deg: i32) -> i32 {
    ((deg as f32 - 32.0) / 1.8) as i32
}

/// Prints `message`, then reads and returns one line from stdin.
/// Panics with "Failed to read line" on a read error, matching the
/// original inline handling.
fn prompt(message: &str) -> String {
    println!("{}", message);
    let mut line = String::new();
    io::stdin()
        .read_line(&mut line)
        .expect("Failed to read line");
    line
}

fn main() {
    let units_input = prompt("input your current units");
    let units = units_input.trim();

    let degrees: i32 = prompt("Input number of degrees")
        .trim()
        .parse()
        .expect("Please type a number");

    match units {
        "C" => println!(
            "you entered {} degrees celsius, which is {} farenheit",
            degrees,
            celsius_to_farenheit(degrees)
        ),
        "F" => println!(
            "you entered {} degrees farenheit, which is {} celsius",
            degrees,
            farenheit_to_celsius(degrees)
        ),
        _ => println!("please enter \"C\" or \"F\", you entered {}", units),
    }
}
use super::interface::{LinkReader, LinkWriter};
use smol::Async;
use std::{
    hash,
    net::{SocketAddr, UdpSocket},
};
use yggy_core::dev::*;

/// A UDP link endpoint: a non-blocking (smol `Async`-wrapped) `UdpSocket`
/// together with the local address it was bound to.
///
/// Equality and hashing are based on `addr` only, not on the socket itself.
#[derive(Debug)]
pub struct UDPSocket {
    // Local address supplied at `bind` time (also returned by `local_addr`).
    addr: SocketAddr,
    // The async-wrapped OS socket.
    socket: Async<UdpSocket>,
}

impl UDPSocket {
    /// Binds a new non-blocking UDP socket to `addr`.
    /// I/O failures are surfaced as `ConnError::Interface`.
    #[inline]
    pub fn bind(addr: SocketAddr) -> Result<Self, Error> {
        let socket = Async::<UdpSocket>::bind(addr).map_err(ConnError::Interface)?;
        Ok(Self { addr, socket })
    }

    /// Connects the underlying socket to a single remote peer, restricting
    /// future sends/receives to that address.
    #[inline]
    pub fn connect(&self, addr: &SocketAddr) -> Result<(), Error> {
        Ok(self
            .socket
            .get_ref()
            .connect(addr)
            .map_err(ConnError::Interface)?)
    }

    /// The address this socket was bound with (the value passed to `bind`,
    /// not re-queried from the OS).
    #[inline]
    pub fn local_addr(&self) -> &SocketAddr {
        &self.addr
    }

    /// The connected peer's address; errors if `connect` has not been called.
    #[inline]
    pub fn remote_addr(&self) -> Result<SocketAddr, Error> {
        Ok(self
            .socket
            .get_ref()
            .peer_addr()
            .map_err(ConnError::Interface)?)
    }

    /// Splits the socket into read/write halves wrapped in the link-layer
    /// `LinkReader`/`LinkWriter` enums.
    #[inline]
    pub fn split(self) -> (LinkReader, LinkWriter) {
        let (r, w) = io::AsyncReadExt::split(self);
        (LinkReader::UDP(r), LinkWriter::UDP(w))
    }
}

impl Eq for UDPSocket {}

// NOTE(review): equality ignores the underlying socket handle; two sockets
// bound to the same address compare equal.
impl PartialEq for UDPSocket {
    fn eq(&self, other: &Self) -> bool {
        self.addr == other.addr
    }
}

impl hash::Hash for UDPSocket {
    // Hash must agree with `PartialEq`, so only `addr` is hashed.
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.addr.hash(state);
    }
}

// TODO: `AsyncRead`/`AsyncWrite` are stubs; the commented-out code sketches
// the intended delegation to the inner `Async<UdpSocket>`.
impl AsyncRead for UDPSocket {
    #[inline]
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context,
        buf: &mut [u8],
    ) -> task::Poll<Result<usize, io::Error>> {
        // If we're connected to a remote addr,
        // if let Some(addr) = self.remote_addr().ok() {
        // } else {
        // }
        // let reader = self.socket;
        // futures::pin_mut!(reader);
        // reader.poll_read(cx, buf)
        unimplemented!()
    }
}

impl AsyncWrite for UDPSocket {
    #[inline]
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context,
        buf: &[u8],
    ) -> task::Poll<Result<usize, io::Error>> {
        // let writer = &mut self.writer;
        // futures::pin_mut!(writer);
        // writer.poll_write(cx, buf)
        unimplemented!()
    }

    #[inline]
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context,
    ) -> task::Poll<Result<(), io::Error>> {
        // let writer = &mut self.writer;
        // futures::pin_mut!(writer);
        // writer.poll_flush(cx)
        unimplemented!()
    }

    #[inline]
    fn poll_close(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context,
    ) -> task::Poll<Result<(), io::Error>> {
        // let writer = &mut self.writer;
        // futures::pin_mut!(writer);
        // writer.poll_close(cx)
        unimplemented!()
    }
}
use std::io::{Result, Write};

use pulldown_cmark::{Tag, Event};

use crate::gen::{self, State, States, Generator, Document};

/// Generator state for a markdown list, rendered as a LaTeX
/// `enumerate` (ordered) or `itemize` (unordered) environment.
#[derive(Debug)]
pub struct List {
    /// `Some(n)` for an ordered list whose first item is numbered `n`;
    /// `None` for a bullet list.
    start: Option<usize>,
}

impl<'a> State<'a> for List {
    fn new(tag: Tag<'a>, gen: &mut Generator<'a, impl Document<'a>, impl Write>) -> Result<Self> {
        let start = match tag {
            Tag::List(start) => start,
            _ => unreachable!("List::new must be called with Tag::List"),
        };
        match start {
            Some(first_number) => {
                // LaTeX increments the counter before each item, so the
                // counter is pre-set to one less than the starting number.
                let counter_value = first_number as i32 - 1;
                // Counter names encode nesting depth: enumi, enumii, ...
                let enumerate_depth =
                    1 + gen.iter_stack().filter(|state| state.is_enumerate_list()).count();
                writeln!(gen.get_out(), "\\begin{{enumerate}}")?;
                writeln!(
                    gen.get_out(),
                    "\\setcounter{{enum{}}}{{{}}}",
                    "i".repeat(enumerate_depth),
                    counter_value,
                )?;
            }
            None => writeln!(gen.get_out(), "\\begin{{itemize}}")?,
        }
        Ok(List { start })
    }

    fn finish(self, gen: &mut Generator<'a, impl Document<'a>, impl Write>, peek: Option<&Event<'a>>) -> Result<()> {
        // Close whichever environment `new` opened.
        let environment = if self.start.is_some() { "enumerate" } else { "itemize" };
        writeln!(gen.get_out(), "\\end{{{}}}", environment)?;
        Ok(())
    }
}

impl<'a> gen::List<'a> for List {
    fn is_enumerate(&self) -> bool {
        self.start.is_some()
    }
}

/// Generator state for a single list item (`\item`).
#[derive(Debug)]
pub struct Item;

impl<'a> State<'a> for Item {
    fn new(tag: Tag<'a>, gen: &mut Generator<'a, impl Document<'a>, impl Write>) -> Result<Self> {
        write!(gen.get_out(), "\\item ")?;
        Ok(Item)
    }

    fn finish(self, gen: &mut Generator<'a, impl Document<'a>, impl Write>, peek: Option<&Event<'a>>) -> Result<()> {
        // Terminate the item's line.
        writeln!(gen.get_out())?;
        Ok(())
    }
}
//! Data-access object for the per-table "navi" table, which tracks the
//! latest revision and version number for every primary key of a versioned
//! table (the version-revision resolver's backing store).

mod create_table_sql_for_navi;
pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) mod navi;
mod navi_table_name;

use std::{cell::RefCell, rc::Rc};

use crate::sqlite::{
    sqlite_rowid::SqliteRowid, sqlite_types::VrrEntries, to_sql_string::ToSqlString,
    transaction::sqlite_tx::SqliteTx,
};
use apllodb_immutable_schema_engine_domain::{
    row::pk::{apparent_pk::ApparentPrimaryKey, full_pk::revision::Revision},
    version::id::VersionId,
    vtable::{id::VTableId, VTable},
};
use apllodb_shared_components::{ApllodbResult, SqlType};
use apllodb_storage_engine_interface::{ColumnDataType, ColumnName};
use create_table_sql_for_navi::CreateTableSqlForNavi;

use self::{
    navi::{ExistingNaviWithPk, Navi},
    navi_table_name::NaviTableName,
};

/// DAO over the navi table; all statements run on the shared SQLite
/// transaction handle.
#[derive(Debug)]
pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) struct NaviDao {
    sqlite_tx: Rc<RefCell<SqliteTx>>,
}

const CNAME_ROWID: &str = "rowid"; // SQLite's keyword
const CNAME_REVISION: &str = "revision";
const CNAME_VERSION_NUMBER: &str = "version_number";

impl NaviDao {
    /// Wraps the shared transaction handle.
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) fn new(
        sqlite_tx: Rc<RefCell<SqliteTx>>,
    ) -> Self {
        Self { sqlite_tx }
    }

    /// Creates the navi table for `vtable` (DDL built by
    /// `CreateTableSqlForNavi`).
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn create_table(
        &self,
        vtable: &VTable,
    ) -> ApllodbResult<()> {
        let sql = CreateTableSqlForNavi::from(vtable);
        self.sqlite_tx.borrow_mut().execute(sql.as_str()).await?;
        Ok(())
    }

    /// Returns, for every PK in the table, its newest revision row —
    /// skipping PKs whose newest revision is a deletion marker
    /// (`version_number IS NULL`).
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn full_scan_latest_revision(
        &self,
        vtable: &VTable,
    ) -> ApllodbResult<Vec<ExistingNaviWithPk>> {
        let navi_table_name = NaviTableName::from(vtable.table_name().clone());

        let sql = format!(
            "
SELECT {pk_column_names}, {cname_rowid}, {cname_revision}, {cname_version_number}
  FROM {navi_table_name}
  GROUP BY {pk_column_names}
  HAVING
    {cname_revision} = MAX({cname_revision})
    AND {cname_version_number} IS NOT NULL
",
            pk_column_names = vtable
                .table_wide_constraints()
                .pk_column_names()
                .to_sql_string(),
            cname_rowid = CNAME_ROWID,
            cname_revision = CNAME_REVISION,
            cname_version_number = CNAME_VERSION_NUMBER,
            navi_table_name = navi_table_name.to_sql_string(),
        );

        let cdt_rowid = self.cdt_rowid();
        let cdt_revision = self.cdt_revision();
        let cdt_version_number = self.cdt_version_number();
        let mut column_data_types = vec![&cdt_rowid, &cdt_revision, &cdt_version_number];
        // The PK columns are part of the projection too, so their types must
        // be declared for row decoding.
        for pk_cdt in vtable.table_wide_constraints().pk_column_data_types() {
            column_data_types.push(pk_cdt);
        }

        let rows = self
            .sqlite_tx
            .borrow_mut()
            .query(
                &sql,
                &navi_table_name.to_table_name(),
                &column_data_types,
                &[],
            )
            .await?;
        let schema = rows.as_schema().clone();

        // `from_navi_row` yields None for rows that don't decode to an
        // existing navi; flatten drops them.
        let ret: Vec<ExistingNaviWithPk> = rows
            .map(|r| ExistingNaviWithPk::from_navi_row(vtable, &schema, r))
            .collect::<ApllodbResult<Vec<Option<ExistingNaviWithPk>>>>()?
            .into_iter()
            .flatten()
            .collect();

        Ok(ret)
    }

    /// Looks up the newest revision row for a single primary key; returns
    /// `Navi::NotExist` when the PK has no navi row at all.
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn probe_latest_revision(
        &self,
        vtable_id: &VTableId,
        apk: &ApparentPrimaryKey,
    ) -> ApllodbResult<Navi> {
        let navi_table_name = NaviTableName::from(vtable_id.table_name().clone());

        let sql = format!(
            "
SELECT {cname_rowid}, {cname_version_number}, {cname_revision}
  FROM {navi_table_name} AS {vtable_name}
  WHERE {apk_condition}
  ORDER BY {cname_revision} DESC
  LIMIT 1;
", // FIXME SQL-i
            cname_rowid = CNAME_ROWID,
            cname_revision = CNAME_REVISION,
            cname_version_number = CNAME_VERSION_NUMBER,
            navi_table_name = navi_table_name.to_sql_string(),
            vtable_name = vtable_id.table_name().to_sql_string(),
            apk_condition = apk.to_condition_expression()?.to_sql_string(),
        );

        let cdt_rowid = self.cdt_rowid();
        let cdt_revision = self.cdt_revision();
        let cdt_version_number = self.cdt_version_number();
        let column_data_types = vec![&cdt_rowid, &cdt_revision, &cdt_version_number];

        let mut rows = self
            .sqlite_tx
            .borrow_mut()
            .query(
                &sql,
                &navi_table_name.to_table_name(),
                &column_data_types,
                &[],
            )
            .await?;
        let schema = rows.as_schema().clone();

        let navi = match rows.next() {
            None => Navi::NotExist,
            Some(mut r) => Navi::from_navi_row(&schema, &mut r)?,
        };
        Ok(navi)
    }

    /// Returns lastly inserted row's ROWID.
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn insert(
        &self,
        apk: &ApparentPrimaryKey,
        revision: &Revision,
        version_id: &VersionId,
    ) -> ApllodbResult<SqliteRowid> {
        let sql = format!(
            "
INSERT INTO {navi_table_name} ({pk_column_names}, {cname_revision}, {cname_version_number})
  VALUES ({pk_sql_values}, {revision}, {version_number});
", // FIXME SQL-i
            navi_table_name =
                NaviTableName::from(version_id.vtable_id().table_name().clone()).to_sql_string(),
            pk_column_names = apk.column_names().to_sql_string(),
            cname_revision=CNAME_REVISION,
            cname_version_number = CNAME_VERSION_NUMBER,
            pk_sql_values = apk.sql_values().to_sql_string(),
            revision = revision.to_sql_string(),
            version_number = version_id.version_number().to_sql_string(),
        );

        let rowid = self.sqlite_tx.borrow_mut().execute(&sql).await?;
        Ok(rowid)
    }

    /// Marks each given VRR entry as deleted by inserting a new navi row with
    /// revision + 1 and a NULL version_number.
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn insert_deleted_records(
        &self,
        vtable: &VTable,
        vrr_entries: VrrEntries,
    ) -> ApllodbResult<()> {
        // One INSERT..SELECT per entry; could be batched, but each entry has
        // its own condition expression.
        for vrr_entry in vrr_entries {
            let sql = format!(
                "
INSERT INTO {navi_table_name} ({pk_column_names}, {cname_revision})
  SELECT {pk_column_names}, {cname_revision} + 1 AS {cname_revision}
    FROM {navi_table_name} AS {vtable_name}
    WHERE {vrr_entry_condition}
",
                cname_revision = CNAME_REVISION,
                navi_table_name =
                    NaviTableName::from(vtable.table_name().clone()).to_sql_string(),
                pk_column_names = vtable
                    .table_wide_constraints()
                    .pk_column_names()
                    .to_sql_string(),
                vtable_name = vtable.table_name().to_sql_string(),
                vrr_entry_condition = vrr_entry
                    .to_condition_expression(self.cdt_revision().column_name())?
                    .to_sql_string(),
            );

            let _ = self.sqlite_tx.borrow_mut().execute(&sql).await?;
        }
        Ok(())
    }

    /// Marks every currently-live PK in the table as deleted (revision + 1,
    /// NULL version_number), skipping PKs already deleted.
    pub(in crate::sqlite::transaction::sqlite_tx::version_revision_resolver) async fn insert_deleted_records_all(
        &self,
        vtable: &VTable,
    ) -> ApllodbResult<()> {
        let sql = format!(
            "
INSERT INTO {navi_table_name} ({pk_column_names}, {cname_revision})
  SELECT {pk_column_names}, {cname_revision} + 1 AS {cname_revision}
    FROM {navi_table_name}
    GROUP BY {pk_column_names}
    HAVING
      {cname_revision} = MAX({cname_revision})
      AND {cname_version_number} IS NOT NULL
",
            cname_revision = CNAME_REVISION,
            cname_version_number = CNAME_VERSION_NUMBER,
            navi_table_name = NaviTableName::from(vtable.table_name().clone()).to_sql_string(),
            pk_column_names = vtable
                .table_wide_constraints()
                .pk_column_names()
                .to_sql_string(),
        );

        let _ = self.sqlite_tx.borrow_mut().execute(&sql).await?;
        Ok(())
    }

    // Column type descriptors used to decode query results.

    fn cdt_rowid(&self) -> ColumnDataType {
        ColumnDataType::new(
            ColumnName::new(CNAME_ROWID).unwrap(),
            SqlType::big_int(),
            false,
        )
    }
    fn cdt_revision(&self) -> ColumnDataType {
        ColumnDataType::new(
            ColumnName::new(CNAME_REVISION).unwrap(),
            SqlType::big_int(),
            false,
        )
    }
    fn cdt_version_number(&self) -> ColumnDataType {
        ColumnDataType::new(
            ColumnName::new(CNAME_VERSION_NUMBER).unwrap(),
            SqlType::big_int(),
            // Nullable: NULL version_number marks a deleted record.
            true,
        )
    }
}
use common::ids::{PageId, SlotId}; use common::PAGE_SIZE; use std::convert::TryInto; use std::mem; /// The struct for a page. Note this can hold more elements/meta data when created, /// but it must be able to be packed/serialized/marshalled into the data array of size /// PAGE_SIZE. In the header, you are allowed to allocate 8 bytes for general page metadata and /// 6 bytes per value/entry/slot stored. For example a page that has stored 3 values, can use /// up to 8+3*6=26 bytes, leaving the rest (PAGE_SIZE-26 for data) when serialized. /// You do not need reclaim header information for a value inserted (eg 6 bytes per value ever inserted) /// The rest must filled as much as possible to hold values. pub(crate) struct Page { /// The data for data page_id : PageId, header_len : u16, header : Vec<HeaderTupleT>, data : [u8; PAGE_SIZE], } struct HeaderTuple { slot_id : SlotId, index : u16, length : u16, } type HeaderTupleT = HeaderTuple; /// The functions required for page impl Page { /// Create a new page pub fn new(page_id: PageId) -> Self { let page = Page { page_id, header_len : 0, header : Vec::new(), data : [0; PAGE_SIZE] }; return page; } /// Return the page id for a page pub fn get_page_id(&self) -> PageId { return self.page_id; } /// find_min_available_slot_id: This function returns the minimum available SlotId /// /// header: The header vector for our page /// /// returns: The minimum available SlotId pub fn find_min_available_slot_id(&mut self) -> SlotId { /* Loop through header vector to check if i is a used SlotId */ for i in 0..self.header.len() { let mut is_used = false; /* isUsed tracks if i is already used */ /* Loop again to check whether i matches against any SlotId in the header vector */ for j in 0..self.header.len() { if self.header[j].slot_id as usize == i { is_used = true; } } /* If is_used is true, we return i */ if is_used == false { return i as u16; } } /* Otherwise, return one greater than the number of SlotId's already in use */ return 
self.header.len() as u16; } /// Attempts to add a new value to this page if there is space available. /// Returns Some(SlotId) if it was inserted or None if there was not enough space. /// Note that where the bytes are stored in the page does not matter (heap), but it /// should not change the slotId for any existing value. This means that /// bytes in the page may not follow the slot order. /// If a slot is deleted you can replace the slotId. /// /// HINT: You can copy/clone bytes into a slice using the following function. /// They must have the same size. /// self.data[X..y].clone_from_slice(&bytes); pub fn add_value(&mut self, bytes: &Vec<u8>) -> Option<SlotId> { let mut num = 0; /* We are adding a new tuple in the nth position of the header vector */ let mut index: usize = 0; /* This is the index of the value we are adding */ let mut space_is_found = false; /* Represent whether we found free space */ /* Loop through header vector to find continguous free space to fit the bytes vector */ if self.header.len() > 1 { for i in 0..self.header.len() - 1 { /* Set temp_index to the end of a indexed piece of data */ let temp_index: usize = self.header[i].index as usize + self.header[i].length as usize; /* Calculate the number of bytes of free space between the end of header[i].index and the start of header[i + 1].index */ let free_space: usize = self.header[i + 1].index as usize - temp_index; /* If the amount of free_space is enough to fit bytes, then we break and assign temp_index to index */ if free_space >= bytes.len() { index = temp_index; space_is_found = true; num = i; break; } } } /* Find the maximum number of bytes that our data array can hold */ let max = PAGE_SIZE - 8 - 6 * self.header.len(); /* Return None if no index was found */ if space_is_found == false { if self.header.len() > 0 { let temp_index: usize = self.header[self.header.len() - 1].index as usize + self.header[self.header.len() - 1].length as usize; if max - temp_index < bytes.len() { return None; 
} index = temp_index; num = self.header.len(); } /* This will be the first value inserted in the data array */ else { /* Insert a value into data[0] */ index = 0; num = 0; } } /* Find the minimum available SlotId for this new value */ let slot_id: SlotId = Self::find_min_available_slot_id(self); /* Create a new tuple that represents the new value we're adding to the data array */ let header_entry: HeaderTupleT = HeaderTuple {slot_id: slot_id, index: index as u16, length: bytes.len() as u16}; /* Insert a new tuple into the header vector at the ith position */ self.header.insert(num, header_entry); self.header_len += 1; /* Clone the bytes into the page's data array */ self.data[index..index + bytes.len()].clone_from_slice(&bytes); /* Return the SlotId of the new value */ return Some(slot_id); } /// Return the bytes for the slotId. If the slotId is not valid then return None pub fn get_value(&self, slot_id: SlotId) -> Option<Vec<u8>> { /* The index of the slice we are returning */ let index: usize; /* Find the tuple in our header that matches the slot_id */ for i in 0..self.header.len() { /* Check if header[i].slot_id matches slot_id */ if self.header[i].slot_id == slot_id { index = self.header[i].index as usize; /* Return a vector of that slice if we have found the right slot_id */ return Some(self.data[index..index+self.header[i].length as usize].to_vec()); } } /* Return None if the slot_id is not found */ return None; } /// Delete the bytes/slot for the slotId. 
If the slotId is not valid then return None /// HINT: Return Some(()) for a valid delete pub fn delete_value(&mut self, slot_id: SlotId) -> Option<()> { /* Find the tuple in our header that matches the slot_id */ for i in 0..self.header.len() { /* Check if header[i].slot_id matches slot_id */ if self.header[i].slot_id == slot_id { /* Remove this tuple from our header vector */ self.header.remove(i); return Some(()); } } /* Return None if the slot_id is not found */ return None; } /// Create a new page from the byte array. /// /// HINT to create a primitive data type from a slice you can use the following /// (the example is for a u16 type and the data store in little endian) /// u16::from_le_bytes(data[X..Y].try_into().unwrap()); pub fn from_bytes(data: &[u8]) -> Self { /* Get the page_id from the byte array */ let page_id: PageId = PageId::from_le_bytes(data[0..mem::size_of::<PageId>()].try_into().unwrap()); /* Get the header_len from the byte array */ let header_len: u16 = u16::from_le_bytes(data[mem::size_of::<PageId>()..mem::size_of::<PageId>() + 2].try_into().unwrap()); /* Create a new empty page struct given the page_id we just parsed */ let mut new_page = Page { page_id: page_id, header_len: header_len, header: Vec::new(), data: [0; PAGE_SIZE] }; for i in 0..header_len as usize { /* Position of the start of the slot field */ let slot_pos: usize = 6 * i + 8; /* Position of the start of the index field */ let index_pos: usize = slot_pos + mem::size_of::<SlotId>(); /* Position of the start of the length field */ let length_pos: usize = index_pos + 2; /* Fill fields with respective values from the bytes array */ let slot_id: SlotId = SlotId::from_le_bytes(data[slot_pos..index_pos].try_into().unwrap()); let index: u16 = u16::from_le_bytes(data[index_pos..length_pos].try_into().unwrap()); let length: u16 = u16::from_le_bytes(data[length_pos..length_pos + 2].try_into().unwrap()); /* Create a new HeaderTupleT */ let header_entry = HeaderTupleT { slot_id: slot_id, 
index: index, length: length, }; /* Append the newly created header_entry to the end of our header vector */ new_page.header.push(header_entry); } /* Position of the start of the data array */ let data_pos: usize = 8 + 6 * header_len as usize; /* Copy the values from data into the data array in our newly created page struct */ for i in 0..data.len() - data_pos { /* Copying the information in data into new_page.data byte by byte */ new_page.data[i..i+1].clone_from_slice(&data[data_pos + i..data_pos + i + 1]); } /* Return the newly created page struct */ return new_page; } /// Convert a page into bytes. This must be same size as PAGE_SIZE. /// We use a Vec<u8> for simplicity here. /// /// HINT: To convert a vec of bytes using little endian, use /// to_le_bytes().to_vec() pub fn get_bytes(&self) -> Vec<u8> { /* Create an empty vector */ let mut vec = Vec::new(); /* Create a two-byte array representing the page_id */ vec.extend(self.page_id.to_le_bytes().iter().cloned()); /* Create a two-byte array representing the header_len */ vec.extend(self.header_len.to_le_bytes().iter().cloned()); /* Pad the header with four bytes of zeros */ let zero: u32 = 0; vec.extend(zero.to_le_bytes().iter().cloned()); /* Loop through the header array and add slot_id, index, and length into the vector */ for i in 0..self.header_len as usize { vec.extend(self.header[i].slot_id.to_le_bytes().iter().cloned()); vec.extend(self.header[i].index.to_le_bytes().iter().cloned()); vec.extend(self.header[i].length.to_le_bytes().iter().cloned()); } /* Add the bytes from the data array into our vector */ let data_array_max_size = PAGE_SIZE - 8 - 6 * self.header.len(); vec.extend(self.data[0..data_array_max_size].iter().cloned()); /* Return the newly created vector */ return vec; } /// A utility function to determine the size of the header in the page /// when serialized/to_bytes. /// Will be used by tests. 
Optional for you to use in your code #[allow(dead_code)] pub(crate) fn get_header_size(&self) -> usize { return self.header.len() as usize * 6 + 8; } /// A utility function to determine the largest block of free space in the page. /// Will be used by tests. Optional for you to use in your code #[allow(dead_code)] pub(crate) fn get_largest_free_contiguous_space(&self) -> usize { /* Max represents the largest number of bytes of contiguous free space in the page */ let mut max: usize = 0; /* Find contiguous free space within the already allocated bytes in our data array */ if self.header.len() > 1 { for i in 0..self.header.len() - 1 { /* Set temp_index to the end of a indexed piece of data */ let temp_index: usize = self.header[i].index as usize + self.header[i].length as usize; /* Calculate the number of bytes of free space between the end of header[i].index and the start of header[i + 1].index */ let free_space: usize = self.header[i + 1].index as usize - temp_index; /* If the amount of free_space is larger than max, set max to free_space */ if free_space >= max { max = free_space; } } } /* Maximum number of bytes that our data array can hold */ let data_array_max_size = PAGE_SIZE - 8 - 6 * self.header.len(); /* Check if max is less than the free space after the last allocated byte in our data array */ if self.header.len() == 0 { max = data_array_max_size; } else { let final_allocated_index: usize = self.header[self.header.len() - 1].index as usize + self.header[self.header.len() - 1].length as usize; let remaining_free_space: usize = data_array_max_size - final_allocated_index; if max < remaining_free_space { max = remaining_free_space; } } /* Return the largest number of bytes of continguous free space */ return max; } } /// The (consuming) iterator struct for a page. /// This should iterate through all valid values of the page. 
/// See https://stackoverflow.com/questions/30218886/how-to-implement-iterator-and-intoiterator-for-a-simple-struct pub struct PageIter { page: Page, index: usize, } /// The implementation of the (consuming) page iterator. impl Iterator for PageIter { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { if self.index >= self.page.header.len() { return None; } let index = self.page.header[self.index].index as usize; let length = self.page.header[self.index].length as usize; let valid_value = self.page.data[index..index+length].to_vec(); self.index += 1; return Some(valid_value); } } /// The implementation of IntoIterator which allows an iterator to be created /// for a page. This should create the PageIter struct with the appropriate state/metadata /// on initialization. impl IntoIterator for Page { type Item = Vec<u8>; type IntoIter = PageIter; fn into_iter(self) -> Self::IntoIter { PageIter { page: self, index: 0, } } } #[cfg(test)] mod tests { use super::*; use common::testutil::init; use common::testutil::*; use common::Tuple; /// Limits how on how many bytes we can use for page metadata / header pub const FIXED_HEADER_SIZE: usize = 8; pub const HEADER_PER_VAL_SIZE: usize = 6; #[test] fn hs_page_create() { init(); let p = Page::new(0); assert_eq!(0, p.get_page_id()); assert_eq!( PAGE_SIZE - p.get_header_size(), p.get_largest_free_contiguous_space() ); } #[test] fn hs_page_simple_insert() { init(); let mut p = Page::new(0); let tuple = int_vec_to_tuple(vec![0, 1, 2]); let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap(); let byte_len = tuple_bytes.len(); assert_eq!(Some(0), p.add_value(&tuple_bytes)); assert_eq!( PAGE_SIZE - byte_len - p.get_header_size(), p.get_largest_free_contiguous_space() ); let tuple_bytes2 = serde_cbor::to_vec(&tuple).unwrap(); assert_eq!(Some(1), p.add_value(&tuple_bytes2)); assert_eq!( PAGE_SIZE - p.get_header_size() - byte_len - byte_len, p.get_largest_free_contiguous_space() ); } #[test] fn hs_page_space() { init(); let mut 
p = Page::new(0);
        let size = 10;
        let bytes = get_random_byte_vec(size);
        assert_eq!(10, bytes.len());
        assert_eq!(Some(0), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size,
            p.get_largest_free_contiguous_space()
        );
        assert_eq!(Some(1), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 2,
            p.get_largest_free_contiguous_space()
        );
        assert_eq!(Some(2), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 3,
            p.get_largest_free_contiguous_space()
        );
    }

    // Round-trip values through add_value/get_value, including re-reads and an
    // out-of-range slot.
    #[test]
    fn hs_page_get_value() {
        init();
        let mut p = Page::new(0);
        let tuple = int_vec_to_tuple(vec![0, 1, 2]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let check_bytes = p.get_value(0).unwrap();
        let check_tuple: Tuple = serde_cbor::from_slice(&check_bytes).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        assert_eq!(tuple, check_tuple);
        let tuple2 = int_vec_to_tuple(vec![3, 3, 3]);
        let tuple_bytes2 = serde_cbor::to_vec(&tuple2).unwrap();
        assert_eq!(Some(1), p.add_value(&tuple_bytes2));
        let check_bytes2 = p.get_value(1).unwrap();
        let check_tuple2: Tuple = serde_cbor::from_slice(&check_bytes2).unwrap();
        assert_eq!(tuple_bytes2, check_bytes2);
        assert_eq!(tuple2, check_tuple2);
        //Recheck
        let check_bytes2 = p.get_value(1).unwrap();
        let check_tuple2: Tuple = serde_cbor::from_slice(&check_bytes2).unwrap();
        assert_eq!(tuple_bytes2, check_bytes2);
        assert_eq!(tuple2, check_tuple2);
        let check_bytes = p.get_value(0).unwrap();
        let check_tuple: Tuple = serde_cbor::from_slice(&check_bytes).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        assert_eq!(tuple, check_tuple);
        //Check that invalid slot gets None
        assert_eq!(None, p.get_value(2));
    }

    #[test]
    fn hs_page_header_size_small() {
        init();
        // Testing that the header is no more than 8 bytes for the header, and 6 bytes per value inserted
        let mut p = Page::new(0);
        assert!(p.get_header_size() <= FIXED_HEADER_SIZE);
        let bytes = get_random_byte_vec(10);
        assert_eq!(Some(0), p.add_value(&bytes));
        assert!(p.get_header_size() <= FIXED_HEADER_SIZE + HEADER_PER_VAL_SIZE);
        assert_eq!(Some(1), p.add_value(&bytes));
        assert_eq!(Some(2), p.add_value(&bytes));
        assert_eq!(Some(3), p.add_value(&bytes));
        assert!(p.get_header_size() <= FIXED_HEADER_SIZE + HEADER_PER_VAL_SIZE * 4);
    }

    #[test]
    fn hs_page_header_size_full() {
        init();
        // Testing that the header is no more than 8 bytes for the header, and 6 bytes per value inserted
        let mut p = Page::new(0);
        assert!(p.get_header_size() <= FIXED_HEADER_SIZE);
        let byte_size = 10;
        let bytes = get_random_byte_vec(byte_size);
        // how many vals can we hold with 8 bytes
        let num_vals: usize = (((PAGE_SIZE - FIXED_HEADER_SIZE) as f64
            / (byte_size + HEADER_PER_VAL_SIZE) as f64)
            .floor()) as usize;
        if PAGE_SIZE == 4096 && FIXED_HEADER_SIZE == 8 && HEADER_PER_VAL_SIZE == 6 {
            assert_eq!(255, num_vals);
        }
        for _ in 0..num_vals {
            p.add_value(&bytes);
        }
        assert!(p.get_header_size() <= FIXED_HEADER_SIZE + (num_vals * HEADER_PER_VAL_SIZE));
        assert!(
            p.get_largest_free_contiguous_space()
                >= PAGE_SIZE
                    - (byte_size * num_vals)
                    - FIXED_HEADER_SIZE
                    - (num_vals * HEADER_PER_VAL_SIZE)
        );
    }

    // Fill the page with quarter-page values, then verify a too-large insert is
    // rejected while a smaller value still fits.
    #[test]
    fn hs_page_no_space() {
        init();
        let mut p = Page::new(0);
        let size = PAGE_SIZE / 4;
        let bytes = get_random_byte_vec(size);
        assert_eq!(Some(0), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size,
            p.get_largest_free_contiguous_space()
        );
        assert_eq!(Some(1), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 2,
            p.get_largest_free_contiguous_space()
        );
        assert_eq!(Some(2), p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 3,
            p.get_largest_free_contiguous_space()
        );
        //Should reject here
        assert_eq!(None, p.add_value(&bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 3,
            p.get_largest_free_contiguous_space()
        );
        // Take small amount of data
        let small_bytes = get_random_byte_vec(size / 4);
        assert_eq!(Some(3), p.add_value(&small_bytes));
        assert_eq!(
            PAGE_SIZE - p.get_header_size() - size * 3 - small_bytes.len(),
            p.get_largest_free_contiguous_space()
        );
    }

    #[test]
    fn hs_page_simple_delete() {
        init();
        let mut p = Page::new(0);
        let tuple = int_vec_to_tuple(vec![0, 1, 2]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let check_bytes = p.get_value(0).unwrap();
        let check_tuple: Tuple = serde_cbor::from_slice(&check_bytes).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        assert_eq!(tuple, check_tuple);
        let tuple2 = int_vec_to_tuple(vec![3, 3, 3]);
        let tuple_bytes2 = serde_cbor::to_vec(&tuple2).unwrap();
        assert_eq!(Some(1), p.add_value(&tuple_bytes2));
        let check_bytes2 = p.get_value(1).unwrap();
        let check_tuple2: Tuple = serde_cbor::from_slice(&check_bytes2).unwrap();
        assert_eq!(tuple_bytes2, check_bytes2);
        assert_eq!(tuple2, check_tuple2);
        //Delete slot 0
        assert_eq!(Some(()), p.delete_value(0));
        //Recheck slot 1
        let check_bytes2 = p.get_value(1).unwrap();
        let check_tuple2: Tuple = serde_cbor::from_slice(&check_bytes2).unwrap();
        assert_eq!(tuple_bytes2, check_bytes2);
        assert_eq!(tuple2, check_tuple2);
        //Verify slot 0 is gone
        assert_eq!(None, p.get_value(0));
        //Check that invalid slot gets None
        assert_eq!(None, p.get_value(2));
        //Delete slot 1
        assert_eq!(Some(()), p.delete_value(1));
        //Verify slot 1 is gone
        assert_eq!(None, p.get_value(1));
    }

    // NOTE(review): placeholder test — it only asserts a tautology and builds
    // unused fixtures; presumably meant to exercise first-free-space logic.
    #[test]
    fn hs_page_get_first_free_space() {
        init();
        let p = Page::new(0);
        assert_eq!(1,1);
        let _b1 = get_random_byte_vec(100);
        let _b2 = get_random_byte_vec(50);
    }

    // Deleted slots should be reused: freed slot ids are handed out again and
    // freed byte ranges are refilled (first-fit style expectations below).
    #[test]
    fn hs_page_delete_insert() {
        init();
        let mut p = Page::new(0);
        let tuple_bytes = get_random_byte_vec(20);
        let tuple_bytes2 = get_random_byte_vec(20);
        let tuple_bytes3 = get_random_byte_vec(20);
        let tuple_bytes4 = get_random_byte_vec(20);
        let tuple_bytes_big = get_random_byte_vec(40);
        let tuple_bytes_small1 = get_random_byte_vec(5);
        let tuple_bytes_small2 = get_random_byte_vec(5);
        //Add 3 values
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let check_bytes = p.get_value(0).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        assert_eq!(Some(1), p.add_value(&tuple_bytes2));
        let check_bytes = p.get_value(1).unwrap();
        assert_eq!(tuple_bytes2, check_bytes);
        assert_eq!(Some(2), p.add_value(&tuple_bytes3));
        let check_bytes = p.get_value(2).unwrap();
        assert_eq!(tuple_bytes3, check_bytes);
        //Delete slot 1
        assert_eq!(Some(()), p.delete_value(1));
        //Verify slot 1 is gone
        assert_eq!(None, p.get_value(1));
        let check_bytes = p.get_value(0).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        let check_bytes = p.get_value(2).unwrap();
        assert_eq!(tuple_bytes3, check_bytes);
        //Insert same bytes, should go to slot 1
        assert_eq!(Some(1), p.add_value(&tuple_bytes4));
        let check_bytes = p.get_value(1).unwrap();
        assert_eq!(tuple_bytes4, check_bytes);
        //Delete 0
        assert_eq!(Some(()), p.delete_value(0));
        //Insert big, should go to slot 0 with space later in free block
        assert_eq!(Some(0), p.add_value(&tuple_bytes_big));
        //Insert small, should go to 3
        assert_eq!(Some(3), p.add_value(&tuple_bytes_small1));
        //Insert small, should go to new
        assert_eq!(Some(4), p.add_value(&tuple_bytes_small2));
    }

    // Serialized pages are always exactly PAGE_SIZE bytes.
    #[test]
    fn hs_page_size() {
        init();
        let mut p = Page::new(2);
        let tuple = int_vec_to_tuple(vec![0, 1, 2]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let page_bytes = p.get_bytes();
        assert_eq!(PAGE_SIZE, page_bytes.len());
    }

    // get_bytes/from_bytes round-trip preserves values and remains usable for
    // further inserts.
    #[test]
    fn hs_page_simple_byte_serialize() {
        init();
        let mut p = Page::new(0);
        let tuple = int_vec_to_tuple(vec![0, 1, 2]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let tuple2 = int_vec_to_tuple(vec![3, 3, 3]);
        let tuple_bytes2 = serde_cbor::to_vec(&tuple2).unwrap();
        assert_eq!(Some(1), p.add_value(&tuple_bytes2));
        //Get bytes and create from bytes
        let bytes = p.get_bytes();
        let mut p2 = Page::from_bytes(&bytes);
        assert_eq!(0, p2.get_page_id());
        //Check reads
        let check_bytes2 = p2.get_value(1).unwrap();
        let check_tuple2: Tuple = serde_cbor::from_slice(&check_bytes2).unwrap();
        assert_eq!(tuple_bytes2, check_bytes2);
        assert_eq!(tuple2, check_tuple2);
        let check_bytes = p2.get_value(0).unwrap();
        let check_tuple: Tuple = serde_cbor::from_slice(&check_bytes).unwrap();
        assert_eq!(tuple_bytes, check_bytes);
        assert_eq!(tuple, check_tuple);
        //Add a new tuple to the new page
        let tuple3 = int_vec_to_tuple(vec![4, 3, 2]);
        let tuple_bytes3 = tuple3.get_bytes();
        assert_eq!(Some(2), p2.add_value(&tuple_bytes3));
        assert_eq!(tuple_bytes3, p2.get_value(2).unwrap());
        assert_eq!(tuple_bytes2, p2.get_value(1).unwrap());
        assert_eq!(tuple_bytes, p2.get_value(0).unwrap());
    }

    // Consuming iteration yields each live value once, in slot order, and
    // reflects later inserts/deletes.
    #[test]
    fn hs_page_iter() {
        init();
        let mut p = Page::new(0);
        let tuple = int_vec_to_tuple(vec![0, 0, 1]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        assert_eq!(Some(0), p.add_value(&tuple_bytes));
        let tuple2 = int_vec_to_tuple(vec![0, 0, 2]);
        let tuple_bytes2 = serde_cbor::to_vec(&tuple2).unwrap();
        assert_eq!(Some(1), p.add_value(&tuple_bytes2));
        let tuple3 = int_vec_to_tuple(vec![0, 0, 3]);
        let tuple_bytes3 = serde_cbor::to_vec(&tuple3).unwrap();
        assert_eq!(Some(2), p.add_value(&tuple_bytes3));
        let tuple4 = int_vec_to_tuple(vec![0, 0, 4]);
        let tuple_bytes4 = serde_cbor::to_vec(&tuple4).unwrap();
        assert_eq!(Some(3), p.add_value(&tuple_bytes4));
        let tup_vec = vec![
            tuple_bytes.clone(),
            tuple_bytes2.clone(),
            tuple_bytes3.clone(),
            tuple_bytes4.clone(),
        ];
        let page_bytes = p.get_bytes();
        // Test iteration 1
        let mut iter = p.into_iter();
        assert_eq!(Some(tuple_bytes.clone()), iter.next());
        assert_eq!(Some(tuple_bytes2.clone()), iter.next());
        assert_eq!(Some(tuple_bytes3.clone()), iter.next());
        assert_eq!(Some(tuple_bytes4.clone()), iter.next());
        assert_eq!(None, iter.next());
        //Check another way
        let p = Page::from_bytes(&page_bytes);
        assert_eq!(Some(tuple_bytes.clone()), p.get_value(0));
        for (i, x) in p.into_iter().enumerate() {
            assert_eq!(tup_vec[i], x);
        }
        let p = Page::from_bytes(&page_bytes);
        let mut count = 0;
        for _ in p {
            count += 1;
        }
        assert_eq!(count, 4);
        //Add a value and check
        let mut p = Page::from_bytes(&page_bytes);
        assert_eq!(Some(4), p.add_value(&tuple_bytes));
        //get the updated bytes
        let page_bytes = p.get_bytes();
        count = 0;
        for _ in p {
            count += 1;
        }
        assert_eq!(count, 5);
        //Delete
        let mut p = Page::from_bytes(&page_bytes);
        p.delete_value(2);
        let mut iter = p.into_iter();
        assert_eq!(Some(tuple_bytes.clone()), iter.next());
        assert_eq!(Some(tuple_bytes2.clone()), iter.next());
        assert_eq!(Some(tuple_bytes4.clone()), iter.next());
        assert_eq!(Some(tuple_bytes.clone()), iter.next());
        assert_eq!(None, iter.next());
    }
}
use std::mem::MaybeUninit; use std::collections::BTreeMap; use super::environment::AtomID; use super::local_context::LocalContext; use crate::util::*; pub struct Spans<T> { stmt: MaybeUninit<Span>, decl: MaybeUninit<AtomID>, pub lc: Option<LocalContext>, data: BTreeMap<usize, Vec<(Span, T)>>, } use std::fmt; impl<T: fmt::Debug> fmt::Debug for Spans<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{{ stmt: {:?},\n data: {:?} }}", self.stmt(), self.data) } } impl<T> Spans<T> { pub fn new() -> Spans<T> { Spans { stmt: MaybeUninit::uninit(), decl: MaybeUninit::uninit(), lc: None, data: BTreeMap::new() } } pub fn set_stmt(&mut self, sp: Span) { self.stmt = MaybeUninit::new(sp) } pub fn set_decl(&mut self, a: AtomID) { self.decl = MaybeUninit::new(a) } pub fn stmt(&self) -> Span { unsafe { self.stmt.assume_init() } } pub fn _decl(&self) -> AtomID { unsafe { self.decl.assume_init() } } pub fn insert(&mut self, sp: Span, val: T) -> &mut T { let v = self.data.entry(sp.start).or_default(); for (sp1, k) in &mut *v { if sp == *sp1 {return unsafe {&mut *(k as *mut T)}} } // the unsafe above is needed because NLL support is not all there, // and this looks like a double borrow of `*v` v.push((sp, val)); &mut v.last_mut().unwrap().1 } pub fn insert_if(&mut self, sp: Span, val: impl FnOnce() -> T) { if sp.start >= self.stmt().start { self.insert(sp, val()); } } pub fn _get(&self, sp: Span) -> Option<&T> { self.data.get(&sp.start).and_then(|v| v.iter().find(|x| x.0 == sp).map(|x| &x.1)) } pub fn _get_mut(&mut self, sp: Span) -> Option<&mut T> { self.data.get_mut(&sp.start).and_then(|v| v.iter_mut().find(|x| x.0 == sp).map(|x| &mut x.1)) } pub fn find_pos(&self, pos: usize) -> Vec<&(Span, T)> { if let Some((_, v)) = self.data.range(..=pos).rev().next() { v.iter().filter(|x| pos < x.0.end).collect() } else {vec![]} } }
// svd2rust-style generated register accessors for the EXTI FPR2 register.
// Field layout and bit offsets come from the device SVD; do not edit by hand.
#[doc = "Register `FPR2` reader"]
pub type R = crate::R<FPR2_SPEC>;
#[doc = "Register `FPR2` writer"]
pub type W = crate::W<FPR2_SPEC>;
#[doc = "Field `FPIF35` reader - FPIF35"]
pub type FPIF35_R = crate::BitReader;
#[doc = "Field `FPIF35` writer - FPIF35"]
pub type FPIF35_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPIF36` reader - FPIF36"]
pub type FPIF36_R = crate::BitReader;
#[doc = "Field `FPIF36` writer - FPIF36"]
pub type FPIF36_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPIF37` reader - FPIF37"]
pub type FPIF37_R = crate::BitReader;
#[doc = "Field `FPIF37` writer - FPIF37"]
pub type FPIF37_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FPIF38` reader - FPIF38"]
pub type FPIF38_R = crate::BitReader;
#[doc = "Field `FPIF38` writer - FPIF38"]
pub type FPIF38_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read proxies: each getter extracts one falling-edge pending bit.
impl R {
    #[doc = "Bit 3 - FPIF35"]
    #[inline(always)]
    pub fn fpif35(&self) -> FPIF35_R {
        FPIF35_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - FPIF36"]
    #[inline(always)]
    pub fn fpif36(&self) -> FPIF36_R {
        FPIF36_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - FPIF37"]
    #[inline(always)]
    pub fn fpif37(&self) -> FPIF37_R {
        FPIF37_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - FPIF38"]
    #[inline(always)]
    pub fn fpif38(&self) -> FPIF38_R {
        FPIF38_R::new(((self.bits >> 6) & 1) != 0)
    }
}
// Write proxies: the const generic carries the bit offset for each field.
impl W {
    #[doc = "Bit 3 - FPIF35"]
    #[inline(always)]
    #[must_use]
    pub fn fpif35(&mut self) -> FPIF35_W<FPR2_SPEC, 3> {
        FPIF35_W::new(self)
    }
    #[doc = "Bit 4 - FPIF36"]
    #[inline(always)]
    #[must_use]
    pub fn fpif36(&mut self) -> FPIF36_W<FPR2_SPEC, 4> {
        FPIF36_W::new(self)
    }
    #[doc = "Bit 5 - FPIF37"]
    #[inline(always)]
    #[must_use]
    pub fn fpif37(&mut self) -> FPIF37_W<FPR2_SPEC, 5> {
        FPIF37_W::new(self)
    }
    #[doc = "Bit 6 - FPIF38"]
    #[inline(always)]
    #[must_use]
    pub fn fpif38(&mut self) -> FPIF38_W<FPR2_SPEC, 6> {
        FPIF38_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "EXTI falling edge pending register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`fpr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fpr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct FPR2_SPEC;
impl crate::RegisterSpec for FPR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`fpr2::R`](R) reader structure"]
impl crate::Readable for FPR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`fpr2::W`](W) writer structure"]
impl crate::Writable for FPR2_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets FPR2 to value 0"]
impl crate::Resettable for FPR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use super::flags8::Flags8; use ::message::{AsDataId, AsDataValue}; use ::application::ComplexType; use ::Error; /// The typed payload of the message without the interpretation #[derive(Copy, Clone)] pub enum SimpleTypeEnum { /// High byte u8, low byte u8 U8u8(u8, u8), /// High byte u8, low byte s8 U8s8(u8, i8), /// High byte u8, low byte flags8 U8flags8(u8, Flags8), /// High byte s8, low byte u8 S8u8(i8, u8), /// High byte s8, low byte s8 S8s8(i8, i8), /// High byte s8, low byte flags8 S8flags8(i8, Flags8), /// High byte flags8, low byte u8 Flags8u8(Flags8, u8), /// High byte flags8, low byte i8 Flags8s8(Flags8, i8), /// High byte flags8, low byte flags8 Flags8flags8(Flags8, Flags8), /// High byte u8, no low byte defined U8(u8), /// High byte s8, no low byte defined S8(i8), /// High byte flags8, no low byte defined Flags8(Flags8), /// High and low bytes combined into u16 U16(u16), /// High and low bytes combined into s16 S16(i16), /// High and low bytes combined into f8.8 (fixed point number) F88(f32) } impl From<(u8,u8)> for SimpleTypeEnum { fn from(input: (u8,u8)) -> SimpleTypeEnum { SimpleTypeEnum::U8u8(input.0, input.1) } } impl From<(u8,i8)> for SimpleTypeEnum { fn from(input: (u8,i8)) -> SimpleTypeEnum { SimpleTypeEnum::U8s8(input.0, input.1) } } impl From<(u8,Flags8)> for SimpleTypeEnum { fn from(input: (u8,Flags8)) -> SimpleTypeEnum { SimpleTypeEnum::U8flags8(input.0, input.1) } } impl From<(i8,u8)> for SimpleTypeEnum { fn from(input: (i8,u8)) -> SimpleTypeEnum { SimpleTypeEnum::S8u8(input.0, input.1) } } impl From<(i8,i8)> for SimpleTypeEnum { fn from(input: (i8,i8)) -> SimpleTypeEnum { SimpleTypeEnum::S8s8(input.0, input.1) } } impl From<(i8,Flags8)> for SimpleTypeEnum { fn from(input: (i8,Flags8)) -> SimpleTypeEnum { SimpleTypeEnum::S8flags8(input.0, input.1) } } impl From<(Flags8,u8)> for SimpleTypeEnum { fn from(input: (Flags8,u8)) -> SimpleTypeEnum { SimpleTypeEnum::Flags8u8(input.0, input.1) } } impl From<(Flags8,i8)> for SimpleTypeEnum { fn 
from(input: (Flags8,i8)) -> SimpleTypeEnum { SimpleTypeEnum::Flags8s8(input.0, input.1) } } impl From<(Flags8,Flags8)> for SimpleTypeEnum { fn from(input: (Flags8,Flags8)) -> SimpleTypeEnum { SimpleTypeEnum::Flags8flags8(input.0, input.1) } } impl From<u8> for SimpleTypeEnum { fn from(input: u8) -> SimpleTypeEnum { SimpleTypeEnum::U8(input) } } impl From<i8> for SimpleTypeEnum { fn from(input: i8) -> SimpleTypeEnum { SimpleTypeEnum::S8(input) } } impl From<Flags8> for SimpleTypeEnum { fn from(input: Flags8) -> SimpleTypeEnum { SimpleTypeEnum::Flags8(input) } } impl From<u16> for SimpleTypeEnum { fn from(input: u16) -> SimpleTypeEnum { SimpleTypeEnum::U16(input) } } impl From<i16> for SimpleTypeEnum { fn from(input: i16) -> SimpleTypeEnum { SimpleTypeEnum::S16(input) } } impl From<f32> for SimpleTypeEnum { fn from(input: f32) -> SimpleTypeEnum { SimpleTypeEnum::F88(input) } } impl SimpleTypeEnum { /*pub(crate) fn new_from_message(msg: &(Message + 'static)) -> Result<SimpleTypeEnum, Error> { super::complextype::to_simple_type((msg as &Message).data_id(), msg.data_value()) }*/ /// Creates a new simpletype from a class implementing dataid and a class implementing datavalue pub fn new<TDataId: AsDataId, TDataValue: AsDataValue>(dataid: &TDataId, datavalue: &AsDataValue) -> Result<SimpleTypeEnum, Error> { let complextype = try!(ComplexType::new_from_data(dataid.as_data_id(), datavalue.as_data_value())); Ok(complextype.into()) } } impl AsDataValue for SimpleTypeEnum { fn as_data_value(&self) -> [u8; 2] { match self { &SimpleTypeEnum::U8u8(first, second) => (first, second).to_data(), &SimpleTypeEnum::U8s8(first, second) => (first, second).to_data(), &SimpleTypeEnum::U8flags8(first, second) => (first, second).to_data(), &SimpleTypeEnum::S8u8(first, second) => (first, second).to_data(), &SimpleTypeEnum::S8s8(first, second) => (first, second).to_data(), &SimpleTypeEnum::S8flags8(first, second) => (first, second).to_data(), &SimpleTypeEnum::Flags8u8(first, second) => (first, 
second).to_data(), &SimpleTypeEnum::Flags8s8(first, second) => (first, second).to_data(), &SimpleTypeEnum::Flags8flags8(first, second) => (first, second).to_data(), &SimpleTypeEnum::U8(first) => SimpleType::to_data(first), &SimpleTypeEnum::S8(first) => SimpleType::to_data(first), &SimpleTypeEnum::Flags8(first) => SimpleType::to_data(first), &SimpleTypeEnum::U16(first) => SimpleType::to_data(first), &SimpleTypeEnum::S16(first) => SimpleType::to_data(first), &SimpleTypeEnum::F88(first) => SimpleType::to_data(first), } } } /// Simple types pub(crate) trait SimpleSubtype : Copy + Clone { fn from_data(input: u8) -> Self; fn to_data(self) -> u8; } pub(crate) trait SimpleType : Copy + Clone { fn from_data(input: [u8; 2]) -> Self; fn to_data(self) -> [u8; 2]; } impl SimpleSubtype for u8 { fn from_data(input: u8) -> Self { input } fn to_data(self) -> u8 { self } } impl SimpleSubtype for i8 { fn from_data(input: u8) -> Self { input as i8 } fn to_data(self) -> u8 { self as u8 } } impl SimpleSubtype for Flags8 { fn from_data(input: u8) -> Self { Flags8::from(input) } fn to_data(self) -> u8 { self.bits } } impl<T1 : SimpleSubtype,T2: SimpleSubtype> SimpleType for (T1, T2) { fn from_data(input: [u8; 2]) -> Self { (T1::from_data(input[0]), T2::from_data(input[1])) } fn to_data(self) -> [u8; 2] { [ self.0.to_data(), self.1.to_data() ] } } impl<T: SimpleSubtype> SimpleType for T { fn from_data(input: [u8; 2]) -> Self { T::from_data(input[0]) } fn to_data(self) -> [u8; 2] { [self.to_data(), 0] } } impl SimpleType for u16 { fn from_data(input: [u8; 2]) -> Self { ((input[0] as u16) << 8) | input[1] as u16 } fn to_data(self) -> [u8; 2] { [ (self >> 8) as u8, (self & 0xff) as u8 ] } } impl SimpleType for i16 { fn from_data(input: [u8; 2]) -> Self { u16::from_data(input) as i16 } fn to_data(self) -> [u8; 2] { (self as u16).to_data() } } impl SimpleType for f32 { fn from_data(input: [u8; 2]) -> Self { // (input.data[0] as f32) + (input.data[1] as f32) / 256f32 (i16::from_data(input) as 
f32) / 256f32 } fn to_data(self) -> [u8; 2] { ((self * 256f32) as i16).to_data() } }
/** * Copyright (c) 2019, Sébastien Blin <sebastien.blin@enconn.fr> * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University of California, Berkeley nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**/ use super::super::gen::utils::Direction; use super::Room; use crate::serde::Serialize; use rmps::Serializer; use std::collections::HashMap; use std::sync::{ Arc, Mutex }; pub type PlayerStream = Arc<Mutex<Option<Vec<u8>>>>; pub type GameStream = Arc<Mutex<Vec<Vec<u8>>>>; struct Stream { pub tx: PlayerStream, pub rx: GameStream } /** * Represent the main server, manage rooms */ pub struct Server { lobby: Room, rooms: HashMap<u64, Room>, player_to_room: HashMap<u64, u64>, current_room_id: u64, player_to_stream: HashMap<u64, Stream> } impl Server { /** * Create a new Server */ pub fn new() -> Server { Server { lobby: Room::new(), rooms: HashMap::new(), player_to_room: HashMap::new(), current_room_id: 0, player_to_stream: HashMap::new(), } } /** * A new player is coming. Add it to the lobby * @param id The player id * @return If the operation is successful */ pub fn join_server(&mut self, id: u64, player_stream: PlayerStream) -> bool { info!("Client ({}) is in the lobby", id); let rx = Arc::new(Mutex::new(Vec::new())); self.player_to_room.insert(id, 0); self.player_to_stream.insert(id, Stream { tx: player_stream, rx: rx.clone(), }); self.lobby.join(id, rx) } /** * A player is creating a room. 
Add it to this room at the end * @param id The player id * @return The id of the room created */ pub fn create_room(&mut self, id: u64) -> u64 { if !self.player_to_room.contains_key(&id) { warn!("Can't create room because player is not in the server"); return 0; } let room_id = self.player_to_room[&id]; if room_id != 0 && !self.rooms.contains_key(&room_id) { warn!("Can't remove player from Room because rooms doesn't exists"); return 0; } if room_id == 0 { self.lobby.remove_player(id); } else { let remove = self.rooms.get_mut(&room_id).unwrap().remove_player(id); if remove { info!("Remove room ({})", room_id); self.rooms.remove(&room_id); } } let mut room = Room::new_with_capacity(4); let rx = self.player_to_stream[&id].rx.clone(); if room.join(id, rx) { self.current_room_id += 1; self.rooms.insert(self.current_room_id, room); *self.player_to_room.get_mut(&id).unwrap() = self.current_room_id; info!("Client ({}) is now in Room ({})", id, self.current_room_id); } else { *self.player_to_room.get_mut(&id).unwrap() = 0; warn!("Client ({}) can't join room. Going to room ({})", id, 0); } self.current_room_id } /** * A player is joining an existing room. 
* @param id The player id * @param join_id The room to join * @return If the operation is successful */ pub fn join_room(&mut self, id: u64, join_id: u64) -> bool { if !self.player_to_room.contains_key(&id) { warn!("Can't join room because player is not in the server"); return false; } let room_id = self.player_to_room[&id]; if room_id == join_id { warn!("Player try to join its current room"); return false; } if room_id != 0 && !self.rooms.contains_key(&room_id) { warn!("Can't remove player from Room because rooms doesn't exists"); return false; } if !self.rooms.contains_key(&join_id) { warn!("Player try to join inexistant room {}", join_id); return false; } if room_id == 0 { self.lobby.remove_player(id); } else { let remove = self.rooms.get_mut(&room_id).unwrap().remove_player(id); if remove { info!("Remove room ({})", room_id); self.rooms.remove(&room_id); } } let rx = self.player_to_stream[&id].rx.clone(); if join_id == 0 { self.lobby.join(id, rx); *self.player_to_room.get_mut(&id).unwrap() = join_id; } else { if self.rooms.get_mut(&join_id).unwrap().join(id, rx) { *self.player_to_room.get_mut(&id).unwrap() = join_id; info!("Client ({}) is now in Room ({})", id, join_id); } else { *self.player_to_room.get_mut(&id).unwrap() = 0; warn!("Client ({}) can't join room. 
Going to room ({})", id, 0); } } true } pub fn leave_room(&mut self, id: u64) -> bool { if !self.player_to_room.contains_key(&id) { warn!("Can't leave room because player is not in the server"); return false; } let room_id = self.player_to_room[&id]; if room_id == 0 { warn!("Player try to leave lobby"); return false; } if room_id != 0 && !self.rooms.contains_key(&room_id) { warn!("Can't remove player from Room because rooms doesn't exists"); return false; } if room_id != 0 { let remove = self.rooms.get_mut(&room_id).unwrap().remove_player(id); if remove { info!("Remove room ({})", room_id); self.rooms.remove(&room_id); } } let rx = self.player_to_stream[&id].rx.clone(); self.lobby.join(id, rx); *self.player_to_room.get_mut(&id).unwrap() = 0; info!("Client ({}) is now in Room ({})", id, 0); true } /** * A player is launching the game. * @param id The player id * @return If the operation is successful */ pub fn launch_game(&mut self, id: u64) -> bool { if !self.player_to_room.contains_key(&id) { warn!("Can't launch game because player is not in the server"); return false; } let room_id = self.player_to_room[&id]; if room_id == 0 { warn!("Can't launch game from lobby"); return false; } if !self.rooms.contains_key(&room_id) { warn!("Can't launch game because room doesn't exists"); return false; } self.rooms.get_mut(&room_id).unwrap().launch_game(id); info!("Client ({}) launched game in room ({})", id, self.current_room_id); self.send_resources(room_id); true } /** * A player put a bomb. 
* @param id The player id * @return If the operation is successful */ pub fn put_bomb(&mut self, id: u64) -> bool { if !self.player_to_room.contains_key(&id) { warn!("Can't put bomb because player is not in the server"); return false; } let room_id = self.player_to_room[&id]; if room_id == 0 { warn!("Can't put bomb from lobby"); return false; } if !self.rooms.contains_key(&room_id) { warn!("Can't put bomb because room doesn't exists"); return false; } self.rooms.get_mut(&room_id).unwrap().put_bomb(id); info!("Client ({}) putted bomb in room ({})", id, self.current_room_id); true } /** * A player move in a direction * @param id The player id * @param direction The direction chosen * @return If the operation is successful */ pub fn move_player(&mut self, id: u64, direction: Direction) -> bool { if !self.player_to_room.contains_key(&id) { warn!("Can't move because player is not in the server"); return false; } let room_id = self.player_to_room[&id]; if room_id == 0 { warn!("Can't move from lobby"); return false; } if !self.rooms.contains_key(&room_id) { warn!("Can't move because room doesn't exists"); return false; } if self.rooms.get_mut(&room_id).unwrap().move_player(id, direction) { info!("Client ({}) moved {:?} in room ({})", id, direction, self.current_room_id); } true } pub fn get_events(&mut self, player: &u64) -> Vec<Vec<u8>> { if self.player_to_stream.contains_key(player) { let mut rx = self.player_to_stream[player].rx.lock().unwrap(); let result = rx.clone(); *rx = Vec::new(); return result; } Vec::new() } fn send_resources(&mut self, room_id: u64) { info!("Sending resources for room {}", room_id); let players = self.rooms.get(&room_id).unwrap().players.keys(); let mut buf = Vec::new(); let msg = self.rooms.get(&room_id).unwrap().get_map_msg(); msg.serialize(&mut Serializer::new(&mut buf)).unwrap(); let len = buf.len() as u16; let mut send_buf : Vec<u8> = Vec::with_capacity(65536); send_buf.push((len >> 8) as u8); send_buf.push((len as u16 % (2 as 
u16).pow(8)) as u8); send_buf.append(&mut buf); for player in players { if self.player_to_stream.contains_key(player) { // TODO is it quick enough? Or add queue info!("Sending resources for player {}", player); *self.player_to_stream[player].tx.lock().unwrap() = Some(send_buf.clone()); } } } }
//! Repos is a module responsible for interacting with access control lists //! Authorization module contains authorization logic for the repo layer app #[macro_use] pub mod macros; pub mod legacy_acl; pub mod roles_cache; pub use self::roles_cache::RolesCacheImpl; use std::collections::HashMap; use std::rc::Rc; use errors::Error; use failure::Error as FailureError; use stq_types::{DeliveryRole, UserId}; use self::legacy_acl::{Acl, CheckScope}; use models::authorization::*; pub fn check<T>( acl: &Acl<Resource, Action, Scope, FailureError, T>, resource: Resource, action: Action, scope_checker: &CheckScope<Scope, T>, obj: Option<&T>, ) -> Result<(), FailureError> { acl.allows(resource, action, scope_checker, obj).and_then(|allowed| { if allowed { Ok(()) } else { Err(format_err!("Denied request to do {:?} on {:?}", action, resource) .context(Error::Forbidden) .into()) } }) } /// ApplicationAcl contains main logic for manipulation with resources #[derive(Clone)] pub struct ApplicationAcl { acls: Rc<HashMap<DeliveryRole, Vec<Permission>>>, roles: Vec<DeliveryRole>, user_id: UserId, } impl ApplicationAcl { pub fn new(roles: Vec<DeliveryRole>, user_id: UserId) -> Self { let mut hash = ::std::collections::HashMap::new(); hash.insert( DeliveryRole::Superuser, vec![ permission!(Resource::Companies), permission!(Resource::CompaniesPackages), permission!(Resource::Countries), permission!(Resource::Packages), permission!(Resource::Pickups), permission!(Resource::Products), permission!(Resource::ShippingRates), permission!(Resource::UserAddresses), permission!(Resource::UserRoles), ], ); hash.insert( DeliveryRole::User, vec![ permission!(Resource::Companies, Action::Read), permission!(Resource::CompaniesPackages, Action::Read), permission!(Resource::Countries, Action::Read), permission!(Resource::Packages, Action::Read), permission!(Resource::Pickups, Action::Read), permission!(Resource::Products, Action::Read), permission!(Resource::ShippingRates, Action::Read), 
permission!(Resource::UserAddresses, Action::All, Scope::Owned), permission!(Resource::UserRoles, Action::Read, Scope::Owned), ], ); hash.insert( DeliveryRole::StoreManager, vec![ permission!(Resource::Pickups, Action::All, Scope::Owned), permission!(Resource::Products, Action::All, Scope::Owned), ], ); ApplicationAcl { acls: Rc::new(hash), roles, user_id, } } } impl<T> Acl<Resource, Action, Scope, FailureError, T> for ApplicationAcl { fn allows( &self, resource: Resource, action: Action, scope_checker: &CheckScope<Scope, T>, obj: Option<&T>, ) -> Result<bool, FailureError> { let empty: Vec<Permission> = Vec::new(); let user_id = &self.user_id; let hashed_acls = self.acls.clone(); let acls = self .roles .iter() .flat_map(|role| hashed_acls.get(role).unwrap_or(&empty)) .filter(|permission| (permission.resource == resource) && ((permission.action == action) || (permission.action == Action::All))) .filter(|permission| scope_checker.is_in_scope(*user_id, &permission.scope, obj)); if acls.count() > 0 { Ok(true) } else { error!("Denied request from user {} to do {} on {}.", user_id, action, resource); Ok(false) } } } /// UnauthorizedAcl contains main logic for manipulation with resources #[derive(Clone, Default)] pub struct UnauthorizedAcl; impl<T> Acl<Resource, Action, Scope, FailureError, T> for UnauthorizedAcl { fn allows( &self, resource: Resource, action: Action, _scope_checker: &CheckScope<Scope, T>, _obj: Option<&T>, ) -> Result<bool, FailureError> { if action == Action::Read { match resource { Resource::Companies => Ok(true), Resource::CompaniesPackages => Ok(true), Resource::Countries => Ok(true), Resource::Packages => Ok(true), Resource::Pickups => Ok(true), Resource::Products => Ok(true), _ => Ok(false), } } else { error!("Denied unauthorized request to do {} on {}.", action, resource); Ok(false) } } } #[cfg(test)] mod tests { // write tests }
use std::fs::File; use std::error::Error; use std::path::Path; use std::io::prelude::*; #[allow(dead_code)] fn read_file(file_path: &str) -> i32 { let path = Path::new(file_path); let display = path.display(); let mut file = match File::open(&path) { Ok(file) => file, Err(why) => panic!("Couldn't open {} : {}", display, Error::description(&why)), }; let mut string = String::new(); let mut running_total = 0; match file.read_to_string(&mut string) { Ok(_) => { let actual_numbers: Vec<_> = string.split("\n") .map(|x| match x.parse::<i32>() { Ok(num) => num, Err(_) => 0, }).collect(); println!("Vec<&str> as numbers: {:?}", actual_numbers); for num in actual_numbers { running_total = running_total + num; } println!("TOTAL = {}", running_total); return running_total; }, Err(why) => panic!("Couldn't read {} : {}", display, Error::description(&why)), } } #[test] fn numbers_txt_works() { assert_eq!(read_file("numbers.txt"), 28); } #[test] fn numbers2_txt_works() { assert_eq!(read_file("numbers2.txt"), 93); }
use std::io::{Error, ErrorKind}; use run_script::ScriptOptions; use uuid::Uuid; pub fn extract(archive_path: &str) -> Result<String, Error> { // Generates a unique id to stop this from conflicting let output = format!("/tmp/libdeb/{}/", Uuid::new_v4()); if cfg!(target_os = "windows") { // We don't support windows return Err(Error::new( ErrorKind::Other, "The target_os windows is not supported. Please only use on linux", )); } else { let data_archive = format!("{}data.tar.xz", &output); let data_extract = format!("{}data/", &output); let control_archive = format!("{}control.tar.xz", &output); let control_extract = format!("{}control/", &output); let _ = run_script::run( &format!( " mkdir -p {}; mkdir -p {}; mkdir -p {}; ar -x {} --output={}; tar -xf {} -C {}; tar -xf {} -C {}; ", output, data_extract, control_extract, archive_path, output, data_archive, data_extract, control_archive, control_extract ), &vec![], &ScriptOptions::new(), ); // if run_cmd! { // mkdir -p ${output}; // mkdir -p ${data_extract}; // mkdir -p ${control_extract}; // ar -x ${archive_path} --output=${output}; // tar -xf ${data_archive} -C ${data_extract}; // tar -xf ${control_archive} -C ${control_extract}; // } // .is_err() // { // return Err(Error::new(ErrorKind::Other, "Error extracting files")); // } } Ok(output) }
use demo_parser::reader::Reader; use demo_parser::structs::demo::Demo; #[test] fn new() { let mut reader = Reader::new_from_path("./tests/test_data/test2.dem").unwrap(); let demo = Demo::new(&mut reader).unwrap(); }
use common::{extend, is_atomic, is_self_evaluating, lookup, read, update, Result};
use lisp_core::common::{BasicLispValue, CombinedLispOps, NumericLispValue};
use lisp_core::simple::Value as lcValue;
use std::rc::Rc;

// Unfortunately, this interpreter is not properly tail-recursive.

/// REPL entry point: read one expression, evaluate it against the global
/// environment, print the result (or the error), forever.
fn main() {
    let env = init_global_environment();
    loop {
        match read().and_then(|expr| evaluate(&expr, &env)) {
            Ok(value) => println!("{:?}", value),
            Err(e) => eprintln!("Error: {:?}", e),
        }
    }
}

/// Binds `$name` to a primitive procedure backed by the Rust closure `$func`.
macro_rules! def_primitive {
    ($env:expr, $name:expr, $func:expr) => {
        define!($env, $name, lcValue::Function(Callable(Rc::new($func))))
    };
}

/// Prepends the binding `($name . $value)` onto the association-list
/// environment `$env`.
macro_rules! define {
    ($env:expr, $name:expr, $value:expr) => {
        Value::cons(Value::cons(lcValue::Symbol($name), $value), $env)
    };
}

/// Builds the initial global environment: a few pre-declared (undefined)
/// globals plus the primitive procedures (pair ops, arithmetic, comparison).
fn init_global_environment() -> Value {
    let env = define!(lcValue::Nil, "x", lcValue::Undefined);
    let env = define!(env, "y", lcValue::Undefined);
    let env = define!(env, "z", lcValue::Undefined);
    let env = define!(env, "fib", lcValue::Undefined);
    let env = define!(env, "list", lcValue::Undefined);
    let env = def_primitive!(env, "cons", |args: &Value| Ok(Value::cons(
        args.car()?.clone(),
        args.cadr()?.clone()
    )));
    // car/cdr operate on the *first argument* of the call, hence caar/cdar.
    let env = def_primitive!(env, "car", |args: &Value| Ok(args.caar()?.clone()));
    let env = def_primitive!(env, "cdr", |args: &Value| Ok(args.cdar()?.clone()));
    let env = def_primitive!(env, "set-car!", |args: &Value| {
        args.car()?.set_car(args.cadr()?.clone())?;
        Ok(lcValue::Undefined)
    });
    let env = def_primitive!(env, "set-cdr!", |args: &Value| {
        args.car()?.set_cdr(args.cadr()?.clone())?;
        Ok(lcValue::Undefined)
    });
    // Binary-only arithmetic/comparison primitives.
    let env = def_primitive!(env, "+", |args: &Value| Ok(args
        .car()?
        .add(args.cadr()?)?));
    let env = def_primitive!(env, "-", |args: &Value| Ok(args
        .car()?
        .sub(args.cadr()?)?));
    let env = def_primitive!(env, "<", |args: &Value| Ok(args
        .car()?
        .is_less(args.cadr()?)?));
    env
}

/// Evaluates `expr` in `env`: symbols are looked up, self-evaluating atoms
/// returned as-is, and pairs dispatched on their head (special form or
/// procedure application).
fn evaluate(expr: &Value, env: &Value) -> Result<Value> {
    use lcValue::*;
    if is_atomic(expr) {
        if expr.is_symbol() {
            Ok(lookup(expr, env)?.clone())
        } else if is_self_evaluating(expr) {
            Ok(expr.clone())
        } else {
            panic!("Cannot evaluate {:?}", expr)
        }
    } else {
        match expr.car()? {
            Symbol("quote") => Ok(expr.cadr()?.clone()),
            Symbol("if") => {
                if evaluate(expr.cadr()?, env)?.as_bool().unwrap() {
                    evaluate(expr.caddr()?, env)
                } else {
                    evaluate(expr.cadddr()?, env)
                }
            }
            Symbol("begin") => eprogn(expr.cdr()?, env),
            Symbol("set!") => update(expr.cadr()?, env, &evaluate(expr.caddr()?, env)?),
            Symbol("lambda") => make_function(expr.cadr()?, expr.cddr()?, env),
            // Anything else: evaluate the head to a procedure and apply it
            // to the evaluated arguments.
            _ => invoke(&evaluate(expr.car()?, env)?, &evlis(expr.cdr()?, env)?),
        }
    }
}

/// Evaluates a sequence of expressions, returning the value of the last one
/// (`Undefined` for an empty sequence).
fn eprogn(exps: &Value, env: &Value) -> Result<Value> {
    if exps.is_pair() {
        let rest = exps.cdr()?;
        if rest.is_pair() {
            evaluate(exps.car()?, env)?;
            eprogn(exps.cdr()?, env)
        } else {
            evaluate(exps.car()?, env)
        }
    } else {
        Ok(lcValue::Undefined)
    }
}

/// Evaluates each element of an argument list, preserving order.
fn evlis(exps: &Value, env: &Value) -> Result<Value> {
    if exps.is_pair() {
        let argument = evaluate(exps.car()?, env)?;
        Ok(Value::cons(argument, evlis(exps.cdr()?, env)?))
    } else {
        Ok(lcValue::Nil)
    }
}

/// Builds a closure: the lambda captures `params`, `body`, and the defining
/// environment, and evaluates its body in an extended environment on call.
fn make_function(params: &Value, body: &Value, env: &Value) -> Result<Value> {
    let params = params.clone();
    let body = body.clone();
    let env = env.clone();
    Ok(lcValue::Function(Callable(Rc::new(move |args| {
        eprogn(&body, &extend(&env, &params, args)?)
    }))))
}

/// Applies a procedure value to an (already evaluated) argument list.
fn invoke(func: &Value, args: &Value) -> Result<Value> {
    match func {
        lcValue::Function(Callable(func)) => func(args),
        _ => panic!("Not a function: {:?}", func),
    }
}

// The interpreter's value type: the generic lisp value specialised with our
// callable representation.
type Value = lcValue<Callable>;

/// A reference-counted native procedure.
#[derive(Clone)]
struct Callable(Rc<dyn Fn(&Value) -> Result<Value>>);

impl std::fmt::Debug for Callable {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Procedures have no printable structure; show their address.
        write!(f, "<callable {:p}>", &*self.0)
    }
}
use std::fs::File; use std::io; use std::io::{Read, Write}; use std::path::Path; use exonum::crypto; use exonum::crypto::{PublicKey, SecretKey}; use exonum::encoding::serialize::FromHex; use dmbc::config; pub fn pair(name: &str) -> io::Result<(PublicKey, SecretKey)> { let keys_path = config::config().api().keys_path(); let public_path = keys_path.clone() + "/" + name + ".pub"; let secret_path = keys_path.clone() + "/" + name; slurp(&public_path) .map(|key_string| PublicKey::from_hex(key_string).unwrap()) .and_then(|public_key| { let key_string = slurp(&secret_path)?; let secret_key = SecretKey::from_hex(key_string).unwrap(); Ok((public_key, secret_key)) }) .or_else(|e| { if e.kind() != io::ErrorKind::NotFound { return Err(e); } let (public_key, secret_key) = crypto::gen_keypair(); let mut public_file = File::create(public_path)?; let mut secret_file = File::create(secret_path)?; write!(public_file, "{}", public_key.to_hex())?; write!(secret_file, "{}", secret_key.to_hex())?; Ok((public_key, secret_key)) }) } fn slurp<P: AsRef<Path>>(filename: P) -> io::Result<String> { let mut out = String::new(); File::open(filename) .and_then(|mut file| file.read_to_string(&mut out)) .map(move |_| out) }
pub mod output { pub trait Echo { /// ### 改行あり出力 /// ``` rust /// let abc = "abc"; /// abc.echo(); /// // expected: /// // abc /// /// let v = vec![1, 2, 3]; /// v.echo(); /// // expected: /// // 1 /// // 2 /// // 3 /// ``` fn echo(&self); /// ### 各要素の間に何か挟み、最後に改行をつけて出力 /// ``` rust /// let abc = "abc"; /// abc.echo_with("d"); /// // expected: abcd /// /// let v = vec![1, 2, 3]; /// v.echo_with(" "); /// // expected: /// // 1 2 3 /// ``` fn echo_with(&self, s: &str); } impl<T> Echo for T where T: std::fmt::Display, { fn echo(&self) { println!("{}", self); } fn echo_with(&self, s: &str) { print!("{}{}", self, s); } } impl<T> Echo for [T] where T: std::fmt::Display, { fn echo(&self) { use itertools::Itertools; println!("{}", self.iter().format("\n")); } fn echo_with(&self, s: &str) { use itertools::Itertools; println!("{}", self.iter().format(s)); } } }
//! Render example where each glyph pixel is output as an ascii character.
use rusttype::{point, Font, Scale};
use std::io::Write;

fn main() {
    // Load the font from argv[1] if given, otherwise fall back to the
    // bundled WenQuanYi Micro Hei.
    let font = if let Some(font_path) = std::env::args().nth(1) {
        let font_path = std::env::current_dir().unwrap().join(font_path);
        let data = std::fs::read(&font_path).unwrap();
        Font::try_from_vec(data).unwrap_or_else(|| {
            panic!("error constructing a Font from data at {:?}", font_path);
        })
    } else {
        eprintln!("No font specified ... using WenQuanYiMicroHei.ttf");
        let font_data = include_bytes!("../fonts/wqy-microhei/WenQuanYiMicroHei.ttf");
        Font::try_from_bytes(font_data as &[u8]).expect("error constructing a Font from bytes")
    };

    // Desired font pixel height
    let height: f32 = 12.4; // to get 80 chars across (fits most terminals); adjust as desired
    let pixel_height = height.ceil() as usize;

    // 2x scale in x direction to counter the aspect ratio of monospace characters.
    let scale = Scale {
        x: height * 2.0,
        y: height,
    };

    // The origin of a line of text is at the baseline (roughly where
    // non-descending letters sit). We don't want to clip the text, so we shift
    // it down with an offset when laying it out. v_metrics.ascent is the
    // distance between the baseline and the highest edge of any glyph in
    // the font. That's enough to guarantee that there's no clipping.
    let v_metrics = font.v_metrics(scale);
    let offset = point(0.0, v_metrics.ascent);

    // Glyphs to draw for "RustType". Feel free to try other strings.
    let glyphs: Vec<_> = font.layout("RustType", scale, offset).collect();

    // Find the most visually pleasing width to display
    // (right edge of the last glyph: its x position plus its advance width).
    let width = glyphs
        .iter()
        .rev()
        .map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
        .next()
        .unwrap_or(0.0)
        .ceil() as usize;

    println!("width: {}, height: {}", width, pixel_height);

    // Rasterise directly into ASCII art.
    // The buffer starts as all-'@' (the zero-coverage character); draw()
    // only visits pixels inside each glyph's bounding box, so everything
    // else stays '@'.
    let mut pixel_data = vec![b'@'; width * pixel_height];
    let mapping = b"@%#x+=:-. "; // The approximation of greyscale
    let mapping_scale = (mapping.len() - 1) as f32;
    for g in glyphs {
        if let Some(bb) = g.pixel_bounding_box() {
            g.draw(|x, y, v| {
                // v should be in the range 0.0 to 1.0
                let i = (v * mapping_scale + 0.5) as usize;
                // so something's wrong if you get $ in the output.
                let c = mapping.get(i).cloned().unwrap_or(b'$');
                // Glyph-local coordinates -> bitmap coordinates.
                let x = x as i32 + bb.min.x;
                let y = y as i32 + bb.min.y;
                // There's still a possibility that the glyph clips the boundaries of the bitmap
                if x >= 0 && x < width as i32 && y >= 0 && y < pixel_height as i32 {
                    let x = x as usize;
                    let y = y as usize;
                    pixel_data[x + y * width] = c;
                }
            })
        }
    }

    // Print it out
    let stdout = ::std::io::stdout();
    let mut handle = stdout.lock();
    for j in 0..pixel_height {
        handle
            .write_all(&pixel_data[j * width..(j + 1) * width])
            .unwrap();
        handle.write_all(b"\n").unwrap();
    }
}
use super::super::schema::design; #[derive(Queryable)] pub struct Design { pub id: i32, pub title: String, } #[derive(Insertable)] #[table_name = "design"] pub struct NewDesign<'a> { pub title: &'a str, }
// NOTE(review): this file follows the svd2rust register-access pattern (see
// the docs.rs/svd2rust link in the SPEC doc below); files of this shape are
// typically generated from an SVD description, so hand edits here would be
// lost on regeneration.

#[doc = "Register `TIMx_SR` reader"]
pub type R = crate::R<TIMX_SR_SPEC>;
#[doc = "Register `TIMx_SR` writer"]
pub type W = crate::W<TIMX_SR_SPEC>;
#[doc = "Field `UIF` reader - UIF"]
pub type UIF_R = crate::BitReader;
#[doc = "Field `UIF` writer - UIF"]
pub type UIF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CC1IF` reader - CC1IF"]
pub type CC1IF_R = crate::BitReader;
#[doc = "Field `CC1IF` writer - CC1IF"]
pub type CC1IF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COMIF` reader - COMIF"]
pub type COMIF_R = crate::BitReader;
#[doc = "Field `COMIF` writer - COMIF"]
pub type COMIF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BIF` reader - BIF"]
pub type BIF_R = crate::BitReader;
#[doc = "Field `BIF` writer - BIF"]
pub type BIF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CC1OF` reader - CC1OF"]
pub type CC1OF_R = crate::BitReader;
#[doc = "Field `CC1OF` writer - CC1OF"]
pub type CC1OF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;

// Field readers: each extracts one status bit from the 16-bit register value.
impl R {
    #[doc = "Bit 0 - UIF"]
    #[inline(always)]
    pub fn uif(&self) -> UIF_R {
        UIF_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - CC1IF"]
    #[inline(always)]
    pub fn cc1if(&self) -> CC1IF_R {
        CC1IF_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 5 - COMIF"]
    #[inline(always)]
    pub fn comif(&self) -> COMIF_R {
        COMIF_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 7 - BIF"]
    #[inline(always)]
    pub fn bif(&self) -> BIF_R {
        BIF_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 9 - CC1OF"]
    #[inline(always)]
    pub fn cc1of(&self) -> CC1OF_R {
        CC1OF_R::new(((self.bits >> 9) & 1) != 0)
    }
}

// Field writers: each returns a proxy parameterised by the bit offset.
impl W {
    #[doc = "Bit 0 - UIF"]
    #[inline(always)]
    #[must_use]
    pub fn uif(&mut self) -> UIF_W<TIMX_SR_SPEC, 0> {
        UIF_W::new(self)
    }
    #[doc = "Bit 1 - CC1IF"]
    #[inline(always)]
    #[must_use]
    pub fn cc1if(&mut self) -> CC1IF_W<TIMX_SR_SPEC, 1> {
        CC1IF_W::new(self)
    }
    #[doc = "Bit 5 - COMIF"]
    #[inline(always)]
    #[must_use]
    pub fn comif(&mut self) -> COMIF_W<TIMX_SR_SPEC, 5> {
        COMIF_W::new(self)
    }
    #[doc = "Bit 7 - BIF"]
    #[inline(always)]
    #[must_use]
    pub fn bif(&mut self) -> BIF_W<TIMX_SR_SPEC, 7> {
        BIF_W::new(self)
    }
    #[doc = "Bit 9 - CC1OF"]
    #[inline(always)]
    #[must_use]
    pub fn cc1of(&mut self) -> CC1OF_W<TIMX_SR_SPEC, 9> {
        CC1OF_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u16) -> &mut Self {
        self.bits = bits;
        self
    }
}

#[doc = "TIM16/TIM17 status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`timx_sr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`timx_sr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct TIMX_SR_SPEC;
impl crate::RegisterSpec for TIMX_SR_SPEC {
    // Underlying register width is 16 bits.
    type Ux = u16;
}
#[doc = "`read()` method returns [`timx_sr::R`](R) reader structure"]
impl crate::Readable for TIMX_SR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`timx_sr::W`](W) writer structure"]
impl crate::Writable for TIMX_SR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets TIMx_SR to value 0"]
impl crate::Resettable for TIMX_SR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use crate::arena::ArenaMut;
use crate::libs::js_object::Object;
use crate::libs::random_id::U128Id;
use async_trait::async_trait;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::{prelude::*, JsCast};

/// Controls how deeply nested data is serialized when packing.
/// NOTE(review): variant semantics are inferred from the names only
/// (`OnlyId` ≈ reference by id, `Recursive` ≈ full tree, `FirstBlock` ≈ one
/// level) — confirm against the block implementations that consume this.
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
pub enum PackDepth {
    OnlyId,
    Recursive,
    FirstBlock,
}

/// Conversion between Rust values and `JsValue` trees.
///
/// `pack` serializes the value; `unpack` reconstructs it, returning `None`
/// when `data` does not have the expected shape. `arena` gives impls access
/// to shared block storage during reconstruction.
#[async_trait(?Send)]
pub trait Pack {
    async fn pack(&self, pack_depth: PackDepth) -> JsValue;
    async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>>;
}

// Ids delegate to their own JsValue conversion.
#[async_trait(?Send)]
impl Pack for U128Id {
    async fn pack(&self, _: PackDepth) -> JsValue {
        self.to_jsvalue()
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        Self::from_jsvalue(data).map(Box::new)
    }
}

#[async_trait(?Send)]
impl Pack for bool {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(*self)
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        data.as_bool().map(Box::new)
    }
}

#[async_trait(?Send)]
impl Pack for String {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(self)
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        data.as_string().map(Box::new)
    }
}

#[async_trait(?Send)]
impl Pack for f64 {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(*self)
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        data.as_f64().map(Box::new)
    }
}

// Integer impls round-trip through f64 (the only JS number type), so values
// above 2^53 lose precision.
#[async_trait(?Send)]
impl Pack for usize {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(*self as f64)
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        data.as_f64().map(|x| x as usize).map(Box::new)
    }
}

#[async_trait(?Send)]
impl Pack for u32 {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(*self as f64)
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        data.as_f64().map(|x| x as u32).map(Box::new)
    }
}

// Fixed-size arrays are packed as JS arrays; unpack fails if any element is
// missing or non-numeric.
#[async_trait(?Send)]
impl Pack for [f64; 2] {
    async fn pack(&self, _: PackDepth) -> JsValue {
        array![self[0], self[1]].into()
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        let data = js_sys::Array::from(data).to_vec();
        let data_0 = unwrap!(data.get(0).and_then(|x| x.as_f64()); None);
        let data_1 = unwrap!(data.get(1).and_then(|x| x.as_f64()); None);
        Some(Box::new([data_0, data_1]))
    }
}

#[async_trait(?Send)]
impl Pack for [f64; 3] {
    async fn pack(&self, _: PackDepth) -> JsValue {
        array![self[0], self[1], self[2]].into()
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        let data = js_sys::Array::from(data).to_vec();
        let data_0 = unwrap!(data.get(0).and_then(|x| x.as_f64()); None);
        let data_1 = unwrap!(data.get(1).and_then(|x| x.as_f64()); None);
        let data_2 = unwrap!(data.get(2).and_then(|x| x.as_f64()); None);
        Some(Box::new([data_0, data_1, data_2]))
    }
}

#[async_trait(?Send)]
impl Pack for [i32; 3] {
    async fn pack(&self, _: PackDepth) -> JsValue {
        array![self[0] as f64, self[1] as f64, self[2] as f64].into()
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        let data = js_sys::Array::from(data).to_vec();
        let data_0 = unwrap!(data.get(0).and_then(|x| x.as_f64()).map(|x| x as i32); None);
        let data_1 = unwrap!(data.get(1).and_then(|x| x.as_f64()).map(|x| x as i32); None);
        let data_2 = unwrap!(data.get(2).and_then(|x| x.as_f64()).map(|x| x as i32); None);
        Some(Box::new([data_0, data_1, data_2]))
    }
}

// Timestamps are serialized as RFC 3339 strings and normalised back to UTC.
#[async_trait(?Send)]
impl Pack for chrono::DateTime<chrono::Utc> {
    async fn pack(&self, _: PackDepth) -> JsValue {
        JsValue::from(self.to_rfc3339())
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        let data = unwrap!(data.as_string(); None);
        chrono::DateTime::parse_from_rfc3339(data.as_str())
            .ok()
            .map(|x| x.with_timezone(&chrono::Utc))
            .map(Box::new)
    }
}

#[async_trait(?Send)]
impl Pack for crate::libs::color::Pallet {
    async fn pack(&self, _: PackDepth) -> JsValue {
        self.to_jsvalue()
    }
    async fn unpack(data: &JsValue, _arena: ArenaMut) -> Option<Box<Self>> {
        Self::from_jsvalue(data).map(Box::new)
    }
}
#[async_trait(?Send)] impl<T: Pack> Pack for Vec<T> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { crate::debug::log_1("Vec::pack"); let list = js_sys::Array::new(); for item in self { crate::debug::log_1("Vec::pack : item"); list.push(&item.pack(pack_depth).await); } list.into() } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = js_sys::Array::from(data).to_vec(); let mut this = vec![]; for item in data { if let Some(item) = T::unpack(&item, ArenaMut::clone(&arena)).await { this.push(*item); } } Some(Box::new(this)) } } #[async_trait(?Send)] impl<T: Pack> Pack for Rc<T> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { <T as Pack>::pack(self.as_ref(), pack_depth).await } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = T::unpack(data, arena).await; if let Some(data) = data { Some(Box::new(Rc::new(*data))) } else { None } } } #[async_trait(?Send)] impl<T: Pack> Pack for Rc<RefCell<T>> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { self.borrow().pack(pack_depth).await } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = T::unpack(data, arena).await; if let Some(data) = data { Some(Box::new(Rc::new(RefCell::new(*data)))) } else { None } } } #[async_trait(?Send)] impl<T: Pack> Pack for Option<T> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { match self { Some(x) => (object! { "_tag": "Some", "_val": x.pack(pack_depth).await }) .into(), None => (object! 
{ "_tag": "None", "_val": JsValue::null() }) .into(), } } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = unwrap!(data.dyn_ref::<crate::libs::js_object::Object>(); None); let tag = unwrap!(data.get("_tag").and_then(|x| x.as_string()); None); match tag.as_str() { "Some" => { let val = unwrap!(data.get("_val"); None); let val = T::unpack(&val, arena).await; if let Some(val) = val { Some(Box::new(Some(*val))) } else { None } } "None" => Some(Box::new(None)), _ => None, } } } #[async_trait(?Send)] impl<T: Pack, U: Pack> Pack for (T, U) { async fn pack(&self, pack_depth: PackDepth) -> JsValue { array![self.0.pack(pack_depth).await, self.1.pack(pack_depth).await].into() } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = js_sys::Array::from(data).to_vec(); if let Some(data) = join_some!(data.get(0), data.get(1)) { let data_0 = T::unpack(&data.0, ArenaMut::clone(&arena)).await; let data_1 = U::unpack(&data.1, ArenaMut::clone(&arena)).await; if let Some((data_0, data_1)) = join_some!(data_0, data_1) { return Some(Box::new((*data_0, *data_1))); } } None } } #[async_trait(?Send)] impl Pack for regex::Regex { async fn pack(&self, _: PackDepth) -> JsValue { JsValue::from(self.as_str()) } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { data.as_string() .and_then(|data| regex::Regex::new(data.as_str()).ok()) .map(|x| Box::new(x)) } } #[async_trait(?Send)] impl<T: Pack> Pack for crate::libs::select_list::SelectList<T> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { let data = js_sys::Array::new(); for item in self.iter() { data.push(&item.pack(pack_depth).await); } (object! 
{ "selected": self.selected_idx(), "data": data }) .into() } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { if let Some(data) = data.dyn_ref::<Object>() { let selected = data .get("selected") .and_then(|x| x.as_f64()) .map(|x| x as usize); if let Some((selected, data)) = join_some!(selected, data.get("data")) { let data = js_sys::Array::from(&data).to_vec(); let mut payload = vec![]; for item in data { let item = T::unpack(&item, ArenaMut::clone(&arena)).await; if let Some(item) = item { payload.push(*item); } } return Some(Box::new(Self::new(payload, selected))); } } None } } #[async_trait(?Send)] impl<K: Pack + Eq + std::hash::Hash, T: Pack> Pack for std::collections::HashMap<K, T> { async fn pack(&self, pack_depth: PackDepth) -> JsValue { let data = js_sys::Array::new(); for (key, value) in self { let key = key.pack(pack_depth).await; let value = value.pack(pack_depth).await; data.push(array![&key, &value].as_ref()); } data.into() } async fn unpack(data: &JsValue, arena: ArenaMut) -> Option<Box<Self>> { let data = js_sys::Array::from(data).to_vec(); let mut this = std::collections::HashMap::new(); for item in data { let item = js_sys::Array::from(&item).to_vec(); if let Some((key, value)) = join_some!(item.get(0), item.get(1)) { let key = K::unpack(&key, ArenaMut::clone(&arena)).await; let value = T::unpack(&value, ArenaMut::clone(&arena)).await; if let Some((key, value)) = join_some!(key, value) { this.insert(*key, *value); } } } Some(Box::new(this)) } }
//! Convenience methods for working with arrays.

/// Implemented by arrays of different lengths.
pub trait Array<T> {
    /// Creates array from a function of each component index.
    fn from_fn<F>(f: F) -> Self where F: FnMut(usize) -> T;

    /// Creates an array from an iterator.
    /// Will fail if the iterator does not contain enough elements.
    fn from_iter<I: Iterator<Item=T>>(mut iter: I) -> Self where Self: Sized {
        Self::from_fn(|_| iter.next().unwrap())
    }
}

impl<T> Array<T> for [T; 2] {
    fn from_fn<F>(mut gen: F) -> [T; 2] where F: FnMut(usize) -> T {
        // Components are generated in index order.
        let c0 = gen(0);
        let c1 = gen(1);
        [c0, c1]
    }
}

/// An array with 2 components.
pub trait Array2<T> {
    /// Converts array into another type,
    /// by executing a function for each component.
    fn map<U, F>(self, f: F) -> [U; 2] where F: Fn(T) -> U;
    /// Returns the `x` component.
    fn x(self) -> T;
    /// Returns the `y` component.
    fn y(self) -> T;
}

impl<T: Copy> Array2<T> for [T; 2] {
    fn map<U, F>(self, f: F) -> [U; 2] where F: Fn(T) -> U {
        let [a, b] = self;
        [f(a), f(b)]
    }
    fn x(self) -> T { self[0] }
    fn y(self) -> T { self[1] }
}

impl<T> Array<T> for [T; 3] {
    fn from_fn<F>(mut gen: F) -> [T; 3] where F: FnMut(usize) -> T {
        let c0 = gen(0);
        let c1 = gen(1);
        let c2 = gen(2);
        [c0, c1, c2]
    }
}

/// An array with 3 components.
pub trait Array3<T> {
    /// Converts array into another type,
    /// by executing a function for each component.
    fn map<U, F>(self, f: F) -> [U; 3] where F: Fn(T) -> U;
    /// Returns the `x` component.
    fn x(self) -> T;
    /// Returns the `y` component.
    fn y(self) -> T;
    /// Returns the `z` component.
    fn z(self) -> T;
}

impl<T: Copy> Array3<T> for [T; 3] {
    fn map<U, F>(self, f: F) -> [U; 3] where F: Fn(T) -> U {
        let [a, b, c] = self;
        [f(a), f(b), f(c)]
    }
    fn x(self) -> T { self[0] }
    fn y(self) -> T { self[1] }
    fn z(self) -> T { self[2] }
}

impl<T> Array<T> for [T; 4] {
    fn from_fn<F>(mut gen: F) -> [T; 4] where F: FnMut(usize) -> T {
        let c0 = gen(0);
        let c1 = gen(1);
        let c2 = gen(2);
        let c3 = gen(3);
        [c0, c1, c2, c3]
    }
}

/// An array with 4 components.
pub trait Array4<T> {
    /// Converts array into another type,
    /// by executing a function for each component.
    fn map<U, F>(self, f: F) -> [U; 4] where F: Fn(T) -> U;
    /// Returns the `x` component.
    fn x(self) -> T;
    /// Returns the `y` component.
    fn y(self) -> T;
    /// Returns the `z` component.
    fn z(self) -> T;
    /// Returns the `w` component.
    fn w(self) -> T;
}

impl<T: Copy> Array4<T> for [T; 4] {
    fn map<U, F>(self, f: F) -> [U; 4] where F: Fn(T) -> U {
        let [a, b, c, d] = self;
        [f(a), f(b), f(c), f(d)]
    }
    fn x(self) -> T { self[0] }
    fn y(self) -> T { self[1] }
    fn z(self) -> T { self[2] }
    fn w(self) -> T { self[3] }
}

impl<T> Array<T> for [T; 16] {
    fn from_fn<F>(mut gen: F) -> [T; 16] where F: FnMut(usize) -> T {
        // Array literals evaluate left-to-right, so indices 0..16 are
        // generated in order.
        [
            gen(0), gen(1), gen(2), gen(3),
            gen(4), gen(5), gen(6), gen(7),
            gen(8), gen(9), gen(10), gen(11),
            gen(12), gen(13), gen(14), gen(15),
        ]
    }
}
use std::fs; use std::io::{self, Error as IoError, Read, Write}; use anyhow::Context; use byteorder::{BigEndian, ByteOrder}; use clap::{App, Arg}; use encoding_rs::Encoding; use kbinxml::{EncodingType, Options, Printer}; fn display_buf(buf: &[u8]) -> Result<(), IoError> { io::stdout().write_all(buf)?; println!(); Ok(()) } fn compare_slice(left: &[u8], right: &[u8]) { let node_buf_length = BigEndian::read_u32(&left[4..8]); let data_buf_start = 8 + node_buf_length as usize; let mut i = 0; let mut mismatches = Vec::new(); while i < left.len() && i < right.len() { if left[i] != right[i] { mismatches.push((i, left[i], right[i])); } i += 1; } if let Some(ref first) = mismatches.first() { eprintln!("Left does not equal right at the following indexes:"); for (i, left, right) in &mismatches { let (section, offset) = if *i < data_buf_start { ("node buffer", (*i as isize) - 8) } else { ("data buffer", (*i as isize) - 4 - (data_buf_start as isize)) }; eprintln!( "index {0} ({3}, offset: {4}), left: {1:3} (0x{1:x}),\tright: {2:3} (0x{2:x})", i, left, right, section, offset ); } let (i, _, _) = first; eprintln!( r#" left: `0x{:02x?}` right: `0x{:02x?}`"#, &left[*i..], &right[*i..] 
); } } fn main() -> Result<(), anyhow::Error> { pretty_env_logger::init(); let matches = App::new("kbinxml") .about(env!("CARGO_PKG_DESCRIPTION")) .version(env!("CARGO_PKG_VERSION")) .author("Matt Bilker <me@mbilker.us>") .arg( Arg::new("printer") .help("Turn on the NodeCollection and NodeDefinition debug printer") .short('p') .long("printer"), ) .arg( Arg::new("encoding") .help("Set the encoding used when encoding kbin data") .short('e') .long("encoding") .takes_value(true), ) .arg( Arg::new("input") .help("The file to convert") .index(1) .required(true), ) .get_matches(); let printer_enabled = matches.is_present("printer"); let file_name = matches.value_of("input").unwrap(); let output_encoding = if let Some(label) = matches.value_of("encoding") { let encoding = Encoding::for_label(label.as_bytes()).context("No encoding found for label")?; Some(EncodingType::from_encoding(encoding)?) } else { None }; eprintln!("file_name: {}", file_name); // Read '-' as standard input. let contents = if file_name == "-" { let mut contents = Vec::new(); io::stdin().read_to_end(&mut contents)?; contents } else { fs::read(file_name)? }; if kbinxml::is_binary_xml(&contents) { if printer_enabled { Printer::run(contents.clone())?; } let (collection, _encoding) = kbinxml::from_slice(&contents)?; let text_original = kbinxml::to_text_xml(&collection)?; display_buf(&text_original)?; let (collection, encoding_original) = kbinxml::from_slice(&contents)?; let options = Options::with_encoding(output_encoding.unwrap_or(encoding_original)); let buf = kbinxml::to_binary_with_options(options, &collection)?; compare_slice(&buf, &contents); } else { let (collection, encoding) = kbinxml::from_text_xml(&contents)?; let options = Options::with_encoding(output_encoding.unwrap_or(encoding)); let buf = kbinxml::to_binary_with_options(options, &collection)?; if printer_enabled { Printer::run(buf.clone())?; } io::stdout().write_all(&buf)?; } Ok(()) }
use reqwest::header::{AUTHORIZATION, HeaderName, HeaderValue};
use base64::encode;

use crate::authentication::Authentication;

/// HTTP Basic authentication credentials: a username plus an optional
/// password.
#[derive(Debug)]
pub struct BasicAuth {
    username: String,
    password: Option<String>,
}

impl BasicAuth {
    /// Builds a `BasicAuth` from a username and an optional password.
    pub fn new(username: String, password: Option<String>) -> BasicAuth {
        BasicAuth { username, password }
    }
}

impl Authentication for BasicAuth {
    fn username(&self) -> Option<String> {
        Some(self.username.clone())
    }

    fn password(&self) -> Option<String> {
        self.password.clone()
    }

    /// Renders the credentials as an `Authorization: Basic <base64>` header.
    /// When no password is set, the encoded payload is `"<username>:"`.
    fn as_header(&self) -> Option<(HeaderName, HeaderValue)> {
        // "user:pass", or "user:" when the password is absent.
        let credentials = format!(
            "{}:{}",
            self.username,
            self.password().unwrap_or_default()
        );
        let header_value = format!("Basic {}", encode(&credentials));
        Some((AUTHORIZATION, HeaderValue::from_str(&*header_value).unwrap()))
    }
}
use core::marker::PhantomData;

use alloc::vec::Vec;

use necsim_core::cogs::{
    Backup, DispersalSampler, EmigrationExit, Habitat, ImmigrationEntry, LineageReference,
    LocallyCoherentLineageStore, RngCore, SpeciationProbability,
};
use necsim_core_bond::{NonNegativeF64, PositiveF64};

mod sampler;

/// Active lineage sampler for the classical coalescence algorithm.
///
/// Tracks which lineage references are currently active; the type parameters
/// mirror the cog traits of the enclosing simulation (habitat, RNG, lineage
/// store, emigration, dispersal, speciation, immigration).
#[allow(clippy::module_name_repetitions)]
#[derive(Debug)]
pub struct ClassicalActiveLineageSampler<
    H: Habitat,
    G: RngCore,
    R: LineageReference<H>,
    S: LocallyCoherentLineageStore<H, R>,
    X: EmigrationExit<H, G, R, S>,
    D: DispersalSampler<H, G>,
    N: SpeciationProbability<H>,
    I: ImmigrationEntry,
> {
    // References to the lineages currently active in this sampler.
    active_lineage_references: Vec<R>,
    // Timestamp state; presumably the time of the last processed event and a
    // pre-sampled next event time — TODO confirm against `mod sampler`.
    last_event_time: NonNegativeF64,
    next_event_time: Option<PositiveF64>,
    // Zero-sized marker tying the otherwise-unused type parameters to this
    // struct so the generics stay coherent across impls.
    _marker: PhantomData<(H, G, S, X, D, N, I)>,
}

impl<
        H: Habitat,
        G: RngCore,
        R: LineageReference<H>,
        S: LocallyCoherentLineageStore<H, R>,
        X: EmigrationExit<H, G, R, S>,
        D: DispersalSampler<H, G>,
        N: SpeciationProbability<H>,
        I: ImmigrationEntry,
    > ClassicalActiveLineageSampler<H, G, R, S, X, D, N, I>
{
    /// Creates a sampler whose active set contains every local lineage
    /// reference in `lineage_store` that currently resolves to a stored
    /// lineage; event times start at zero / unsampled.
    #[must_use]
    pub fn new(lineage_store: &S) -> Self {
        Self {
            active_lineage_references: lineage_store
                .iter_local_lineage_references()
                // Only keep references that still resolve to a lineage.
                .filter(|local_reference| lineage_store.get(local_reference.clone()).is_some())
                .collect(),
            last_event_time: NonNegativeF64::zero(),
            next_event_time: None,
            _marker: PhantomData::<(H, G, S, X, D, N, I)>,
        }
    }
}

#[contract_trait]
impl<
        H: Habitat,
        G: RngCore,
        R: LineageReference<H>,
        S: LocallyCoherentLineageStore<H, R>,
        X: EmigrationExit<H, G, R, S>,
        D: DispersalSampler<H, G>,
        N: SpeciationProbability<H>,
        I: ImmigrationEntry,
    > Backup for ClassicalActiveLineageSampler<H, G, R, S, X, D, N, I>
{
    // Deep-copies the sampler state; `unsafe` per the `Backup` trait contract
    // (see `necsim_core::cogs::Backup` for the caller obligations).
    unsafe fn backup_unchecked(&self) -> Self {
        Self {
            active_lineage_references: self.active_lineage_references.clone(),
            last_event_time: self.last_event_time,
            next_event_time: self.next_event_time,
            _marker: PhantomData::<(H, G, S, X, D, N, I)>,
        }
    }
}
extern crate wayland_scanner; use std::env::var; use std::path::Path; use wayland_scanner::{Side, generate_code}; // Location of the xml file, relative to the `Cargo.toml` fn main() { let protocol_file = "./idle.xml"; // Target directory for the generate files let out_dir_str = var("OUT_DIR").unwrap(); let out_dir = Path::new(&out_dir_str); generate_code( protocol_file, out_dir.join("idle_client_api.rs"), Side::Client, // Replace by `Side::Server` for server-side code ); }
//! Nodes that have children and tick them in a sequential order as long as they
//! succeed.

use crate::{
    node::{Node, Tickable},
    Status,
};

/// A node that will tick its children in order as long as they succeed.
///
/// This node will tick all of its children in order until one of them returns
/// either `Status::Running` or `Status::Failed`. If none do, this node
/// succeeds.
///
/// The difference between this node and the normal `Sequence` is that this
/// node will always begin ticking from its first child, whereas the normal
/// version will resume ticking with the node that previously returned that it
/// was running. This makes the active version better for things that must be
/// checked each tick (e.g., if motors are too hot) and the normal version
/// better for completing series of actions.
///
/// Due to the reticking, some nodes that succeeded on previous ticks may fail
/// on later ticks.
///
/// This node is equivalent to an "and" statement.
///
/// # State
///
/// **Initialized:** Before being ticked after being created or reset.
///
/// **Running:** The latest ticked child node returned that it was running.
///
/// **Succeeded:** All child nodes succeeded.
///
/// **Failed:** A child node failed.
///
/// # Children
///
/// Any number of children. A child node will be ticked every time this node is
/// ticked as long as all the sibling nodes to the left succeeded.
///
/// Note that, if a node is running and a sibling to the left returned either
/// failure or running, the child node will be reset. Additionally, the children
/// will be reset each time the parent is.
///
/// # Examples
///
/// A node that returns success:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = ActiveSequence::new()
///     .with_child(AlwaysSucceed::new())
///     .with_child(AlwaysSucceed::new())
///     .with_child(AlwaysSucceed::new());
///
/// assert_eq!(node.tick(&mut ()), Status::Succeeded);
/// ```
///
/// A node that returns it is running:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = ActiveSequence::new()
///     .with_child(AlwaysSucceed::new())
///     .with_child(AlwaysRunning::new())
///     .with_child(AlwaysFail::new());
///
/// assert_eq!(node.tick(&mut ()), Status::Running);
/// ```
///
/// A node that returns it failed:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = ActiveSequence::new()
///     .with_child(AlwaysSucceed::new())
///     .with_child(AlwaysSucceed::new())
///     .with_child(AlwaysFail::new());
///
/// assert_eq!(node.tick(&mut ()), Status::Failed);
/// ```
pub struct ActiveSequence<'a, W> {
    /// Vector containing the children of this node.
    children: Vec<Node<'a, W>>,
}

impl<'a, W> ActiveSequence<'a, W>
where
    W: 'a,
{
    /// Creates a new, empty `ActiveSequence` node; add children with
    /// `with_child` / `with_children`.
    pub fn new() -> Self {
        ActiveSequence {
            children: Vec::new(),
        }
    }

    /// Appends a single child node, returning `self` for chaining.
    pub fn with_child<T>(mut self, child: T) -> Self
    where
        T: Tickable<W> + 'a,
    {
        self.children.push(child.into_node());
        self
    }

    /// Replaces the children with the given vector, returning `self` for
    /// chaining.
    pub fn with_children<T>(mut self, children: Vec<T>) -> Self
    where
        T: Tickable<W> + 'a,
    {
        self.children = children.into_iter().map(Tickable::into_node).collect();
        self
    }
}

impl<'a, W> Tickable<W> for ActiveSequence<'a, W> {
    fn tick(&mut self, world: &mut W) -> Status {
        // Tick all of our children as long as they succeed; once one returns
        // Running or Failed, every remaining child is reset instead of ticked.
        let mut ret_status = Status::Succeeded;
        for child in &mut self.children {
            if ret_status == Status::Succeeded {
                ret_status = child.tick(world);
            } else {
                child.reset();
            }
        }

        // Return whatever result we found
        ret_status
    }

    fn reset(&mut self) {
        // Reset all of our children
        for child in &mut self.children {
            child.reset();
        }
    }

    fn children(&self) -> Vec<&Node<W>> {
        self.children.iter().collect()
    }

    /// Returns the string "ActiveSequence".
    fn type_name(&self) -> &'static str {
        "ActiveSequence"
    }
}

/// Convenience macro for creating ActiveSequence nodes.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate aspen;
/// # fn main() {
/// let active_sequence = ActiveSequence! {
///     Condition!{ |&(a, _): &(u32, u32)| a < 12 },
///     Condition!{ |&(_, b)| b == 9 },
///     Condition!{ |&(a, b)| b < a }
/// };
/// # }
/// ```
#[macro_export]
macro_rules! ActiveSequence {
    ( $( $e:expr ),* ) => {
        $crate::std_nodes::ActiveSequence::new().with_children(vec![$( $e ),*])
    };
}

/// A node that will tick its children in order as long as they succeed.
///
/// This node will tick all of its children in order until one of them returns
/// either `Status::Running` or `Status::Failed`. If none do, this node
/// succeeds.
///
/// The difference between this node and an `ActiveSequence` is that this node
/// will resume ticking at the last running node whereas the active version will
/// always restart ticking from the beginning. That makes the active sequence
/// good for things that always need to be rechecked and this version good for
/// completing actions. Once a node is ticked to completion, this version will
/// *not* revisit it.
///
/// This node is equivalent to an "and" statement.
///
/// # State
///
/// **Initialized:** Before being ticked after being created or reset.
///
/// **Running:** The latest ticked child node returned that it was running.
///
/// **Succeeded:** All child nodes succeeded.
///
/// **Failed:** A child node failed.
///
/// # Children
///
/// Any number of children. A child node will only be ticked if all the nodes
/// to the left succeeded and this node has not yet completed.
///
/// Unlike the active version, children nodes will only be reset when this node
/// is reset.
///
/// # Examples
///
/// A node that returns success:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = Sequence::new(vec![
///     AlwaysSucceed::new(),
///     AlwaysSucceed::new(),
///     AlwaysSucceed::new(),
/// ]);
/// assert_eq!(node.tick(&mut ()), Status::Succeeded);
/// ```
///
/// A node that returns it is running:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = Sequence::new(vec![
///     AlwaysSucceed::new(),
///     AlwaysRunning::new(),
///     AlwaysFail::new(),
/// ]);
/// assert_eq!(node.tick(&mut ()), Status::Running);
/// ```
///
/// A node that returns it failed:
///
/// ```
/// # use aspen::std_nodes::*;
/// # use aspen::Status;
/// # use aspen::node::Tickable;
/// let mut node = Sequence::new(vec![
///     AlwaysSucceed::new(),
///     AlwaysSucceed::new(),
///     AlwaysFail::new(),
/// ]);
/// assert_eq!(node.tick(&mut ()), Status::Failed);
/// ```
pub struct Sequence<'a, W> {
    /// Vector containing the children of this node.
    children: Vec<Node<'a, W>>,
    /// Index of the next child to tick; children before it have completed.
    next_child: usize,
}

impl<'a, W> Sequence<'a, W>
where
    W: 'a,
{
    /// Creates a new `Sequence` node from a vector of Nodes.
    pub fn new(children: Vec<Node<'a, W>>) -> Node<'a, W> {
        let internals = Sequence {
            children,
            next_child: 0,
        };
        Node::new(internals)
    }
}

impl<'a, W> Tickable<W> for Sequence<'a, W> {
    fn tick(&mut self, world: &mut W) -> Status {
        // Tick the children as long as they keep succeeding, resuming from
        // wherever the previous tick left off.
        // NOTE(review): a child that returns Failed also advances
        // `next_child`, so a subsequent tick resumes *after* the failed child
        // — consistent with the "never revisit completed nodes" doc, but
        // worth confirming as intended.
        let mut ret_status = Status::Succeeded;
        while self.next_child < self.children.len() && ret_status == Status::Succeeded {
            ret_status = self.children[self.next_child].tick(world);
            if ret_status.is_done() {
                self.next_child += 1;
            }
        }

        ret_status
    }

    fn reset(&mut self) {
        // Reset all of our children
        for child in &mut self.children {
            child.reset();
        }
        self.next_child = 0;
    }

    fn children(&self) -> Vec<&Node<W>> {
        self.children.iter().collect()
    }

    /// Returns the string "Sequence".
    fn type_name(&self) -> &'static str {
        "Sequence"
    }
}

/// Convenience macro for creating Sequence nodes.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate aspen;
/// # fn main() {
/// let sequence = Sequence! {
///     Condition!{ |&(a, _): &(u32, u32)| a < 12 },
///     Condition!{ |&(_, b)| b == 9 },
///     Condition!{ |&(a, b)| b < a }
/// };
/// # }
/// ```
#[macro_export]
macro_rules! Sequence {
    ( $( $e:expr ),* ) => {
        $crate::std_nodes::Sequence::new(vec![$( $e ),*])
    };
}

#[cfg(test)]
mod tests {
    use crate::{
        node::Tickable,
        std_nodes::{ActiveSequence, NoTick, Sequence, YesTick},
        Status,
    };

    #[test]
    fn check_running() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Running),
            NoTick::new(),
        ];

        // Add them to a sequence node
        let mut seq = Sequence::new(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Running);
    }

    #[test]
    fn check_success() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Succeeded),
        ];

        // Add them to a sequence node
        let mut seq = Sequence::new(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Succeeded);
    }

    #[test]
    fn check_fail() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Failed),
            NoTick::new(),
        ];

        // Add them to a sequence node
        let mut seq = Sequence::new(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Failed);
    }

    #[test]
    fn check_active_running() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Running),
            NoTick::new(),
        ];

        // Add them to a sequence node
        let mut seq = ActiveSequence::new().with_children(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Running);
    }

    #[test]
    fn check_active_success() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Succeeded),
        ];

        // Add them to a sequence node
        let mut seq = ActiveSequence::new().with_children(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Succeeded);
    }

    #[test]
    fn check_active_fail() {
        // Set up the nodes
        let children = vec![
            YesTick::new(Status::Succeeded),
            YesTick::new(Status::Failed),
            NoTick::new(),
        ];

        // Add them to a sequence node
        let mut seq = ActiveSequence::new().with_children(children);

        // Tick the sequence
        let status = seq.tick(&mut ());

        // Drop the sequence so the nodes can do their own checks
        drop(seq);

        // Make sure we got the expected value
        assert_eq!(status, Status::Failed);
    }
}
#[cfg(test)]
struct TestExpectation {
    input: i32,
    expectation: &'static str
}

#[test]
fn it_works() {
    let tests = vec![
        TestExpectation { input: -1, expectation: "negative one" },
        TestExpectation { input: 0, expectation: "zero" },
        TestExpectation { input: 1, expectation: "one" },
        TestExpectation { input: 2, expectation: "two" },
        TestExpectation { input: 3, expectation: "three" },
        TestExpectation { input: 4, expectation: "four" },
        TestExpectation { input: 5, expectation: "five" },
        TestExpectation { input: 6, expectation: "six" },
        TestExpectation { input: 7, expectation: "seven" },
        TestExpectation { input: 8, expectation: "eight" },
        TestExpectation { input: 9, expectation: "nine" },
        TestExpectation { input: 10, expectation: "ten" },
        TestExpectation { input: 11, expectation: "eleven" },
        TestExpectation { input: 12, expectation: "twelve" },
        TestExpectation { input: 13, expectation: "thirteen" },
        TestExpectation { input: 14, expectation: "fourteen" },
        TestExpectation { input: 15, expectation: "fifteen" },
        TestExpectation { input: 16, expectation: "sixteen" },
        TestExpectation { input: 17, expectation: "seventeen" },
        TestExpectation { input: 18, expectation: "eighteen" },
        TestExpectation { input: 19, expectation: "nineteen" },
        TestExpectation { input: 20, expectation: "twenty" },
        TestExpectation { input: 21, expectation: "twenty-one" },
        TestExpectation { input: 22, expectation: "twenty-two" },
        TestExpectation { input: 23, expectation: "twenty-three" },
        TestExpectation { input: 24, expectation: "twenty-four" },
        TestExpectation { input: 25, expectation: "twenty-five" },
        TestExpectation { input: 26, expectation: "twenty-six" },
        TestExpectation { input: 27, expectation: "twenty-seven" },
        TestExpectation { input: 28, expectation: "twenty-eight" },
        TestExpectation { input: 29, expectation: "twenty-nine" },
        TestExpectation { input: 30, expectation: "thirty" },
        TestExpectation { input: 39, expectation: "thirty-nine" },
        TestExpectation { input: 49, expectation: "forty-nine" },
        TestExpectation { input: 59, expectation: "fifty-nine" },
        TestExpectation { input: 69, expectation: "sixty-nine" },
        TestExpectation { input: 79, expectation: "seventy-nine" },
        TestExpectation { input: 89, expectation: "eighty-nine" },
        TestExpectation { input: 99, expectation: "ninety-nine" },
        TestExpectation { input: 100, expectation: "one hundred" },
        TestExpectation { input: 234, expectation: "two hundred and thirty-four" },
        TestExpectation { input: 456, expectation: "four hundred and fifty-six" },
        TestExpectation { input: 999, expectation: "nine hundred and ninety-nine" },
        TestExpectation { input: 1000, expectation: "one thousand" },
        TestExpectation { input: 2345, expectation: "two thousand three hundred and forty-five" },
        TestExpectation { input: 34567, expectation: "thirty-four thousand five hundred and sixty-seven" },
        TestExpectation { input: 456789, expectation: "four hundred and fifty-six thousand seven hundred and eighty-nine" }
    ];
    for test in tests {
        assert_eq!(format(test.input), test.expectation)
    }
}

#[test]
fn project_euler_test() {
    let tests = vec![
        TestExpectation { input: 342, expectation: "three hundred and forty-two" },
        TestExpectation { input: 115, expectation: "one hundred and fifteen" }
    ];
    for test in tests {
        assert_eq!(format(test.input), test.expectation)
    }
}

/// Formats an integer with absolute value below one million as British
/// English words, e.g. `234` -> `"two hundred and thirty-four"`.
///
/// # Panics
///
/// If `num` is outside the supported range (`|num| >= 1_000_000`).
pub fn format(num: i32) -> String {
    if num < 0 {
        // Negate through i64 first: `-num` overflows for `i32::MIN`
        // (debug-build abort; wrap-around and unbounded recursion in
        // release builds).
        let positive = -(num as i64);
        if positive < 1_000_000 {
            "negative ".to_string() + &format(positive as i32)
        } else {
            panic!("Unable to format number: {}", num)
        }
    } else if num < 1_000_000 {
        format_lt_million(num)
    } else {
        panic!("Unable to format number: {}", num)
    }
}

/// Word for a single digit (0-9).
fn format_lt_ten(num: i32) -> String {
    match num {
        0 => "zero",
        1 => "one",
        2 => "two",
        3 => "three",
        4 => "four",
        5 => "five",
        6 => "six",
        7 => "seven",
        8 => "eight",
        9 => "nine",
        _ => panic!("You shouldn't have passed {} to format_lt_ten", num)
    }.to_string()
}

/// Words for 0-19. Irregular teens (10-13, 15, 18) are special-cased; the
/// rest (14, 16, 17, 19) follow the regular "<digit>teen" pattern.
fn format_lt_twenty(num: i32) -> String {
    if num < 10 {
        format_lt_ten(num)
    } else if num < 20 {
        match num {
            10 => "ten".to_string(),
            11 => "eleven".to_string(),
            12 => "twelve".to_string(),
            13 => "thirteen".to_string(),
            15 => "fifteen".to_string(),
            18 => "eighteen".to_string(),
            _ => {
                let ones_place = num % 10;
                format_lt_ten(ones_place) + "teen"
            }
        }
    } else {
        panic!("You shouldn't have passed {} to format_lt_twenty", num)
    }
}

/// Words for 0-99, hyphenating a nonzero ones place ("forty-nine").
fn format_lt_hundred(num: i32) -> String {
    if num < 20 {
        format_lt_twenty(num)
    } else {
        let tens_place = num / 10;
        let ones_place = num % 10;
        let tens_str = match tens_place {
            2 => "twenty",
            3 => "thirty",
            4 => "forty",
            5 => "fifty",
            6 => "sixty",
            7 => "seventy",
            8 => "eighty",
            9 => "ninety",
            _ => panic!("You shouldn't have passed {} to format_tens", num)
        };
        if ones_place == 0 {
            tens_str.to_string()
        } else {
            tens_str.to_string() + "-" + &format_lt_ten(ones_place)
        }
    }
}

/// Words for 0-999, joining hundreds and the remainder with "and".
fn format_lt_thousand(num: i32) -> String {
    if num < 100 {
        format_lt_hundred(num)
    } else {
        let hundreds_place = num / 100;
        let tens = num % 100;
        let hundreds_str = format_lt_ten(hundreds_place) + " hundred";
        if tens == 0 {
            hundreds_str
        } else {
            hundreds_str + " and " + &format_lt_hundred(tens)
        }
    }
}

/// Words for 0-999_999, joining the thousands part and the remainder with a
/// space (no "and" between them).
fn format_lt_million(num: i32) -> String {
    if num < 1000 {
        format_lt_thousand(num)
    } else {
        let thousands_place = num / 1000;
        let hundreds = num % 1000;
        let thousands_str = format_lt_thousand(thousands_place) + " thousand";
        if hundreds == 0 {
            thousands_str
        } else {
            thousands_str + " " + &format_lt_thousand(hundreds)
        }
    }
}
use std::cmp::Ordering;

use crate::{IsBot, IsTop, LatticeFrom, LatticeOrd, Merge};

/// A totally ordered max lattice. Merging returns the larger value.
///
/// Note that the [`Default::default()`] value for numeric type is `MIN`, not zero.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Max<T>(T);
impl<T> Max<T> {
    /// Create a new `Max` lattice instance from a `T`.
    pub fn new(val: T) -> Self {
        Self(val)
    }

    /// Create a new `Max` lattice instance from an `Into<T>` value.
    // NOTE(review): this inherent `from` shadows `From::from` at the call
    // site `Max::from(..)`; `Min` names the same helper `new_from` — confirm
    // the asymmetry is intentional.
    pub fn from(val: impl Into<T>) -> Self {
        Self::new(val.into())
    }

    /// Reveal the inner value as a shared reference.
    pub fn as_reveal_ref(&self) -> &T {
        &self.0
    }

    /// Reveal the inner value as an exclusive reference.
    pub fn as_reveal_mut(&mut self) -> &mut T {
        &mut self.0
    }

    /// Gets the inner by value, consuming self.
    pub fn into_reveal(self) -> T {
        self.0
    }
}

impl<T> Merge<Max<T>> for Max<T>
where
    T: Ord,
{
    // Keep the larger value; return whether `self` changed.
    fn merge(&mut self, other: Max<T>) -> bool {
        if self.0 < other.0 {
            self.0 = other.0;
            true
        } else {
            false
        }
    }
}

impl<T> LatticeFrom<Max<T>> for Max<T> {
    fn lattice_from(other: Max<T>) -> Self {
        other
    }
}

impl<T> LatticeOrd<Self> for Max<T> where Self: PartialOrd<Self> {}

/// A totally ordered min lattice. Merging returns the smaller value.
///
/// This means the lattice order is the reverse of what you might naturally expect: 0 is greater
/// than 1.
///
/// Note that the [`Default::default()`] value for numeric type is `MAX`, not zero.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Min<T>(T);
impl<T> Min<T> {
    /// Create a new `Min` lattice instance from a `T`.
    pub fn new(val: T) -> Self {
        Self(val)
    }

    /// Create a new `Min` lattice instance from an `Into<T>` value.
    pub fn new_from(val: impl Into<T>) -> Self {
        Self::new(val.into())
    }

    /// Reveal the inner value as a shared reference.
    pub fn as_reveal_ref(&self) -> &T {
        &self.0
    }

    /// Reveal the inner value as an exclusive reference.
    pub fn as_reveal_mut(&mut self) -> &mut T {
        &mut self.0
    }

    /// Gets the inner by value, consuming self.
    pub fn into_reveal(self) -> T {
        self.0
    }
}

impl<T> Merge<Min<T>> for Min<T>
where
    T: Ord,
{
    // Keep the smaller value; return whether `self` changed.
    fn merge(&mut self, other: Min<T>) -> bool {
        if other.0 < self.0 {
            self.0 = other.0;
            true
        } else {
            false
        }
    }
}

impl<T> LatticeFrom<Min<T>> for Min<T> {
    fn lattice_from(other: Min<T>) -> Self {
        other
    }
}

// `Min` cannot derive `PartialOrd`/`Ord`: its lattice order is the reverse of
// the inner type's natural order, so both impls reverse the comparison.
impl<T> PartialOrd for Min<T>
where
    T: PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.0.partial_cmp(&other.0).map(Ordering::reverse)
    }
}

impl<T> LatticeOrd<Self> for Min<T> where Self: PartialOrd<Self> {}

impl<T> Ord for Min<T>
where
    T: Ord,
{
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0).reverse()
    }
}

// IsTop, IsBot, Default impls

// `Max<()>` / `Min<()>` are single-point lattices: the only value is
// simultaneously top and bottom.
impl IsTop for Max<()> {
    fn is_top(&self) -> bool {
        true
    }
}
impl IsBot for Max<()> {
    fn is_bot(&self) -> bool {
        true
    }
}
impl IsTop for Min<()> {
    fn is_top(&self) -> bool {
        true
    }
}
impl IsBot for Min<()> {
    fn is_bot(&self) -> bool {
        true
    }
}

// For `bool`: `Max` bottoms out at `false`, `Min` (reversed order) at `true`.
impl IsTop for Max<bool> {
    fn is_top(&self) -> bool {
        self.0
    }
}
impl IsBot for Max<bool> {
    fn is_bot(&self) -> bool {
        !self.0
    }
}
impl Default for Max<bool> {
    fn default() -> Self {
        Self(false)
    }
}
impl IsTop for Min<bool> {
    fn is_top(&self) -> bool {
        !self.0
    }
}
impl IsBot for Min<bool> {
    fn is_bot(&self) -> bool {
        self.0
    }
}
impl Default for Min<bool> {
    fn default() -> Self {
        Self(true)
    }
}

// For `char`: the extremes are NUL and `char::MAX`.
impl IsTop for Max<char> {
    fn is_top(&self) -> bool {
        char::MAX == self.0
    }
}
impl IsBot for Max<char> {
    fn is_bot(&self) -> bool {
        '\x00' == self.0
    }
}
impl Default for Max<char> {
    fn default() -> Self {
        Self('\x00')
    }
}
impl IsTop for Min<char> {
    fn is_top(&self) -> bool {
        '\x00' == self.0
    }
}
impl IsBot for Min<char> {
    fn is_bot(&self) -> bool {
        char::MAX == self.0
    }
}
impl Default for Min<char> {
    fn default() -> Self {
        Self(char::MAX)
    }
}

// Generates the IsTop/IsBot/Default impls for each primitive integer type:
// `Max` defaults to the type's MIN (lattice bottom), `Min` to its MAX.
macro_rules! impls_numeric {
    ( $( $x:ty ),* ) => {
        $(
            impl IsTop for Max<$x> {
                fn is_top(&self) -> bool {
                    <$x>::MAX == self.0
                }
            }
            impl IsBot for Max<$x> {
                fn is_bot(&self) -> bool {
                    <$x>::MIN == self.0
                }
            }
            impl Default for Max<$x> {
                fn default() -> Self {
                    Self(<$x>::MIN)
                }
            }

            impl IsTop for Min<$x> {
                fn is_top(&self) -> bool {
                    <$x>::MIN == self.0
                }
            }
            impl IsBot for Min<$x> {
                fn is_bot(&self) -> bool {
                    <$x>::MAX == self.0
                }
            }
            impl Default for Min<$x> {
                fn default() -> Self {
                    Self(<$x>::MAX)
                }
            }
        )*
    };
}
impls_numeric! {
    isize, i8, i16, i32, i64, i128, usize, u8, u16, u32, u64, u128
}

#[cfg(test)]
mod test {
    use std::cmp::Ordering::*;

    use super::*;
    use crate::test::check_all;

    #[test]
    fn ordering() {
        assert_eq!(Max::new(0).cmp(&Max::new(0)), Equal);
        assert_eq!(Max::new(0).cmp(&Max::new(1)), Less);
        assert_eq!(Max::new(1).cmp(&Max::new(0)), Greater);

        assert_eq!(Min::new(0).cmp(&Min::new(0)), Equal);
        assert_eq!(Min::new(0).cmp(&Min::new(1)), Greater);
        assert_eq!(Min::new(1).cmp(&Min::new(0)), Less);
    }

    #[test]
    fn eq() {
        assert!(Max::new(0).eq(&Max::new(0)));
        assert!(!Max::new(0).eq(&Max::new(1)));
        assert!(!Max::new(1).eq(&Max::new(0)));

        assert!(Min::new(0).eq(&Min::new(0)));
        assert!(!Min::new(0).eq(&Min::new(1)));
        assert!(!Min::new(1).eq(&Min::new(0)));
    }

    #[test]
    fn consistency_max_bool() {
        let items = &[Max::new(false), Max::new(true)];
        check_all(items);
    }

    #[test]
    fn consistency_min_bool() {
        let items = &[Min::new(false), Min::new(true)];
        check_all(items);
    }

    #[test]
    fn consistency_max_char() {
        let items: Vec<_> = "\x00\u{10FFFF}✨🤦‍♀️踊るx".chars().map(Max::new).collect();
        check_all(&items);
    }

    #[test]
    fn consistency_min_char() {
        let items: Vec<_> = "\x00\u{10FFFF}✨🤦‍♀️踊るx".chars().map(Min::new).collect();
        check_all(&items);
    }

    #[test]
    fn consistency_max_i32() {
        let items = &[
            Max::new(0),
            Max::new(1),
            Max::new(i32::MIN),
            Max::new(i32::MAX),
        ];
        check_all(items);
    }

    #[test]
    fn consistency_min_i32() {
        let items = &[
            Min::new(0),
            Min::new(1),
            Min::new(i32::MIN),
            Min::new(i32::MAX),
        ];
        check_all(items);
    }
}
//! Decompress a compressed ELF32 relocation section //! //! This module can be used to decompress a compressed ELF32 relocation section. use crate::error::{Error, ErrorKind}; use crate::uleb128; /// Processes a compressed ELF32 relocation section and calls `op` for every /// relocation for further processing. /// /// # Errors /// /// If the compressed relocation section is malformed. /// /// # Panics /// /// If the provided data is too small for any reason and `no_bounds_check` /// feature is not requested. pub fn elf32_relocate<F>(data: &[u8], op: &mut F) -> Result<usize, Error> where F: FnMut(u8, u32) -> Result<(), Error>, { let base_address = read_u32_np(data)?; let mut count = slice_read_u8(data, 4)?; let mut index = 5; while count > 0 { index += elf32_relocate_group(array_from_slice_u8(data, index)?, base_address, op)?; count -= 1; } Ok(index) } /// Processes a single compressed relocation group. fn elf32_relocate_group<F>(data: &[u8], mut address: u32, op: &mut F) -> Result<usize, Error> where F: FnMut(u8, u32) -> Result<(), Error>, { let relocation_type = slice_read_u8(data, 0)?; let mut index = 1; let mut count = 0; index += uleb128::read_u32(array_from_slice_u8(data, 1)?, &mut count)?; while count > 0 { let mut offset = 0; index += uleb128::read_u32(array_from_slice_u8(data, index)?, &mut offset)?; address += offset; op(relocation_type, address)?; count -= 1; } Ok(index) } /// Reads an unsigned u32 value without panicing. fn read_u32_np(data: &[u8]) -> Result<u32, Error> { if cfg!(feature = "no_bounds_check") || data.len() >= 4 { Ok(unsafe { core::ptr::read(data.as_ptr() as *const u32) }) } else { Err(Error::new(ErrorKind::NotEnoughData)) } } /// Reads an unsigned 8-bit value from a byte slice without panicing. 
fn slice_read_u8(data: &[u8], index: usize) -> Result<u8, Error> { if cfg!(feature = "no_bounds_check") || data.len() > index { Ok(unsafe { *data.get_unchecked(index) }) } else { Err(Error::new(ErrorKind::NotEnoughData)) } } /// Creates a sub-slice with nonzero length from a slice without panicing. fn array_from_slice_u8<'a>(data: &'a [u8], offset: usize) -> Result<&'a [u8], Error> { if cfg!(feature = "no_bounds_check") || data.len() > offset { Ok(unsafe { core::slice::from_raw_parts(data.as_ptr().add(offset), data.len() - offset) }) } else { Err(Error::new(ErrorKind::NotEnoughData)) } } #[cfg(test)] mod tests { #[allow(unused)] use super::*; #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_no_data() { elf32_relocate(&[0; 0], &mut |_, _| unreachable!()).unwrap_err(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_base_address_only() { elf32_relocate(&[0; 4], &mut |_, _| unreachable!()).unwrap_err(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_count_only() { elf32_relocate(&[1; 5], &mut |_, _| unreachable!()).unwrap_err(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_count_is_zero() { elf32_relocate(&[0; 5], &mut |_, _| unreachable!()).unwrap(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_group_reloc_type_no_data() { elf32_relocate(&[1; 6], &mut |_, _| unreachable!()).unwrap_err(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_group_count_no_data() { elf32_relocate(&[1; 6], &mut |_, _| unreachable!()).unwrap_err(); } #[cfg(not(feature = "no_bounds_check"))] #[test] fn test_decompress_group_offset_no_data() { elf32_relocate(&[1; 7], &mut |_, _| unreachable!()).unwrap_err(); } #[test] fn test_decompress_relocate_one() { let memory = [ 0x04, 0x03, 0x02, 0x01, // base_address 0x01, // count 0x01, // group[0].relocation_type 0x01, // group[0].count 0x00, // group[0].offsets[0] ]; let read = elf32_relocate(&memory, &mut 
|relocation_type, address| { assert_eq!(relocation_type, 0x01); assert_eq!(address, 0x01020304); Ok(()) }) .unwrap(); assert_eq!(read, 8); } }
use aoc_day_two::*;
use std::fs::File;
use std::path::Path;
use std::io::{BufRead, BufReader};
use std::env;

/// Reads the puzzle input named on the command line and prints the answers
/// to both parts of the day-two puzzle.
fn main() {
    let args: Vec<String> = env::args().collect();

    // The first real argument (index 1) must be the input file path.
    if args.len() < 2 {
        panic!("Please provide a file path!")
    }

    let reader = BufReader::new(
        File::open(Path::new(&args[1])).expect("File could not be read or doesn't exist"),
    );

    // One box id per input line.
    let input_box_ids = reader.lines().map(|line| line.unwrap()).collect();

    println!("Part 1: {}", calculate_checksum(&input_box_ids));
    println!("Part 2: {}", find_common_correct_box_id_part(&input_box_ids));
}
use crate::io::Logger;

use colored::Colorize;
use curl::easy::{Handler, WriteError};
use serde_json::Value;

/// curl `Handler` that consumes a Docker image-build response stream, logging
/// build output and capturing either the built image id or an error message.
pub struct BuildImage {
    // Id of the built image with its "sha256:" prefix stripped, once Docker
    // reports it via an "aux" record; `None` until then.
    pub image_id: Option<String>,
    // Error text reported by Docker via a "message" record, if any.
    pub error_message: Option<String>,
    logger: Logger,
}

impl BuildImage {
    /// Creates a handler that logs build output through a clone of `logger`
    /// redirected to "log.txt".
    pub fn new(logger: &Logger) -> Self {
        let mut logger = logger.clone();
        logger.set_log_file("log.txt");
        Self {
            image_id: None,
            error_message: None,
            logger,
        }
    }
}

impl Handler for BuildImage {
    /// Receives one chunk of the response body. Each chunk carries
    /// newline-separated JSON records; "stream" records are logged, "aux"
    /// records yield the image id, "message" records an error. Always
    /// reports the full chunk as consumed.
    fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
        // Non-UTF-8 chunks (and unparseable lines) are silently skipped:
        // this is best-effort logging, not strict protocol handling.
        if let Ok(logs) = std::str::from_utf8(&data) {
            for line in logs.lines() {
                if !line.trim().is_empty() {
                    // Docker is sending us lines of json encoded strings on every write.
                    // These look like:
                    // {"stream":" ---\u003e Using cache\n"}
                    // I don't know enough about this API to state definitively that the
                    // "stream" values are all we care about, but it seems likely. Other
                    // keys exist, such as:
                    // {"aux":{"ID":"sha256:e821df6f41ad85f08c5fa08a228a34e164d93995e89be2d0d5edb9206a715347"}}
                    // which looks like the id of the image that was built. Likely, we
                    // neither care nor need to log it.
                    if let Ok(json) = serde_json::from_str::<Value>(line) {
                        if !json["stream"].is_null() {
                            // NOTE(review): `as_str().unwrap()` panics if
                            // "stream" is present but not a string — confirm
                            // Docker never sends that shape.
                            let value = json["stream"].as_str().unwrap();
                            let mut to_print = String::from(value);
                            to_print = to_print
                                .trim_end_matches(|c| c == '\n' || c == '\r')
                                .to_string();
                            // Highlight "Step ..." lines so build phases stand out.
                            if to_print.starts_with("Step ") {
                                to_print = to_print.white().bold().to_string();
                            }
                            if !to_print.trim().is_empty() {
                                self.logger.log(to_print).unwrap();
                            }
                        } else if !json["aux"].is_null() {
                            let line = json["aux"]["ID"].as_str().unwrap();
                            // fixme - This is a hack to remove "sha256:" from the string, but
                            // it may not always use sha256, so this should be done right.
                            let sha = &line[7..];
                            self.image_id = Some(sha.to_string());
                        } else if !json["message"].is_null() {
                            // fixme - this APPEARS to be how docker communicates error messages.
                            let error = json["message"].as_str().unwrap().to_string();
                            self.error_message = Some(error);
                        }
                    }
                }
            }
        }

        Ok(data.len())
    }
}
use std::collections::HashMap; use nom::{ number::complete::{be_u16, be_u32}, multi::count, bytes::complete::take, sequence::tuple, }; use crate::{R, GlyphId, FontError}; use crate::parsers::{iterator_n}; use crate::opentype::{parse_lookup_list, coverage_table, tag, Tag}; #[derive(Debug, Clone)] pub struct GlyphList(Vec<u16>); impl GlyphList { pub fn matches(&self, mut glyphs: impl Iterator<Item=GlyphId>) -> Option<usize> { for &a in &self.0 { match glyphs.next() { Some(b) if a as u32 == b.0 => continue, _ => return None } } Some(self.0.len()) } } #[derive(Debug, Clone)] pub struct GSub { scripts: Vec<Script>, features: Vec<Feature>, lookup: Vec<Vec<Substitution>>, } impl GSub { pub fn default_language(&self) -> Option<&LanguageSystem> { self.scripts.get(0).and_then(|script| script.default_language.as_ref()) } pub fn language(&self, lang_tag: Tag) -> Option<&LanguageSystem> { self.scripts.iter().flat_map(|s| s.languages.iter().filter(|&&(tag, _)| tag == lang_tag).map(|(_, lang)| lang)).next() } pub fn scripts(&self) -> &[Script] { &self.scripts } pub fn subs<'a, 'b: 'a>(&'b self, lang: &'a LanguageSystem, enabled: impl Fn(Tag) -> bool + 'a) -> impl Iterator<Item=&'b Substitution> + 'a { lang.feature_list.iter() .map(move |&FeatureIdx(idx)| &self.features[idx as usize]) .filter(move |feature| enabled(feature.tag)) .flat_map(move |feature| feature.lookup_indices.iter().flat_map(move |&lookup_idx| self.lookup[lookup_idx as usize].iter())) } pub fn lang_features<'a>(&'a self, lang: &'a LanguageSystem) -> impl Iterator<Item=Tag> + 'a { lang.feature_list.iter() .map(move |&FeatureIdx(idx)| self.features[idx as usize].tag) } } #[derive(Debug, Clone)] pub enum Substitution { Single(HashMap<u16, u16>), Ligatures(HashMap<u16, Vec<(GlyphList, u16)>>), } pub fn parse_gsub(data: &[u8]) -> Result<GSub, FontError> { debug!("parse GSUB"); let (i, major_version) = be_u16(data)?; require_eq!(major_version, 1); let (i, minor_version) = be_u16(i)?; let (i, script_list_off) = 
be_u16(i)?; let (i, feature_list_off) = be_u16(i)?; let (i, lookup_list_off) = be_u16(i)?; let script_list = parse_script_list(slice!(data, script_list_off as usize ..))?; let feature_list = parse_feature_list(slice!(data, feature_list_off as usize ..))?; /* let print_lang = |lang: &LanguageSystem| { if let Some(required) = lang.required_feature { println!(" required {:?}", feature_list[required.0 as usize].tag); } for feature in &lang.feature_list { println!(" other {:?}", feature_list[feature.0 as usize].tag); } }; for (script_nr, script) in script_list.iter().enumerate() { println!("script {}:", script_nr); if let Some(ref default) = script.default_language { println!("default language:"); print_lang(default); } for (tag, lang) in &script.languages { println!(" {:?}", tag); print_lang(lang); } } */ let (i, _feature_variations_offset) = match minor_version { 0 => (i, 0), 1 => be_u32(i)?, v => panic!("unsupported GPOS version 1.{}", v) }; let mut lookup = Vec::new(); parse_lookup_list(slice!(data, lookup_list_off as usize ..), |lookup_idx, data, lookup_type, _lookup_flag| { while lookup_idx >= lookup.len() { lookup.push(Vec::new()); } let mut push = |sub| lookup[lookup_idx].push(sub); match lookup_type { // Single · Replace one glyph with one glyph 1 => push(parse_single_subst(data)?), 2 => debug!("font has Multiple substitutions"), // Multiple · Replace one glyph with more than one glyph 3 => debug!("font has Altername substitutions"), // Alternate · Replace one glyph with one of many glyphs // Ligature · Replace multiple glyphs with one glyph 4 => push(parse_ligatures(data)?), 5 => debug!("font has Context substitutions"), // Context · Replace one or more glyphs in context 6 => debug!("font has Chaining Context substitutions"), // Chaining Context · Replace one or more glyphs in chained context 7 => debug!("font has Extension Substitution"), // Extension Substitution · Extension mechanism for other substitutions (i.e. 
this excludes the Extension type substitution itself) 8 => debug!("font has Reverse chaining context single substitutions"), // Reverse chaining context single · Applied in reverse order, replace single glyph in chaining context _ => {}, } Ok(()) })?; Ok(GSub { lookup, scripts: script_list, features: feature_list, }) } fn parse_single_subst(data: &[u8]) -> Result<Substitution, FontError> { let (i, format) = be_u16(data)?; let mut subs = HashMap::new(); match format { 1 => { let (i, coverage_offset) = be_u16(i)?; let (i, delta_gid) = be_u16(i)?; let coverage = coverage_table(slice!(data, coverage_offset as usize ..))?; for gid in coverage { subs.insert(gid, gid.wrapping_add(delta_gid)); } }, 2 => { let (i, coverage_offset) = be_u16(i)?; let (i, glyph_count) = be_u16(i)?; let coverage = coverage_table(slice!(data, coverage_offset as usize ..))?; let replacements = iterator_n(i, be_u16, glyph_count); for (gid, replacement_gid) in coverage.zip(replacements) { subs.insert(gid, replacement_gid); } } _ => error!("unsupported single substitution format {}", format) } Ok(Substitution::Single(subs)) } fn parse_ligatures(data: &[u8]) -> Result<Substitution, FontError> { let (i, format) = be_u16(data)?; require_eq!(format, 1); let (i, coverage_offset) = be_u16(i)?; let (i, ligature_set_count) = be_u16(i)?; let coverage = coverage_table(slice!(data, coverage_offset as usize ..))?; let mut ligatures = HashMap::with_capacity(ligature_set_count as usize); for (first, offset) in coverage.zip(iterator_n(i, be_u16, ligature_set_count)) { let set_data = slice!(data, offset as usize ..); let (i, ligature_count) = be_u16(set_data)?; let entry = ligatures.entry(first).or_insert_with(|| Vec::with_capacity(ligature_count as usize)); for set_offest in iterator_n(i, be_u16, ligature_count) { let data = slice!(set_data, set_offest as usize ..); let (i, ligature_glyph) = be_u16(data)?; let (i, component_count) = be_u16(i)?; let (_, components) = count(be_u16, component_count as usize - 1)(i)?; 
entry.push((GlyphList(components), ligature_glyph)); } } Ok(Substitution::Ligatures(ligatures)) } /* enum Action { GoTo(u16), Glyph(u16) } struct StateMachine { transitions: HashMap<(u16, u16), Action>, // current state, input gid num_states: usize } impl StateMachine { */ fn parse_script_list(data: &[u8]) -> Result<Vec<Script>, FontError> { let (i, script_count) = be_u16(data)?; let mut scripts = Vec::with_capacity(script_count as usize); for (tag, offset) in iterator_n(i, tuple((take(4usize), be_u16)), script_count) { scripts.push(parse_script_table(slice!(data, offset as usize .. ))?); } Ok(scripts) } fn parse_script_table(data: &[u8]) -> Result<Script, FontError> { let (i, default_lang_offset) = be_u16(data)?; let default_language = if default_lang_offset != 0 { Some(parse_language_system_table(slice!(data, default_lang_offset as usize ..))?) } else { None }; let (i, lang_sys_count) = be_u16(i)?; let mut languages = Vec::with_capacity(lang_sys_count as usize); for (tag, lang_sys_offset) in iterator_n(i, tuple((tag, be_u16)), lang_sys_count) { let table = parse_language_system_table(slice!(data, lang_sys_offset as usize ..))?; languages.push((tag, table)); } Ok(Script { default_language, languages, }) } #[derive(Copy, Clone, Debug)] struct FeatureIdx(u16); #[derive(Debug, Clone)] pub struct Script { default_language: Option<LanguageSystem>, languages: Vec<(Tag, LanguageSystem)> } #[derive(Debug, Clone)] pub struct LanguageSystem { feature_list: Vec<FeatureIdx>, required_feature: Option<FeatureIdx>, } #[derive(Debug, Clone)] struct Feature { tag: Tag, lookup_indices: Vec<u16> } // returns (requiredFeatureIndex, FeatureList) fn parse_language_system_table(i: &[u8]) -> Result<LanguageSystem, FontError> { let (i, _lookup_order) = be_u16(i)?; let (i, required_feature_idx) = be_u16(i)?; let (i, feature_index_count) = be_u16(i)?; let feature_list = iterator_n(i, be_u16, feature_index_count).map(FeatureIdx).collect(); let required_feature = if required_feature_idx == 
0xFFFF { None } else { Some(FeatureIdx(required_feature_idx)) }; Ok(LanguageSystem { feature_list, required_feature }) } fn parse_feature_list(data: &[u8]) -> Result<Vec<Feature>, FontError> { let (i, feature_count) = be_u16(data)?; let mut features = Vec::with_capacity(feature_count as usize); for (tag, feature_offset) in iterator_n(i, tuple((tag, be_u16)), feature_count) { let lookup_indices = parse_feature_table(slice!(data, feature_offset as usize ..))?.collect(); features.push(Feature { tag, lookup_indices }); } Ok(features) } fn parse_feature_table(i: &[u8]) -> Result<impl Iterator<Item=u16> + '_, FontError> { let (i, _feature_params) = be_u16(i)?; let (i, lookup_index_count) = be_u16(i)?; Ok(iterator_n(i, be_u16, lookup_index_count)) }
/// An enum to represent all characters in the SyriacSupplement block. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum SyriacSupplement { /// \u{860}: 'ࡠ' SyriacLetterMalayalamNga, /// \u{861}: 'ࡡ' SyriacLetterMalayalamJa, /// \u{862}: 'ࡢ' SyriacLetterMalayalamNya, /// \u{863}: 'ࡣ' SyriacLetterMalayalamTta, /// \u{864}: 'ࡤ' SyriacLetterMalayalamNna, /// \u{865}: 'ࡥ' SyriacLetterMalayalamNnna, /// \u{866}: 'ࡦ' SyriacLetterMalayalamBha, /// \u{867}: 'ࡧ' SyriacLetterMalayalamRa, /// \u{868}: 'ࡨ' SyriacLetterMalayalamLla, /// \u{869}: 'ࡩ' SyriacLetterMalayalamLlla, /// \u{86a}: 'ࡪ' SyriacLetterMalayalamSsa, } impl Into<char> for SyriacSupplement { fn into(self) -> char { match self { SyriacSupplement::SyriacLetterMalayalamNga => 'ࡠ', SyriacSupplement::SyriacLetterMalayalamJa => 'ࡡ', SyriacSupplement::SyriacLetterMalayalamNya => 'ࡢ', SyriacSupplement::SyriacLetterMalayalamTta => 'ࡣ', SyriacSupplement::SyriacLetterMalayalamNna => 'ࡤ', SyriacSupplement::SyriacLetterMalayalamNnna => 'ࡥ', SyriacSupplement::SyriacLetterMalayalamBha => 'ࡦ', SyriacSupplement::SyriacLetterMalayalamRa => 'ࡧ', SyriacSupplement::SyriacLetterMalayalamLla => 'ࡨ', SyriacSupplement::SyriacLetterMalayalamLlla => 'ࡩ', SyriacSupplement::SyriacLetterMalayalamSsa => 'ࡪ', } } } impl std::convert::TryFrom<char> for SyriacSupplement { type Error = (); fn try_from(c: char) -> Result<Self, Self::Error> { match c { 'ࡠ' => Ok(SyriacSupplement::SyriacLetterMalayalamNga), 'ࡡ' => Ok(SyriacSupplement::SyriacLetterMalayalamJa), 'ࡢ' => Ok(SyriacSupplement::SyriacLetterMalayalamNya), 'ࡣ' => Ok(SyriacSupplement::SyriacLetterMalayalamTta), 'ࡤ' => Ok(SyriacSupplement::SyriacLetterMalayalamNna), 'ࡥ' => Ok(SyriacSupplement::SyriacLetterMalayalamNnna), 'ࡦ' => Ok(SyriacSupplement::SyriacLetterMalayalamBha), 'ࡧ' => Ok(SyriacSupplement::SyriacLetterMalayalamRa), 'ࡨ' => Ok(SyriacSupplement::SyriacLetterMalayalamLla), 'ࡩ' => Ok(SyriacSupplement::SyriacLetterMalayalamLlla), 'ࡪ' => 
Ok(SyriacSupplement::SyriacLetterMalayalamSsa), _ => Err(()), } } } impl Into<u32> for SyriacSupplement { fn into(self) -> u32 { let c: char = self.into(); let hex = c .escape_unicode() .to_string() .replace("\\u{", "") .replace("}", ""); u32::from_str_radix(&hex, 16).unwrap() } } impl std::convert::TryFrom<u32> for SyriacSupplement { type Error = (); fn try_from(u: u32) -> Result<Self, Self::Error> { if let Ok(c) = char::try_from(u) { Self::try_from(c) } else { Err(()) } } } impl Iterator for SyriacSupplement { type Item = Self; fn next(&mut self) -> Option<Self> { let index: u32 = (*self).into(); use std::convert::TryFrom; Self::try_from(index + 1).ok() } } impl SyriacSupplement { /// The character with the lowest index in this unicode block pub fn new() -> Self { SyriacSupplement::SyriacLetterMalayalamNga } /// The character's name, in sentence case pub fn name(&self) -> String { let s = std::format!("SyriacSupplement{:#?}", self); string_morph::to_sentence_case(&s) } }
use std::{iter::Cycle, slice::Iter}; use rand_core::{ impls::{next_u32_via_fill, next_u64_via_fill}, RngCore, }; static FAKE_RAND_BYTES: &'static [u8] = include_bytes!("./random_bytes.bin"); #[derive(Clone)] pub struct FakeRand(Cycle<Iter<'static, u8>>); impl FakeRand { pub fn new() -> Self { Default::default() } pub fn skip(&mut self, bytes: usize) { for _ in 0..bytes { self.0.next().unwrap(); } } } impl Default for FakeRand { fn default() -> Self { FakeRand(FAKE_RAND_BYTES.iter().cycle()) } } impl RngCore for FakeRand { fn next_u32(&mut self) -> u32 { next_u32_via_fill(self) } fn next_u64(&mut self) -> u64 { next_u64_via_fill(self) } fn fill_bytes(&mut self, dest: &mut [u8]) { for byte in dest { *byte = *self.0.next().unwrap(); } } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { Ok(self.fill_bytes(dest)) } }
// Generates a fixed-output SHA-3 hash state type implementing the
// (pre-0.8) `digest` traits.
//
//   $state       - name of the generated wrapper struct
//   $output_size - typenum type: digest length in bytes
//   $rate        - typenum type: sponge rate (input block size)
//   $padding     - padding byte used when finalizing the sponge
macro_rules! sha3_impl {
    ($state:ident, $output_size:ident, $rate:ident, $padding:expr) => {
        #[allow(non_camel_case_types)]
        #[derive(Copy, Clone)]
        pub struct $state {
            engine: Sha3,
        }

        impl Default for $state {
            // Fresh sponge configured with this variant's rate and padding.
            fn default() -> Self {
                Self {engine: Sha3::new($rate::to_usize(), $padding)}
            }
        }

        impl digest::Input for $state {
            type BlockSize = $rate;

            // Absorbs input data into the sponge state.
            fn digest(&mut self, data: &[u8]) {
                self.engine.absorb(data)
            }
        }

        impl digest::FixedOutput for $state {
            type OutputSize = $output_size;

            // Finalizes the sponge and squeezes the fixed-size digest.
            fn fixed_result(self) -> GenericArray<u8, Self::OutputSize> {
                let mut out = GenericArray::default();
                self.engine.finish(&mut out);
                out
            }
        }
    }
}

// Generates an extendable-output (SHAKE) state type: same absorb path as
// `sha3_impl!`, but the caller chooses the output length by supplying the
// buffer. Same macro parameters, minus the fixed output size.
macro_rules! shake_impl {
    ($state:ident, $rate:ident, $padding:expr) => {
        #[derive(Copy, Clone)]
        pub struct $state {
            engine: Sha3,
        }

        impl Default for $state {
            // Fresh sponge configured with this variant's rate and padding.
            fn default() -> Self {
                Self {engine: Sha3::new($rate::to_usize(), $padding)}
            }
        }

        impl digest::Input for $state {
            type BlockSize = $rate;

            // Absorbs input data into the sponge state.
            fn digest(&mut self, data: &[u8]) {
                self.engine.absorb(data)
            }
        }

        impl digest::VariableOutput for $state {
            // Fills `buffer` with squeezed output; an empty buffer is
            // rejected as an invalid requested length.
            fn variable_result(self, buffer: &mut [u8]) -> digest::VariableResult {
                if buffer.len() != 0 {
                    self.engine.finish(buffer);
                    Ok(buffer)
                } else {
                    Err(digest::InvalidLength)
                }
            }
        }
    }
}
mod chosen; pub mod error; mod listen; mod listen_proxied; pub mod options; use super::{ controlchan, failed_logins::FailedLoginsCache, ftpserver::{error::ServerError, error::ShutdownError, options::FtpsRequired, options::SiteMd5}, shutdown, tls::FtpsConfig, }; use crate::options::ActivePassiveMode; use crate::{ auth::{anonymous::AnonymousAuthenticator, Authenticator, UserDetail}, notification::{nop::NopListener, DataListener, PresenceListener}, options::{FailedLoginsPolicy, FtpsClientAuth, TlsFlags}, server::shutdown::Notifier, server::{ proxy_protocol::{ProxyMode, ProxyProtocolSwitchboard}, tls, }, storage::{Metadata, StorageBackend}, }; use options::{PassiveHost, DEFAULT_GREETING, DEFAULT_IDLE_SESSION_TIMEOUT_SECS}; use slog::*; use std::{fmt::Debug, future::Future, net::SocketAddr, ops::Range, path::PathBuf, pin::Pin, sync::Arc, time::Duration}; /// An instance of an FTP(S) server. It aggregates an [`Authenticator`](crate::auth::Authenticator) /// implementation that will be used for authentication, and a [`StorageBackend`](crate::storage::StorageBackend) /// implementation that will be used as the virtual file system. /// /// The server can be started with the [`listen`](crate::Server::listen()) method. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// use tokio::runtime::Runtime; /// /// let mut rt = Runtime::new().unwrap(); /// let server = Server::with_fs("/srv/ftp"); /// rt.spawn(server.listen("127.0.0.1:2121")); /// // ... 
/// drop(rt); /// ``` /// /// [`Authenticator`]: auth::Authenticator /// [`StorageBackend`]: storage/trait.StorageBackend.html pub struct Server<Storage, User> where Storage: StorageBackend<User>, User: UserDetail, { storage: Arc<dyn (Fn() -> Storage) + Send + Sync>, greeting: &'static str, authenticator: Arc<dyn Authenticator<User>>, data_listener: Arc<dyn DataListener>, presence_listener: Arc<dyn PresenceListener>, passive_ports: Range<u16>, passive_host: PassiveHost, collect_metrics: bool, ftps_mode: FtpsConfig, ftps_required_control_chan: FtpsRequired, ftps_required_data_chan: FtpsRequired, ftps_tls_flags: TlsFlags, ftps_client_auth: FtpsClientAuth, ftps_trust_store: PathBuf, idle_session_timeout: std::time::Duration, proxy_protocol_mode: ProxyMode, logger: slog::Logger, site_md5: SiteMd5, shutdown: Pin<Box<dyn Future<Output = options::Shutdown> + Send + Sync>>, failed_logins_policy: Option<FailedLoginsPolicy>, active_passive_mode: ActivePassiveMode, } impl<Storage, User> Server<Storage, User> where Storage: StorageBackend<User> + 'static, Storage::Metadata: Metadata, User: UserDetail + 'static, { /// Construct a new [`Server`] with the given [`StorageBackend`] generator and an [`AnonymousAuthenticator`] /// /// [`Server`]: struct.Server.html /// [`StorageBackend`]: ../storage/trait.StorageBackend.html /// [`AnonymousAuthenticator`]: ../auth/struct.AnonymousAuthenticator.html pub fn new(sbe_generator: Box<dyn (Fn() -> Storage) + Send + Sync>) -> Self where AnonymousAuthenticator: Authenticator<User>, { Self::with_authenticator(sbe_generator, Arc::new(AnonymousAuthenticator {})) } /// Construct a new [`Server`] with the given [`StorageBackend`] generator and [`Authenticator`]. The other parameters will be set to defaults. 
/// /// [`Server`]: struct.Server.html /// [`StorageBackend`]: ../storage/trait.StorageBackend.html /// [`Authenticator`]: ../auth/trait.Authenticator.html pub fn with_authenticator(sbe_generator: Box<dyn (Fn() -> Storage) + Send + Sync>, authenticator: Arc<dyn Authenticator<User> + Send + Sync>) -> Self { Server { storage: Arc::from(sbe_generator), greeting: DEFAULT_GREETING, authenticator, data_listener: Arc::new(NopListener {}), presence_listener: Arc::new(NopListener {}), passive_ports: options::DEFAULT_PASSIVE_PORTS, passive_host: options::DEFAULT_PASSIVE_HOST, ftps_mode: FtpsConfig::Off, collect_metrics: false, idle_session_timeout: Duration::from_secs(DEFAULT_IDLE_SESSION_TIMEOUT_SECS), proxy_protocol_mode: ProxyMode::Off, logger: slog::Logger::root(slog_stdlog::StdLog {}.fuse(), slog::o!()), ftps_required_control_chan: options::DEFAULT_FTPS_REQUIRE, ftps_required_data_chan: options::DEFAULT_FTPS_REQUIRE, ftps_tls_flags: TlsFlags::default(), ftps_client_auth: FtpsClientAuth::default(), ftps_trust_store: options::DEFAULT_FTPS_TRUST_STORE.into(), site_md5: SiteMd5::default(), shutdown: Box::pin(futures_util::future::pending()), failed_logins_policy: None, active_passive_mode: ActivePassiveMode::default(), } } /// Set the [`Authenticator`] that will be used for authentication. /// /// # Example /// /// ```rust /// use libunftp::{auth, auth::AnonymousAuthenticator, Server}; /// use unftp_sbe_fs::ServerExt; /// use std::sync::Arc; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp") /// .authenticator(Arc::new(auth::AnonymousAuthenticator{})); /// ``` /// /// [`Authenticator`]: ../auth/trait.Authenticator.html pub fn authenticator(mut self, authenticator: Arc<dyn Authenticator<User> + Send + Sync>) -> Self { self.authenticator = authenticator; self } /// Enables one or both of Active/Passive mode. 
In active mode the server connects to the client's /// data port and in passive mode the client connects the the server's data port. /// /// Active mode is an older mode and considered less secure and is therefore disabled by default. /// /// # Example /// /// ```rust /// use libunftp::options::ActivePassiveMode; /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .active_passive_mode(ActivePassiveMode::ActiveAndPassive); /// ``` pub fn active_passive_mode<M: Into<ActivePassiveMode>>(mut self, mode: M) -> Self { self.active_passive_mode = mode.into(); self } /// Enables FTPS by configuring the path to the certificates file and the private key file. Both /// should be in PEM format. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .ftps("/srv/unftp/server.certs", "/srv/unftp/server.key"); /// ``` pub fn ftps<P: Into<PathBuf>>(mut self, certs_file: P, key_file: P) -> Self { self.ftps_mode = FtpsConfig::Building { certs_file: certs_file.into(), key_file: key_file.into(), }; self } /// Allows switching on Mutual TLS. For this to work the trust anchors also needs to be set using /// the [ftps_trust_store](crate::Server::ftps_trust_store) method. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// use libunftp::options::FtpsClientAuth; /// /// let server = Server::with_fs("/tmp") /// .ftps("/srv/unftp/server.certs", "/srv/unftp/server.key") /// .ftps_client_auth(FtpsClientAuth::Require) /// .ftps_trust_store("/srv/unftp/trusted.pem"); /// ``` pub fn ftps_client_auth<C>(mut self, auth: C) -> Self where C: Into<FtpsClientAuth>, { self.ftps_client_auth = auth.into(); self } /// Configures whether client connections may use plaintext mode or not. 
pub fn ftps_required<R>(mut self, for_control_chan: R, for_data_chan: R) -> Self where R: Into<FtpsRequired>, { self.ftps_required_control_chan = for_control_chan.into(); self.ftps_required_data_chan = for_data_chan.into(); self } /// Sets the certificates to use when verifying client certificates in Mutual TLS mode. This /// should point to certificates in a PEM formatted file. For this to have any effect MTLS needs /// to be switched on via the [ftps_client_auth](crate::Server::ftps_client_auth) method. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .ftps("/srv/unftp/server.certs", "/srv/unftp/server.key") /// .ftps_client_auth(true) /// .ftps_trust_store("/srv/unftp/trusted.pem"); /// ``` pub fn ftps_trust_store<P>(mut self, trust: P) -> Self where P: Into<PathBuf>, { self.ftps_trust_store = trust.into(); self } /// Switches TLS features on or off. /// /// # Example /// /// This example enables only TLS v1.3 and allows TLS session resumption with tickets. /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// use libunftp::options::TlsFlags; /// /// let mut server = Server::with_fs("/tmp") /// .greeting("Welcome to my FTP Server") /// .ftps("/srv/unftp/server.certs", "/srv/unftp/server.key") /// .ftps_tls_flags(TlsFlags::V1_3 | TlsFlags::RESUMPTION_TICKETS); /// ``` pub fn ftps_tls_flags(mut self, flags: TlsFlags) -> Self { self.ftps_tls_flags = flags; self } /// Set the greeting that will be sent to the client after connecting. 
/// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp").greeting("Welcome to my FTP Server"); /// /// // Or instead if you prefer: /// let mut server = Server::with_fs("/tmp"); /// server.greeting("Welcome to my FTP Server"); /// ``` pub fn greeting(mut self, greeting: &'static str) -> Self { self.greeting = greeting; self } /// Set the idle session timeout in seconds. The default is 600 seconds. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp").idle_session_timeout(600); /// /// // Or instead if you prefer: /// let mut server = Server::with_fs("/tmp"); /// server.idle_session_timeout(600); /// ``` pub fn idle_session_timeout(mut self, secs: u64) -> Self { self.idle_session_timeout = Duration::from_secs(secs); self } /// Sets the structured logger ([slog](https://crates.io/crates/slog)::Logger) to use pub fn logger<L: Into<Option<slog::Logger>>>(mut self, logger: L) -> Self { self.logger = logger.into().unwrap_or_else(|| slog::Logger::root(slog_stdlog::StdLog {}.fuse(), slog::o!())); self } /// Enables the collection of prometheus metrics. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp").metrics(); /// /// // Or instead if you prefer: /// let mut server = Server::with_fs("/tmp"); /// server.metrics(); /// ``` pub fn metrics(mut self) -> Self { self.collect_metrics = true; self } /// Sets an [`DataListener`](crate::notification::DataListener) that will /// be notified of data changes that happen in a user's session. 
pub fn notify_data(mut self, listener: impl DataListener + 'static) -> Self { self.data_listener = Arc::new(listener); self } /// Sets an [`PresenceListener`](crate::notification::PresenceListener) that will /// be notified of user logins and logouts pub fn notify_presence(mut self, listener: impl PresenceListener + 'static) -> Self { self.presence_listener = Arc::new(listener); self } /// Specifies how the IP address that libunftp will advertise in response to the PASV command is /// determined. /// /// # Examples /// /// Using a fixed IP specified as a numeric array: /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .passive_host([127,0,0,1]); /// ``` /// Or the same but more explicitly: /// /// ```rust /// use libunftp::{Server,options}; /// use unftp_sbe_fs::ServerExt; /// use std::net::Ipv4Addr; /// /// let server = Server::with_fs("/tmp") /// .passive_host(options::PassiveHost::Ip(Ipv4Addr::new(127, 0, 0, 1))); /// ``` /// /// To determine the passive IP from the incoming control connection: /// /// ```rust /// use libunftp::{Server,options}; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .passive_host(options::PassiveHost::FromConnection); /// ``` /// /// Get the IP by resolving a DNS name: /// /// ```rust /// use libunftp::{Server,options}; /// use unftp_sbe_fs::ServerExt; /// /// let server = Server::with_fs("/tmp") /// .passive_host("ftp.myserver.org"); /// ``` pub fn passive_host<H: Into<PassiveHost>>(mut self, host_option: H) -> Self { self.passive_host = host_option.into(); self } /// Sets the range of passive ports that we'll use for passive connections. 
/// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let server = Server::with_fs("/tmp") /// .passive_ports(49152..65535); /// /// // Or instead if you prefer: /// let mut server = Server::with_fs("/tmp"); /// server.passive_ports(49152..65535); /// ``` pub fn passive_ports(mut self, range: Range<u16>) -> Self { self.passive_ports = range; self } /// Enables PROXY protocol mode. /// /// If you use a proxy such as haproxy or nginx, you can enable /// the PROXY protocol /// (<https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt>). /// /// Configure your proxy to enable PROXY protocol encoding for /// control and data external listening ports, forwarding these /// connections to the libunFTP listening port in proxy protocol /// mode. /// /// In PROXY protocol mode, libunftp receives both control and /// data connections on the listening port. It then distinguishes /// control and data connections by comparing the original /// destination port (extracted from the PROXY header) with the /// port specified as the `external_control_port` parameter. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp").proxy_protocol_mode(2121); /// ``` pub fn proxy_protocol_mode(mut self, external_control_port: u16) -> Self { self.proxy_protocol_mode = external_control_port.into(); self } /// Allows telling libunftp when and how to shutdown gracefully. /// /// The passed argument is a future that resolves when libunftp should shut down. The future /// should return a [options::Shutdown](options::Shutdown) instance. 
/// /// # Example /// /// ```rust /// use std::time::Duration; /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// /// let mut server = Server::with_fs("/tmp").shutdown_indicator(async { /// tokio::time::sleep(Duration::from_secs(10)).await; // Shut the server down after 10 seconds. /// libunftp::options::Shutdown::new() /// .grace_period(Duration::from_secs(5)) // Allow 5 seconds to shutdown gracefully /// }); /// ``` pub fn shutdown_indicator<I>(mut self, indicator: I) -> Self where I: Future<Output = options::Shutdown> + Send + Sync + 'static, { self.shutdown = Box::pin(indicator); self } /// Enables the FTP command 'SITE MD5'. /// /// _Warning:_ Depending on the storage backend, SITE MD5 may use relatively much memory and /// generate high CPU usage. This opens a Denial of Service vulnerability that could be exploited /// by malicious users, by means of flooding the server with SITE MD5 commands. As such this /// feature is probably best user configured and at least disabled for anonymous users by default. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use libunftp::options::SiteMd5; /// use unftp_sbe_fs::ServerExt; /// /// // Use it in a builder-like pattern: /// let mut server = Server::with_fs("/tmp").sitemd5(SiteMd5::None); /// ``` pub fn sitemd5<M: Into<SiteMd5>>(mut self, sitemd5_option: M) -> Self { self.site_md5 = sitemd5_option.into(); self } /// Enables a password guessing protection policy /// /// Policy used to temporarily block an account, source IP or the /// combination of both, after a certain number of failed login /// attempts for a certain time. /// /// There are different policies to choose from. Such as to lock /// based on the combination of source IP + username or only /// username or IP. For example, if you choose IP based blocking, /// multiple successive failed login attempts will block any login /// attempt from that IP for a defined period, including login /// attempts for other users. 
/// /// The default policy is to block on the combination of source IP /// and username. This policy affects only this specific /// IP+username combination, and does not block the user logging /// in from elsewhere. /// /// It is also possible to override the default 'Penalty', which /// defines how many failed login attempts before applying the /// policy, and after what time the block expires. /// /// # Examples /// /// ```rust /// use libunftp::Server; /// use libunftp::options::{FailedLoginsPolicy,FailedLoginsBlock}; /// use unftp_sbe_fs::ServerExt; /// /// // With default policy /// let server = Server::with_fs("/tmp").failed_logins_policy(FailedLoginsPolicy::default()); /// /// // Or choose a specific policy like based on source IP and /// // longer block (maximum 3 attempts, 5 minutes, IP based /// // blocking) /// use std::time::Duration; /// let server = Server::with_fs("/tmp").failed_logins_policy(FailedLoginsPolicy::new(3, Duration::from_secs(300), FailedLoginsBlock::IP)); /// ``` pub fn failed_logins_policy(mut self, policy: FailedLoginsPolicy) -> Self { self.failed_logins_policy = Some(policy); self } /// Runs the main FTP process asynchronously. Should be started in a async runtime context. /// /// # Example /// /// ```rust /// use libunftp::Server; /// use unftp_sbe_fs::ServerExt; /// use tokio::runtime::Runtime; /// /// let mut rt = Runtime::new().unwrap(); /// let server = Server::with_fs("/srv/ftp"); /// rt.spawn(server.listen("127.0.0.1:2121")); /// // ... 
/// drop(rt); /// ``` /// #[tracing_attributes::instrument] pub async fn listen<T: Into<String> + Debug>(mut self, bind_address: T) -> std::result::Result<(), ServerError> { self.ftps_mode = match self.ftps_mode { FtpsConfig::Off => FtpsConfig::Off, FtpsConfig::Building { certs_file, key_file } => FtpsConfig::On { tls_config: tls::new_config(certs_file, key_file, self.ftps_tls_flags, self.ftps_client_auth, self.ftps_trust_store.clone())?, }, FtpsConfig::On { tls_config } => FtpsConfig::On { tls_config }, }; let logger = self.logger.clone(); let bind_address: SocketAddr = bind_address.into().parse()?; let shutdown_notifier = Arc::new(shutdown::Notifier::new()); let failed_logins = self.failed_logins_policy.as_ref().map(|policy| FailedLoginsCache::new(policy.clone())); let listen_future = match self.proxy_protocol_mode { ProxyMode::On { external_control_port } => Box::pin( listen_proxied::ProxyProtocolListener { bind_address, external_control_port, logger: self.logger.clone(), options: (&self).into(), proxy_protocol_switchboard: Some(ProxyProtocolSwitchboard::new(self.logger.clone(), self.passive_ports.clone())), shutdown_topic: shutdown_notifier.clone(), failed_logins: failed_logins.clone(), } .listen(), ) as Pin<Box<dyn Future<Output = std::result::Result<(), ServerError>> + Send>>, ProxyMode::Off => Box::pin( listen::Listener { bind_address, logger: self.logger.clone(), options: (&self).into(), shutdown_topic: shutdown_notifier.clone(), failed_logins: failed_logins.clone(), } .listen(), ) as Pin<Box<dyn Future<Output = std::result::Result<(), ServerError>> + Send>>, }; let sweeper_fut = if let Some(ref failed_logins) = failed_logins { Box::pin(failed_logins.sweeper(self.logger.clone(), shutdown_notifier.clone())) as Pin<Box<dyn futures_util::Future<Output = ()> + Send>> } else { Box::pin(futures_util::future::pending()) as Pin<Box<dyn futures_util::Future<Output = ()> + Send>> }; tokio::select! 
{ result = listen_future => result, _ = sweeper_fut => { Ok(()) }, opts = self.shutdown => { slog::debug!(logger, "Shutting down within {:?}", opts.grace_period); shutdown_notifier.notify().await; Self::shutdown_linger(logger, shutdown_notifier, opts.grace_period).await } } } // Waits for sub-components to shut down gracefully or aborts if the grace period expires async fn shutdown_linger(logger: slog::Logger, shutdown_notifier: Arc<Notifier>, grace_period: Duration) -> std::result::Result<(), ServerError> { let timeout = Box::pin(tokio::time::sleep(grace_period)); tokio::select! { _ = shutdown_notifier.linger() => { slog::debug!(logger, "Graceful shutdown complete"); Ok(()) }, _ = timeout => { Err(ShutdownError{ msg: "shutdown grace period expired".to_string()}.into()) } } // TODO: Implement feature where we keep on listening for a while i.e. GracefulAcceptingConnections } } impl<Storage, User> From<&Server<Storage, User>> for chosen::OptionsHolder<Storage, User> where User: UserDetail + 'static, Storage: StorageBackend<User> + 'static, Storage::Metadata: Metadata, { fn from(server: &Server<Storage, User>) -> Self { chosen::OptionsHolder { authenticator: server.authenticator.clone(), storage: server.storage.clone(), ftps_config: server.ftps_mode.clone(), collect_metrics: server.collect_metrics, greeting: server.greeting, idle_session_timeout: server.idle_session_timeout, passive_ports: server.passive_ports.clone(), passive_host: server.passive_host.clone(), logger: server.logger.new(slog::o!()), ftps_required_control_chan: server.ftps_required_control_chan, ftps_required_data_chan: server.ftps_required_data_chan, site_md5: server.site_md5, data_listener: server.data_listener.clone(), presence_listener: server.presence_listener.clone(), active_passive_mode: server.active_passive_mode, } } } impl<Storage, User> Debug for Server<Storage, User> where Storage: StorageBackend<User>, User: UserDetail, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 
f.debug_struct("Server") .field("authenticator", &self.authenticator) .field("collect_metrics", &self.collect_metrics) .field("active_passive_mode", &self.active_passive_mode) .field("greeting", &self.greeting) .field("logger", &self.logger) .field("metrics", &self.collect_metrics) .field("passive_ports", &self.passive_ports) .field("passive_host", &self.passive_host) .field("ftps_client_auth", &self.ftps_client_auth) .field("ftps_mode", &self.ftps_mode) .field("ftps_required_control_chan", &self.ftps_required_control_chan) .field("ftps_required_data_chan", &self.ftps_required_data_chan) .field("ftps_tls_flags", &self.ftps_tls_flags) .field("ftps_trust_store", &self.ftps_trust_store) .field("idle_session_timeout", &self.idle_session_timeout) .field("proxy_protocol_mode", &self.proxy_protocol_mode) .field("failed_logins_policy", &self.failed_logins_policy) .finish() } }
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::ops; use rand::{self, RngCore}; use rocksdb::*; use super::tempdir_with_prefix; fn initial_data(path: &str) -> DB { let mut opts = DBOptions::new(); opts.create_if_missing(true); let mut cf_opts = ColumnFamilyOptions::new(); // We will control the compaction manually. cf_opts.set_disable_auto_compactions(true); let db = DB::open_cf(opts, path, vec![("default", cf_opts)]).unwrap(); { let handle = db.cf_handle("default").unwrap(); generate_file_bottom_level(&db, handle, 0..3); generate_file_bottom_level(&db, handle, 3..6); generate_file_bottom_level(&db, handle, 6..9); } db } /// Generates a file with `range` and put it to the bottommost level. 
fn generate_file_bottom_level(db: &DB, handle: &CFHandle, range: ops::Range<u32>) { for i in range { let k = format!("key{}", i); let v = format!("value{}", i); db.put_cf(handle, k.as_bytes(), v.as_bytes()).unwrap(); } let mut fopts = FlushOptions::default(); fopts.set_wait(true); db.flush_cf(handle, &fopts).unwrap(); let opts = db.get_options_cf(handle); let mut compact_opts = CompactOptions::new(); compact_opts.set_change_level(true); compact_opts.set_target_level(opts.get_num_levels() as i32 - 1); compact_opts.set_bottommost_level_compaction(DBBottommostLevelCompaction::Skip); db.compact_range_cf_opt(handle, &compact_opts, None, None); } #[test] fn test_delete_files_in_range_with_iter() { let path = tempdir_with_prefix("_rust_rocksdb_test_delete_files_in_range_with_iter"); let path_str = path.path().to_str().unwrap(); let db = initial_data(path_str); // construct iterator before DeleteFilesInRange let mut iter = db.iter(); // delete sst2 db.delete_files_in_range(b"key2", b"key7", false).unwrap(); let mut count = 0; assert!(iter.seek(SeekKey::Start).unwrap()); while iter.valid().unwrap() { iter.next().unwrap(); count = count + 1; } // iterator will pin all sst files. assert_eq!(count, 9); } #[test] fn test_delete_files_in_range_with_snap() { let path = tempdir_with_prefix("_rust_rocksdb_test_delete_files_in_range_with_snap"); let path_str = path.path().to_str().unwrap(); let db = initial_data(path_str); // construct snapshot before DeleteFilesInRange let snap = db.snapshot(); // delete sst2 db.delete_files_in_range(b"key2", b"key7", false).unwrap(); let mut iter = snap.iter(); assert!(iter.seek(SeekKey::Start).unwrap()); let mut count = 0; while iter.valid().unwrap() { iter.next().unwrap(); count = count + 1; } // sst2 has been dropped. assert_eq!(count, 6); } #[test] fn test_delete_files_in_range_with_delete_range() { // Regression test for https://github.com/facebook/rocksdb/issues/2833. 
let path = tempdir_with_prefix("_rocksdb_test_delete_files_in_range_with_delete_range"); let path_str = path.path().to_str().unwrap(); let sst_size = 1 << 10; let value_size = 8 << 10; let mut opts = DBOptions::new(); opts.create_if_missing(true); let mut cf_opts = ColumnFamilyOptions::new(); cf_opts.set_target_file_size_base(sst_size); cf_opts.set_level_zero_file_num_compaction_trigger(10); let db = DB::open_cf(opts, path_str, vec![("default", cf_opts)]).unwrap(); // Flush 5 files in level 0. // File i will contain keys i and i+1. let mut fopts = FlushOptions::default(); fopts.set_wait(true); for i in 0..5 { let k1 = format!("{}", i); let k2 = format!("{}", i + 1); let mut v = vec![0; value_size]; rand::thread_rng().fill_bytes(&mut v); db.put(k1.as_bytes(), v.as_slice()).unwrap(); db.put(k2.as_bytes(), v.as_slice()).unwrap(); db.flush(&fopts).unwrap(); } // Hold a snapshot to prevent the following delete range from dropping keys above. let snapshot = db.snapshot(); db.delete_range(b"0", b"6").unwrap(); db.flush(&fopts).unwrap(); // After this, we will have 3 files in level 1. // File i will contain keys i and i+1, and the delete range [0, 6). db.compact_range(None, None); drop(snapshot); // Before the fix, the file in the middle with keys 2 and 3 will be deleted, // which can be a problem when we compact later. After the fix, no file will // be deleted since they have an overlapped delete range [0, 6). db.delete_files_in_range(b"1", b"4", false).unwrap(); // Flush a file with keys 4 and 5 to level 0. for i in 4..5 { let k1 = format!("{}", i); let k2 = format!("{}", i + 1); let mut v = vec![0; value_size]; rand::thread_rng().fill_bytes(&mut v); db.put(k1.as_bytes(), v.as_slice()).unwrap(); db.put(k2.as_bytes(), v.as_slice()).unwrap(); db.flush(&fopts).unwrap(); } // After this, the delete range [0, 6) will drop all entries before it, so // we should have only keys 4 and 5. 
db.compact_range(None, None); let mut it = db.iter(); it.seek(SeekKey::Start).unwrap(); assert!(it.valid().unwrap()); assert_eq!(it.key(), b"4"); assert!(it.next().unwrap()); assert_eq!(it.key(), b"5"); assert!(!it.next().unwrap()); } #[test] fn test_delete_files_in_ranges() { let path = tempdir_with_prefix("_rust_rocksdb_test_delete_files_in_multi_ranges"); let path_str = path.path().to_str().unwrap(); let db = initial_data(path_str); // Delete files in multiple overlapped ranges. // File ["key0", "key2"], ["key3", "key5"] should have been deleted, // but file ["key6", "key8"] should not be deleted because "key8" is exclusive. let mut ranges = Vec::new(); ranges.push(Range::new(b"key0", b"key4")); ranges.push(Range::new(b"key2", b"key6")); ranges.push(Range::new(b"key4", b"key8")); let cf = db.cf_handle("default").unwrap(); db.delete_files_in_ranges_cf(cf, &ranges, false).unwrap(); // Check that ["key0", "key5"] have been deleted, but ["key6", "key8"] still exist. let mut iter = db.iter(); iter.seek(SeekKey::Start).unwrap(); for i in 6..9 { assert!(iter.valid().unwrap()); let k = format!("key{}", i); assert_eq!(iter.key(), k.as_bytes()); iter.next().unwrap(); } assert!(!iter.valid().unwrap()); // Delete the last file. let ranges = vec![Range::new(b"key6", b"key8")]; db.delete_files_in_ranges_cf(cf, &ranges, true).unwrap(); let mut iter = db.iter(); assert!(!iter.seek(SeekKey::Start).unwrap()); }
/// Register `FMC_CSQCFGR3` reader
pub type R = crate::R<FMC_CSQCFGR3_SPEC>;
/// Register `FMC_CSQCFGR3` writer
pub type W = crate::W<FMC_CSQCFGR3_SPEC>;
/// Field `SNBR` reader - SNBR
pub type SNBR_R = crate::FieldReader;
/// Field `SNBR` writer - SNBR
pub type SNBR_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 6, O>;
/// Field `AC1T` reader - AC1T
pub type AC1T_R = crate::BitReader;
/// Field `AC1T` writer - AC1T
pub type AC1T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `AC2T` reader - AC2T
pub type AC2T_R = crate::BitReader;
/// Field `AC2T` writer - AC2T
pub type AC2T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `AC3T` reader - AC3T
pub type AC3T_R = crate::BitReader;
/// Field `AC3T` writer - AC3T
pub type AC3T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `AC4T` reader - AC4T
pub type AC4T_R = crate::BitReader;
/// Field `AC4T` writer - AC4T
pub type AC4T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `AC5T` reader - AC5T
pub type AC5T_R = crate::BitReader;
/// Field `AC5T` writer - AC5T
pub type AC5T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `SDT` reader - SDT
pub type SDT_R = crate::BitReader;
/// Field `SDT` writer - SDT
pub type SDT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `RAC1T` reader - RAC1T
pub type RAC1T_R = crate::BitReader;
/// Field `RAC1T` writer - RAC1T
pub type RAC1T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
/// Field `RAC2T` reader - RAC2T
pub type RAC2T_R = crate::BitReader;
/// Field `RAC2T` writer - RAC2T
pub type RAC2T_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;

impl R {
    /// Bits 8:13 - SNBR
    #[inline(always)]
    pub fn snbr(&self) -> SNBR_R {
        SNBR_R::new(((self.bits >> 8) & 0x3f) as u8)
    }
    /// Bit 16 - AC1T
    #[inline(always)]
    pub fn ac1t(&self) -> AC1T_R {
        AC1T_R::new(((self.bits >> 16) & 1) != 0)
    }
    /// Bit 17 - AC2T
    #[inline(always)]
    pub fn ac2t(&self) -> AC2T_R {
        AC2T_R::new(((self.bits >> 17) & 1) != 0)
    }
    /// Bit 18 - AC3T
    #[inline(always)]
    pub fn ac3t(&self) -> AC3T_R {
        AC3T_R::new(((self.bits >> 18) & 1) != 0)
    }
    /// Bit 19 - AC4T
    #[inline(always)]
    pub fn ac4t(&self) -> AC4T_R {
        AC4T_R::new(((self.bits >> 19) & 1) != 0)
    }
    /// Bit 20 - AC5T
    #[inline(always)]
    pub fn ac5t(&self) -> AC5T_R {
        AC5T_R::new(((self.bits >> 20) & 1) != 0)
    }
    /// Bit 21 - SDT
    #[inline(always)]
    pub fn sdt(&self) -> SDT_R {
        SDT_R::new(((self.bits >> 21) & 1) != 0)
    }
    /// Bit 22 - RAC1T
    #[inline(always)]
    pub fn rac1t(&self) -> RAC1T_R {
        RAC1T_R::new(((self.bits >> 22) & 1) != 0)
    }
    /// Bit 23 - RAC2T
    #[inline(always)]
    pub fn rac2t(&self) -> RAC2T_R {
        RAC2T_R::new(((self.bits >> 23) & 1) != 0)
    }
}

impl W {
    /// Bits 8:13 - SNBR
    #[inline(always)]
    #[must_use]
    pub fn snbr(&mut self) -> SNBR_W<FMC_CSQCFGR3_SPEC, 8> {
        SNBR_W::new(self)
    }
    /// Bit 16 - AC1T
    #[inline(always)]
    #[must_use]
    pub fn ac1t(&mut self) -> AC1T_W<FMC_CSQCFGR3_SPEC, 16> {
        AC1T_W::new(self)
    }
    /// Bit 17 - AC2T
    #[inline(always)]
    #[must_use]
    pub fn ac2t(&mut self) -> AC2T_W<FMC_CSQCFGR3_SPEC, 17> {
        AC2T_W::new(self)
    }
    /// Bit 18 - AC3T
    #[inline(always)]
    #[must_use]
    pub fn ac3t(&mut self) -> AC3T_W<FMC_CSQCFGR3_SPEC, 18> {
        AC3T_W::new(self)
    }
    /// Bit 19 - AC4T
    #[inline(always)]
    #[must_use]
    pub fn ac4t(&mut self) -> AC4T_W<FMC_CSQCFGR3_SPEC, 19> {
        AC4T_W::new(self)
    }
    /// Bit 20 - AC5T
    #[inline(always)]
    #[must_use]
    pub fn ac5t(&mut self) -> AC5T_W<FMC_CSQCFGR3_SPEC, 20> {
        AC5T_W::new(self)
    }
    /// Bit 21 - SDT
    #[inline(always)]
    #[must_use]
    pub fn sdt(&mut self) -> SDT_W<FMC_CSQCFGR3_SPEC, 21> {
        SDT_W::new(self)
    }
    /// Bit 22 - RAC1T
    #[inline(always)]
    #[must_use]
    pub fn rac1t(&mut self) -> RAC1T_W<FMC_CSQCFGR3_SPEC, 22> {
        RAC1T_W::new(self)
    }
    /// Bit 23 - RAC2T
    #[inline(always)]
    #[must_use]
    pub fn rac2t(&mut self) -> RAC2T_W<FMC_CSQCFGR3_SPEC, 23> {
        RAC2T_W::new(self)
    }
    /// Writes raw bits to the register.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}

/// FMC NAND sequencer configuration register 3
///
/// You can [`read`](crate::generic::Reg::read) this register and get [`fmc_csqcfgr3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`fmc_csqcfgr3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).
pub struct FMC_CSQCFGR3_SPEC;
impl crate::RegisterSpec for FMC_CSQCFGR3_SPEC {
    type Ux = u32;
}
/// `read()` method returns [`fmc_csqcfgr3::R`](R) reader structure
impl crate::Readable for FMC_CSQCFGR3_SPEC {}
/// `write(|w| ..)` method takes [`fmc_csqcfgr3::W`](W) writer structure
impl crate::Writable for FMC_CSQCFGR3_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
/// `reset()` method sets FMC_CSQCFGR3 to value 0
impl crate::Resettable for FMC_CSQCFGR3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
#[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[macro_use] extern crate maplit; use std::io::Write; use std::thread::{spawn, sleep}; use std::time::{Duration, SystemTime}; use std::sync::{Arc}; use std::rc::Rc; use std::cell::RefCell; use gio::prelude::*; use gtk::prelude::*; use gtk::{Application}; use serde::{Serialize, Deserialize}; mod winapi_stuff; use winapi_stuff::*; use std::collections::HashMap; use plugin_interface::{PuszRow, PuszRowBuilder, PuszRowIdentifier, PuszAction, PuszEvent, PuszEntry, PluginEvent, SpecialKey}; use crate::winapi_stuff::ReceivedMessage::Hotkey; #[derive(Serialize, Deserialize, PartialEq, Debug)] enum Model { Clip, } fn set_visual(window: &gtk::ApplicationWindow, _screen: Option<&gdk::Screen>) { if let Some(screen) = window.get_screen() { if let Some(ref visual) = screen.get_rgba_visual() { window.set_visual(Some(visual)); // crucial for transparency } } } fn draw(_window: &gtk::ApplicationWindow, ctx: &cairo::Context) -> Inhibit { // crucial for transparency ctx.set_source_rgba(0.0, 0.0, 0.0, 0.0); ctx.set_operator(cairo::Operator::Screen); ctx.paint(); Inhibit(false) } fn draw_entry_background(_window: &gtk::Box, ctx: &cairo::Context) -> Inhibit { // crucial for transparency if _window.has_focus() { ctx.set_source_rgba(1.0, 0.0, 1.0, 1.0); } else { ctx.set_source_rgba(1.0, 0.0, 0.0, 1.0); } ctx.set_operator(cairo::Operator::Screen); ctx.paint(); Inhibit(false) } fn special_entry(ctx : &Context, text : &str) -> Vec<PuszEntry> { let mut entries = vec![]; for (regex, base) in ctx.special_entries_builders.iter() { for cap in regex.captures_iter(text) { entries.push(PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label: format!("snow link: {}", cap[1].to_owned()), content: format!("https://ig.service-now.com/{}.do?sysparm_query=number={}", base, cap[1].to_owned()), }) } } entries } //okay gdk_event may not make sense - how to pass on keystrokes here? 
for now single return is sent as DAMAGE :) fn handle_action(gdk_event: gdk::EventType, entry : &PuszEntry, plugins : &mut HashMap<String, Box<dyn plugin_interface::Plugin>>) -> Inhibit { let action = match gdk_event { gdk::EventType::ButtonPress => { if let Some(action) = entry.actions.get(&PuszEvent::Click) { action } else { return Inhibit(false); } } gdk::EventType::Damage => { //fake out return lul. if let Some(action) = entry.actions.get(&PuszEvent::SpecialKeyPress(SpecialKey::Return)) { action } else { return Inhibit(false); } } _ => { return Inhibit(false); } }; match action { PuszAction::SetClipboard => { HotkeyData::set_clipboard(&entry.content); Inhibit(true) }, PuszAction::OpenBrowserIfLink => { if url::Url::parse(&entry.content).is_ok() { webbrowser::open(&entry.content); } Inhibit(true) }, PuszAction::CustomAction => { panic!("custom action"); //this inhibit depends on the plugin result. Inhibit(false) }, } } fn spawn_entry(ctx : Rc<RefCell<Context>>, main_edit : gtk::Entry, row : PuszRow) -> gtk::Box { let container = gtk::Box::new(gtk::Orientation::Horizontal, 0); let text = row.main_entry.content.clone(); let text_cloned = text.clone(); let ctx_clone = ctx.clone(); let main_entry_clone = row.main_entry.clone(); container.connect_key_press_event(move |_, event_key| { use gdk::enums::key::*; #[allow(non_upper_case_globals)] match event_key.get_keyval() { Return => { let ctx : &mut Context = &mut ctx_clone.borrow_mut(); handle_action(gdk::EventType::ButtonPress, &main_entry_clone, &mut ctx.plugins); //we are doing buttonpress and return at the same time.. temporarly(?) handle_action(gdk::EventType::Damage, &main_entry_clone, &mut ctx.plugins); Inhibit(false) } Down | Up => { Inhibit(false) } _ => { main_edit.grab_focus_without_selecting(); let _ = main_edit.emit("key-press-event", &[&event_key.to_value()]); Inhibit(true) } } }); container.set_can_focus(true); // container.set_has_window(true); crashes app. 
container.connect_draw(draw_entry_background); //this could be a function a row.. but not really as it would consume whole row. let mut entries = row.additional_entries; entries.insert(0, row.main_entry); for entry in entries { let button = gtk::Button::new_with_label(&entry.label); let ctx = ctx.clone(); button.connect_button_press_event(move |_, event| { let ctx: &mut Context = &mut ctx.borrow_mut(); handle_action(event.get_event_type(), &entry, &mut ctx.plugins) }); container.add(&button); } // if row.is_removable { // let removal_button = gtk::Button::new_with_label("X"); // { // let container = container.clone(); // container.add(&removal_button); // removal_button.connect_button_press_event(move |_, _| { // let ctx: &mut Context = &mut ctx.borrow_mut(); // // plugin ask to remove. //// ctx.remove_entry(&text); // // container.hide(); // container.grab_focus(); // // Inhibit(true) // }); // } // } container } struct Query { action : String, query: String, } impl Query { fn action(&self) -> &str { if self.action.is_empty() { "clip" } else { self.action() } } } struct Context { special_entries_builders : Vec<(regex::Regex, String)>, plugins : HashMap<String, Box<dyn plugin_interface::Plugin>>, } impl Context { fn new() -> Self { let r = [(r"(INC\d{4,})", "incident"), (r"(RITM\d{4,})", "sc_req_item"), (r"(CHG\d{4,})", "change_request"), (r"(PRB\d{4,})", "problem") , (r"(PRBTASK\d{4,})", "problem_task")]; Self { special_entries_builders: r.iter().map(|(pattern, base)| (regex::Regex::new(pattern).expect(&format!("failure to build regex from {}", pattern)), base.to_string())).collect(), plugins: load_plugins(), } } fn remove_entry(&mut self, text: &str) { // need more efficient sol! 
:) // self.model.clips.retain( |e| e.text != text); // save_data_model("pusz.json", &self.model); } } enum PuszInternalEvent { ClipboardChanged(String), BringToFront, } fn build_ui(application: &gtk::Application) { let ctx = Rc::new(RefCell::new(Context::new())); let window = gtk::ApplicationWindow::new(application); window.connect_screen_changed(set_visual); window.connect_draw(draw); window.set_app_paintable(true); // crucial for transparency let (tx, rx) = glib::MainContext::channel(glib::PRIORITY_DEFAULT); { let tx = tx.clone(); HotkeyData::do_it(WindowsApiEvent::AddClipboardListener { handler: Arc::new(move |clip| { tx.send(PuszInternalEvent::ClipboardChanged(clip)).expect("send failure"); } ) }); } window.set_title("pusz"); window.set_border_width(0); window.set_position(gtk::WindowPosition::Center); window.set_default_size(840, 480); window.set_decorated(false); // window.connect_focus_in_event(|_, event| { // println!("gained focus."); // Inhibit(false) // } ); // window.connect_focus_out_event(|_, event| { // println!("lost focus."); // Inhibit(false) // } ); let input_field = gtk::Entry::new(); let row = gtk::Box::new(gtk::Orientation::Vertical, 1); let scroll_container = gtk::ScrolledWindow::new( gtk::NONE_ADJUSTMENT, gtk::NONE_ADJUSTMENT); scroll_container.set_max_content_height(400); let scroll_insides = gtk::Box::new(gtk::Orientation::Vertical, 1); scroll_container.add(&scroll_insides); row.add(&input_field); // row.pack_start(&input_field, false, false, 10); row.add(&scroll_container); row.set_child_expand(&scroll_container, true); // let mut visible = true; window.add(&row); window.show_all(); { let ctx = Rc::clone(&ctx); let input_field = input_field.clone(); input_field.clone().connect_key_press_event(move |_, event_key| { use gdk::enums::key::*; #[allow(non_upper_case_globals)] match event_key.get_keyval() { Return => { //TODO: so what we need to do here is we need to have an ability to know which entry is the first one upon pressing enter 
                    // Alternatively we just pass this to a plugin — but which plugin?
                    // Without a /command: no plugin; with a command: that specific plugin.
                    println!("key pressed on main focus");
                    Inhibit(false)
                }
                _ => {
                    Inhibit(false)
                }
            }
        });
        // Re-query all interested plugins on every edit and rebuild the result list.
        input_field.clone().connect_changed(move |entry| {
            // Clear the previous results before rendering the new ones.
            for c in &scroll_insides.get_children() {
                scroll_insides.remove(c);
            }
            if let Some(text) = entry.get_text() {
                // "/cmd rest of query" targets the plugin named `cmd`;
                // plain text goes to every plugin that answers implicit queries.
                let mut words = text.split_whitespace();
                let (query, command) = if text.starts_with("/") {
                    let command = &words.next().unwrap()[1..];
                    let query: String = words.collect();
                    (query, Some(command))
                } else {
                    (text.to_string(), None)
                };
                // would current borrowck allow me to store plugins into a vec
                // rather than doing the below abomination?
                let results: Vec<PluginResult> = ctx.borrow_mut()
                    .plugins
                    .iter_mut()
                    .filter(|(name, plugin)| Some(plugin.name()) == command || !plugin.settings().requies_explicit_query)
                    .map(|(_name, plugin)| {
                        plugin.query(&query) // TODO: hint about what plugins are available
                    }).collect();
                use plugin_interface::*;
                // TODO: better handle the case where nothing matches, e.g.:
                // if results.is_empty() {
                //     let err_message = format!("{:?}", result);
                //     let err_row = PuszRowBuilder::new(err_message, main_id()).build().unwrap();
                //     scroll_insides.add(&spawn_entry(ctx.clone(), input_field.clone(), err_row));
                // } else {
                for result in results {
                    if let PluginResult::Ok(results) = result {
                        for r in results {
                            scroll_insides.add(&spawn_entry(ctx.clone(), input_field.clone(), r));
                        }
                    }
                }
            }
            scroll_insides.show_all();
        });
    }

    // Main-loop side of the channel: dispatch internal events on the GTK thread.
    rx.attach(None, move |event| {
        match event {
            PuszInternalEvent::ClipboardChanged(clipboard) => {
                // Fan the new clipboard contents out to subscribed plugins.
                for (_, plugin) in ctx.borrow_mut().plugins.iter_mut() {
                    if plugin.settings().interested_in_clipboard {
                        plugin.on_subscribed_event(&PluginEvent::Clipboard(clipboard.clone()));
                    }
                }
            },
            PuszInternalEvent::BringToFront => {
                window.present();
                // Pre-fill the input field from the clipboard when summoned.
                if let Some(clip) = HotkeyData::get_clipboard() {
                    input_field.grab_focus();
                    input_field.emit_paste_clipboard();
                }
            },
        }
        glib::Continue(true)
    });

    // Global F1 hotkey summons the window.
    HotkeyData::register_hotkey(13, winapi_stuff::Key::F1, Modifier::None, Arc::new(move |_| {
        tx.send(PuszInternalEvent::BringToFront).unwrap();
    }));
}

// Loads every plugin DLL found in ./plugins (plus local debug builds) and
// indexes the loaded plugins by name.
fn load_plugins() -> HashMap<String, Box<dyn plugin_interface::Plugin>> {
    use std::fs;
    let mut dll_paths = if let Ok(entries) = fs::read_dir("plugins") {
        entries.filter_map(|e| e.ok()).filter_map(|e| e.path().into_os_string().into_string().ok()).filter(|file_name| file_name.ends_with(".dll")).collect::<Vec<_>>()
    } else {
        println!("couldnt read plugins dir?");
        return HashMap::new();
    };
    // Hacky solution for development purposes: also pick up freshly built plugins.
    if std::path::Path::new("target/debug/calc_plugin.dll").exists() {
        dll_paths.push("target/debug/calc_plugin.dll".to_string());
    }
    if std::path::Path::new("target/debug/clipboard_plugin.dll").exists() {
        dll_paths.push("target/debug/clipboard_plugin.dll".to_string());
    }
    let mut plugins : Vec<Box<dyn plugin_interface::Plugin>> = unsafe {
        dll_paths.into_iter().map(|dll_path| {
            let lib = libloading::Library::new(dll_path).expect("failed to load");
            let load: libloading::Symbol<plugin_interface::LoadFn> = lib.get(b"load").expect("failed to load introduce");
            let plugin = load(plugin_interface::COMMON_INTERFACE_VERSION);
            // We don't ever want to unload plugins, so leak the library handle —
            // dropping it would unmap code the plugin still points into.
            ::std::mem::forget(lib);
            plugin.expect("couldnt load plugin!")
        }).collect()
    };
    plugins.into_iter().map(|p| (p.name().to_string(), p)).collect()
}

fn main() {
    use simplelog::*;
    use std::fs::File;
    // Warnings to the terminal, info and up to pusz.log.
    CombinedLogger::init(
        vec![
            TermLogger::new(LevelFilter::Warn, Config::default(), TerminalMode::Mixed).unwrap(),
            WriteLogger::new(LevelFilter::Info, Config::default(), File::create("pusz.log").unwrap()),
        ]
    ).unwrap();
    info!("Pusz application initializing.");
    let application = Application::new(Some("com.github.gtk-rs.examples.basic"), Default::default())
        .expect("failed to initialize GTK application");
    application.connect_activate(|app| {
        build_ui(app);
    });
    application.run(&[]);
}

#[cfg(test)]
mod model_tests {
    use super::*;

    #[test]
    fn special_entry_snow() {
        let ctx = Context::new();
        assert_eq!(special_entry(&ctx,"invalid"), vec![]);
        assert_eq!(special_entry(&ctx,"INC0123"), vec![PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label : "snow link: INC0123".to_owned(), content : "https://ig.service-now.com/incident.do?sysparm_query=number=INC0123".to_owned() }]);
        assert_eq!(special_entry(&ctx,"CHG0123"), vec![PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label : "snow link: CHG0123".to_owned(), content : "https://ig.service-now.com/change_request.do?sysparm_query=number=CHG0123".to_owned() }]);
        assert_eq!(special_entry(&ctx,"RITM0123"), vec![PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label : "snow link: RITM0123".to_owned(), content : "https://ig.service-now.com/sc_req_item.do?sysparm_query=number=RITM0123".to_owned() }]);
        assert_eq!(special_entry(&ctx,"PRBTASK0123"), vec![PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label : "snow link: PRBTASK0123".to_owned(), content : "https://ig.service-now.com/problem_task.do?sysparm_query=number=PRBTASK0123".to_owned() }]);
        assert_eq!(special_entry(&ctx,"PRB0123"), vec![PuszEntry { actions : btreemap!(PuszEvent::Click => PuszAction::SetClipboard), label : "snow link: PRB0123".to_owned(), content : "https://ig.service-now.com/problem.do?sysparm_query=number=PRB0123".to_owned() }]);
    }

    #[test]
    fn fuzzy_match_showcase() {
        use fuzzy_matcher::skim::fuzzy_match;
        assert_eq!(Some(106), fuzzy_match("choice", "choice")); //hm. better result than 1:1 string?
        assert_eq!(Some(110), fuzzy_match("c-hoice", "choice"));
        assert_eq!(Some(46), fuzzy_match("cxhxoxixcxex", "choice"));
    }
}
// Module layout of the `ai` crate: private internals first, then the public
// sub-modules, whose items are all re-exported at the crate root.

#[macro_use]
mod macros; // declared first so its macros are visible to the modules below
mod libc; // NOTE(review): contents not visible from here — presumably low-level bindings
mod internals;

pub mod version;
pub mod universe;
pub mod string;
pub mod node;
pub mod node_entry;
pub mod vector;
pub mod matrix;
pub mod constants;
pub mod utils;
pub mod math;

// Flatten the public modules into the crate root (2015-edition `ai::` paths).
pub use ai::version::*;
pub use ai::universe::*;
pub use ai::string::*;
pub use ai::node::*;
pub use ai::node_entry::*;
pub use ai::vector::*;
pub use ai::matrix::*;
pub use ai::constants::*;
pub use ai::utils::*;
pub use ai::math::*;
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Incident { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "ruleName", default, skip_serializing_if = "Option::is_none")] pub rule_name: Option<String>, #[serde(rename = "isActive", default, skip_serializing_if = "Option::is_none")] pub is_active: Option<bool>, #[serde(rename = "activatedTime", default, skip_serializing_if = "Option::is_none")] pub activated_time: Option<String>, #[serde(rename = "resolvedTime", default, skip_serializing_if = "Option::is_none")] pub resolved_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IncidentListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Incident>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleCondition { #[serde(rename = "odata.type")] pub odata_type: String, #[serde(rename = "dataSource", default, skip_serializing_if = "Option::is_none")] pub data_source: Option<RuleDataSource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleDataSource { #[serde(rename = "odata.type")] pub odata_type: String, #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")] pub resource_uri: Option<String>, #[serde(rename = "legacyResourceId", default, skip_serializing_if = "Option::is_none")] pub legacy_resource_id: Option<String>, #[serde(rename = "resourceLocation", default, skip_serializing_if = "Option::is_none")] pub resource_location: Option<String>, #[serde(rename = 
"metricNamespace", default, skip_serializing_if = "Option::is_none")] pub metric_namespace: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleMetricDataSource { #[serde(flatten)] pub rule_data_source: RuleDataSource, #[serde(rename = "metricName", default, skip_serializing_if = "Option::is_none")] pub metric_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleManagementEventClaimsDataSource { #[serde(rename = "emailAddress", default, skip_serializing_if = "Option::is_none")] pub email_address: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleManagementEventDataSource { #[serde(flatten)] pub rule_data_source: RuleDataSource, #[serde(rename = "eventName", default, skip_serializing_if = "Option::is_none")] pub event_name: Option<String>, #[serde(rename = "eventSource", default, skip_serializing_if = "Option::is_none")] pub event_source: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub level: Option<String>, #[serde(rename = "operationName", default, skip_serializing_if = "Option::is_none")] pub operation_name: Option<String>, #[serde(rename = "resourceGroupName", default, skip_serializing_if = "Option::is_none")] pub resource_group_name: Option<String>, #[serde(rename = "resourceProviderName", default, skip_serializing_if = "Option::is_none")] pub resource_provider_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "subStatus", default, skip_serializing_if = "Option::is_none")] pub sub_status: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option<RuleManagementEventClaimsDataSource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ConditionOperator { GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
enum TimeAggregationOperator { Average, Minimum, Maximum, Total, Last, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ThresholdRuleCondition { #[serde(flatten)] pub rule_condition: RuleCondition, pub operator: ConditionOperator, pub threshold: f64, #[serde(rename = "windowSize", default, skip_serializing_if = "Option::is_none")] pub window_size: Option<String>, #[serde(rename = "timeAggregation", default, skip_serializing_if = "Option::is_none")] pub time_aggregation: Option<TimeAggregationOperator>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LocationThresholdRuleCondition { #[serde(flatten)] pub rule_condition: RuleCondition, #[serde(rename = "windowSize", default, skip_serializing_if = "Option::is_none")] pub window_size: Option<String>, #[serde(rename = "failedLocationCount")] pub failed_location_count: i32, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagementEventAggregationCondition { #[serde(default, skip_serializing_if = "Option::is_none")] pub operator: Option<ConditionOperator>, #[serde(default, skip_serializing_if = "Option::is_none")] pub threshold: Option<f64>, #[serde(rename = "windowSize", default, skip_serializing_if = "Option::is_none")] pub window_size: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagementEventRuleCondition { #[serde(flatten)] pub rule_condition: RuleCondition, #[serde(default, skip_serializing_if = "Option::is_none")] pub aggregation: Option<ManagementEventAggregationCondition>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleAction { #[serde(rename = "odata.type")] pub odata_type: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleEmailAction { #[serde(flatten)] pub rule_action: RuleAction, #[serde(rename = "sendToServiceOwners", default, skip_serializing_if = "Option::is_none")] pub send_to_service_owners: Option<bool>, #[serde(rename = 
"customEmails", default, skip_serializing_if = "Vec::is_empty")] pub custom_emails: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RuleWebhookAction { #[serde(flatten)] pub rule_action: RuleAction, #[serde(rename = "serviceUri", default, skip_serializing_if = "Option::is_none")] pub service_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AlertRule { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<String>, #[serde(rename = "isEnabled")] pub is_enabled: bool, pub condition: RuleCondition, #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<RuleAction>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub actions: Vec<RuleAction>, #[serde(rename = "lastUpdatedTime", default, skip_serializing_if = "Option::is_none")] pub last_updated_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AlertRuleResource { #[serde(flatten)] pub resource: Resource, pub properties: AlertRule, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AlertRuleResourcePatch { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, 
#[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<AlertRule>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AlertRuleResourceCollection { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<AlertRuleResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RetentionPolicy { pub enabled: bool, pub days: i32, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogProfileProperties { #[serde(rename = "storageAccountId", default, skip_serializing_if = "Option::is_none")] pub storage_account_id: Option<String>, #[serde(rename = "serviceBusRuleId", default, skip_serializing_if = "Option::is_none")] pub service_bus_rule_id: Option<String>, pub locations: Vec<String>, pub categories: Vec<String>, #[serde(rename = "retentionPolicy")] pub retention_policy: RetentionPolicy, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogProfileResource { #[serde(flatten)] pub resource: Resource, pub properties: LogProfileProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogProfileResourcePatch { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<LogProfileProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogProfileCollection { pub value: Vec<LogProfileResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LocalizableString { pub value: String, #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")] pub localized_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MetricAvailability { #[serde(rename = "timeGrain", default, skip_serializing_if = "Option::is_none")] pub time_grain: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] 
pub retention: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Unit { Count, Bytes, Seconds, CountPerSecond, BytesPerSecond, Percent, MilliSeconds, ByteSeconds, Unspecified, Cores, MilliCores, NanoCores, BitsPerSecond, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MetricDefinition { #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<LocalizableString>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<Unit>, #[serde(rename = "primaryAggregationType", default, skip_serializing_if = "Option::is_none")] pub primary_aggregation_type: Option<metric_definition::PrimaryAggregationType>, #[serde(rename = "metricAvailabilities", default, skip_serializing_if = "Vec::is_empty")] pub metric_availabilities: Vec<MetricAvailability>, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, } pub mod metric_definition { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum PrimaryAggregationType { None, Average, Count, Minimum, Maximum, Total, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MetricDefinitionCollection { pub value: Vec<MetricDefinition>, }