// NOTE(review): removed dataset-export artifact lines ("text stringlengths ... |")
// that are not valid Rust; they are a concatenation header, not source code.
use core::fmt::{self, Write};
use common::time::Duration;
/// Severity of a kernel log message, ordered most to least severe.
#[derive(Copy, Clone)]
pub enum LogLevel {
/// Unrecoverable or system-threatening conditions.
Critical,
/// Errors that were handled but indicate failure.
Error,
/// Suspicious conditions worth surfacing.
Warning,
/// Routine informational messages.
Info,
/// Developer diagnostics; `syslog_inner` does not echo these to the serial console.
Debug,
}
/// Add message to kernel logs with format
#[macro_export]
macro_rules! syslog {
($level:expr, $($arg:tt)*) => ({
$crate::logging::syslog_inner($level, format_args!($($arg)*));
});
}
/// Log a formatted message at `Debug` level.
#[macro_export]
macro_rules! syslog_debug {
($($arg:tt)*) => ({
$crate::logging::syslog_inner($crate::logging::LogLevel::Debug, format_args!($($arg)*));
});
}
/// Log a formatted message at `Info` level.
#[macro_export]
macro_rules! syslog_info {
($($arg:tt)*) => ({
$crate::logging::syslog_inner($crate::logging::LogLevel::Info, format_args!($($arg)*));
});
}
/// Log a formatted message at `Warning` level.
#[macro_export]
macro_rules! syslog_warning {
($($arg:tt)*) => ({
$crate::logging::syslog_inner($crate::logging::LogLevel::Warning, format_args!($($arg)*));
});
}
/// Log a formatted message at `Critical` level.
#[macro_export]
macro_rules! syslog_critical {
($($arg:tt)*) => ({
$crate::logging::syslog_inner($crate::logging::LogLevel::Critical, format_args!($($arg)*));
});
}
/// Log a formatted message at `Error` level.
#[macro_export]
macro_rules! syslog_error {
($($arg:tt)*) => ({
$crate::logging::syslog_inner($crate::logging::LogLevel::Error, format_args!($($arg)*));
});
}
/// Add `message` to the kernel logs, with a priority level of `level`
pub fn syslog(level: LogLevel, message: &str) {
syslog_inner(level, format_args!("{}", message));
}
//TODO: Limit log message size
/// Core log sink: timestamps `message` with the monotonic clock, prepends a
/// level tag, and appends the line to the in-kernel log buffer. Every level
/// except `Debug` is additionally echoed to the serial console.
pub fn syslog_inner(level: LogLevel, message: fmt::Arguments) {
// Monotonic timestamp, rendered below as "secs.millis" (nanos/1_000_000,
// zero-padded to three digits).
let time = Duration::monotonic();
// `display` controls whether the line is also written to the serial console.
let (prefix, display) = match level {
LogLevel::Debug => ("DEBUG ", false),
LogLevel::Info => ("INFO ", true),
LogLevel::Warning => ("WARN ", true),
LogLevel::Error => ("ERROR ", true),
LogLevel::Critical => ("CRIT ", true),
};
// NOTE(review): `::env().log.get()` looks like an UnsafeCell-style raw pointer
// to the global log buffer; exclusive access is assumed here — confirm against
// the definition of `env()`.
let _ = write!(unsafe { &mut *::env().log.get() }, "[{}.{:>03}] {}{}\n", time.secs, time.nanos/1000000, prefix, message);
if display {
let _ = write!(::common::debug::SerialConsole::new(), "[{}.{:>03}] {}{}\n", time.secs, time.nanos/1000000, prefix, message);
}
}
// --- file boundary (dataset extraction artifact; stray "|" removed) ---
// NOTE(review): this section appears to be svd2rust-style auto-generated
// register accessor code for the CRYP_K1LR register (fields K160-K191).
#[doc = "Writer for register CRYP_K1LR"]
pub type W = crate::W<u32, super::CRYP_K1LR>;
#[doc = "Register CRYP_K1LR `reset()`'s with value 0"]
impl crate::ResetValue for super::CRYP_K1LR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
// The hardware reset value of the whole register is all-zero.
0
}
}
#[doc = "Write proxy for field `K160`"]
pub struct K160_W<'a> {
w: &'a mut W,
}
impl<'a> K160_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Write proxy for field `K161`"]
pub struct K161_W<'a> {
w: &'a mut W,
}
impl<'a> K161_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Write proxy for field `K162`"]
pub struct K162_W<'a> {
w: &'a mut W,
}
impl<'a> K162_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Write proxy for field `K163`"]
pub struct K163_W<'a> {
w: &'a mut W,
}
impl<'a> K163_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Write proxy for field `K164`"]
pub struct K164_W<'a> {
w: &'a mut W,
}
impl<'a> K164_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Write proxy for field `K165`"]
pub struct K165_W<'a> {
w: &'a mut W,
}
impl<'a> K165_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Write proxy for field `K166`"]
pub struct K166_W<'a> {
w: &'a mut W,
}
impl<'a> K166_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Write proxy for field `K167`"]
pub struct K167_W<'a> {
w: &'a mut W,
}
impl<'a> K167_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Write proxy for field `K168`"]
pub struct K168_W<'a> {
w: &'a mut W,
}
impl<'a> K168_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Write proxy for field `K169`"]
pub struct K169_W<'a> {
w: &'a mut W,
}
impl<'a> K169_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Write proxy for field `K170`"]
pub struct K170_W<'a> {
w: &'a mut W,
}
impl<'a> K170_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Write proxy for field `K171`"]
pub struct K171_W<'a> {
w: &'a mut W,
}
impl<'a> K171_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Write proxy for field `K172`"]
pub struct K172_W<'a> {
w: &'a mut W,
}
impl<'a> K172_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Write proxy for field `K173`"]
pub struct K173_W<'a> {
w: &'a mut W,
}
impl<'a> K173_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Write proxy for field `K174`"]
pub struct K174_W<'a> {
w: &'a mut W,
}
impl<'a> K174_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
self.w
}
}
#[doc = "Write proxy for field `K175`"]
pub struct K175_W<'a> {
w: &'a mut W,
}
impl<'a> K175_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Write proxy for field `K176`"]
pub struct K176_W<'a> {
w: &'a mut W,
}
impl<'a> K176_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Write proxy for field `K177`"]
pub struct K177_W<'a> {
w: &'a mut W,
}
impl<'a> K177_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Write proxy for field `K178`"]
pub struct K178_W<'a> {
w: &'a mut W,
}
impl<'a> K178_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Write proxy for field `K179`"]
pub struct K179_W<'a> {
w: &'a mut W,
}
impl<'a> K179_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Write proxy for field `K180`"]
pub struct K180_W<'a> {
w: &'a mut W,
}
impl<'a> K180_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Write proxy for field `K181`"]
pub struct K181_W<'a> {
w: &'a mut W,
}
impl<'a> K181_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
self.w
}
}
#[doc = "Write proxy for field `K182`"]
pub struct K182_W<'a> {
w: &'a mut W,
}
impl<'a> K182_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
self.w
}
}
#[doc = "Write proxy for field `K183`"]
pub struct K183_W<'a> {
w: &'a mut W,
}
impl<'a> K183_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23);
self.w
}
}
#[doc = "Write proxy for field `K184`"]
pub struct K184_W<'a> {
w: &'a mut W,
}
impl<'a> K184_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Write proxy for field `K185`"]
pub struct K185_W<'a> {
w: &'a mut W,
}
impl<'a> K185_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Write proxy for field `K186`"]
pub struct K186_W<'a> {
w: &'a mut W,
}
impl<'a> K186_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Write proxy for field `K187`"]
pub struct K187_W<'a> {
w: &'a mut W,
}
impl<'a> K187_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
self.w
}
}
#[doc = "Write proxy for field `K188`"]
pub struct K188_W<'a> {
w: &'a mut W,
}
impl<'a> K188_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
self.w
}
}
#[doc = "Write proxy for field `K189`"]
pub struct K189_W<'a> {
w: &'a mut W,
}
impl<'a> K189_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29);
self.w
}
}
#[doc = "Write proxy for field `K190`"]
pub struct K190_W<'a> {
w: &'a mut W,
}
impl<'a> K190_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
self.w
}
}
#[doc = "Write proxy for field `K191`"]
pub struct K191_W<'a> {
w: &'a mut W,
}
impl<'a> K191_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
self.w
}
}
/// Expands to a method on `W` that hands out the matching one-bit
/// write proxy, mutably borrowing the writer for the proxy's lifetime.
/// The expansion is identical to the generated accessor it replaces.
macro_rules! k1lr_field {
    ($(#[$bit_doc:meta])* $method:ident, $proxy:ident) => {
        $(#[$bit_doc])*
        #[inline(always)]
        pub fn $method(&mut self) -> $proxy {
            $proxy { w: self }
        }
    };
}
impl W {
    k1lr_field!(#[doc = "Bit 0 - K160"] k160, K160_W);
    k1lr_field!(#[doc = "Bit 1 - K161"] k161, K161_W);
    k1lr_field!(#[doc = "Bit 2 - K162"] k162, K162_W);
    k1lr_field!(#[doc = "Bit 3 - K163"] k163, K163_W);
    k1lr_field!(#[doc = "Bit 4 - K164"] k164, K164_W);
    k1lr_field!(#[doc = "Bit 5 - K165"] k165, K165_W);
    k1lr_field!(#[doc = "Bit 6 - K166"] k166, K166_W);
    k1lr_field!(#[doc = "Bit 7 - K167"] k167, K167_W);
    k1lr_field!(#[doc = "Bit 8 - K168"] k168, K168_W);
    k1lr_field!(#[doc = "Bit 9 - K169"] k169, K169_W);
    k1lr_field!(#[doc = "Bit 10 - K170"] k170, K170_W);
    k1lr_field!(#[doc = "Bit 11 - K171"] k171, K171_W);
    k1lr_field!(#[doc = "Bit 12 - K172"] k172, K172_W);
    k1lr_field!(#[doc = "Bit 13 - K173"] k173, K173_W);
    k1lr_field!(#[doc = "Bit 14 - K174"] k174, K174_W);
    k1lr_field!(#[doc = "Bit 15 - K175"] k175, K175_W);
    k1lr_field!(#[doc = "Bit 16 - K176"] k176, K176_W);
    k1lr_field!(#[doc = "Bit 17 - K177"] k177, K177_W);
    k1lr_field!(#[doc = "Bit 18 - K178"] k178, K178_W);
    k1lr_field!(#[doc = "Bit 19 - K179"] k179, K179_W);
    k1lr_field!(#[doc = "Bit 20 - K180"] k180, K180_W);
    k1lr_field!(#[doc = "Bit 21 - K181"] k181, K181_W);
    k1lr_field!(#[doc = "Bit 22 - K182"] k182, K182_W);
    k1lr_field!(#[doc = "Bit 23 - K183"] k183, K183_W);
    k1lr_field!(#[doc = "Bit 24 - K184"] k184, K184_W);
    k1lr_field!(#[doc = "Bit 25 - K185"] k185, K185_W);
    k1lr_field!(#[doc = "Bit 26 - K186"] k186, K186_W);
    k1lr_field!(#[doc = "Bit 27 - K187"] k187, K187_W);
    k1lr_field!(#[doc = "Bit 28 - K188"] k188, K188_W);
    k1lr_field!(#[doc = "Bit 29 - K189"] k189, K189_W);
    k1lr_field!(#[doc = "Bit 30 - K190"] k190, K190_W);
    k1lr_field!(#[doc = "Bit 31 - K191"] k191, K191_W);
}
// --- file boundary (dataset extraction artifact; stray "|" removed) ---
/// Demo entry point: greets with a counter for 1..5, reports each number's
/// parity, then prints the greeting word one character per line.
fn main() {
    let str_hello = "Hello";
    let str_world = "world!";
    // Same greeting the original built via String concatenation with `+`.
    let str_hello_world = format!("{}, {}", str_hello, str_world);
    let str_even = "even number";
    let str_odd = "odd number";
    for n in 1..5 {
        print_even_or_odd(n, &str_hello_world);
        // Pick the label as an expression instead of branching on the println.
        let parity = if is_even_or_odd_by(n) { str_even } else { str_odd };
        println!(" {}", parity);
    }
    // Each character of the greeting word on its own line.
    for ch in str_hello.chars() {
        println!("{}", ch);
    }
}
/// Prints the greeting followed by the loop counter `n`.
///
/// Takes `&str` instead of `&String` (the idiomatic borrowed string type);
/// existing callers passing `&String` still compile via deref coercion.
/// The redundant explicit `-> ()` return type is dropped.
fn print_even_or_odd(n: u32, str_hello_world: &str) {
    println!("{} {}", str_hello_world, n);
}
/// Returns `true` when `n` is even, `false` when it is odd.
///
/// The former `if cond { return true } else { return false }` and the
/// dead commented-out `return false;` are replaced by the boolean
/// expression itself.
fn is_even_or_odd_by(n: u32) -> bool {
    n % 2 == 0
}
/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
use std::fs::File;
use std::io::{Seek, Read};
use crate::common::{ANNResult, ANNError};
/// Sequential cached reads
///
/// Wraps a `File` with a single read-ahead buffer of up to `cache_size`
/// bytes so small sequential reads avoid per-call syscalls.
pub struct CachedReader {
/// File reader
reader: File,
/// # bytes to cache in one shot read
cache_size: u64,
/// Underlying buf for cache
cache_buf: Vec<u8>,
/// Offset into cache_buf for cur_pos
/// (`cur_off == cache_size` means the cache is exhausted)
cur_off: u64,
/// File size
fsize: u64,
}
impl CachedReader {
/// Opens `filename` and eagerly fills the cache with the first
/// `min(cache_size, file size)` bytes.
///
/// # Errors
/// Propagates I/O errors from opening, stat-ing, or reading the file.
pub fn new(filename: &str, cache_size: u64) -> std::io::Result<Self> {
let mut reader = File::open(filename)?;
let metadata = reader.metadata()?;
let fsize = metadata.len();
// Never cache more bytes than the file actually holds.
let cache_size = cache_size.min(fsize);
let mut cache_buf = vec![0; cache_size as usize];
reader.read_exact(&mut cache_buf)?;
// NOTE(review): unconditional println! in a library constructor is noisy;
// consider routing through a logging facility.
println!("Opened: {}, size: {}, cache_size: {}", filename, fsize, cache_size);
Ok(Self {
reader,
cache_size,
cache_buf,
cur_off: 0,
fsize,
})
}
/// Total size of the underlying file in bytes.
pub fn get_file_size(&self) -> u64 {
self.fsize
}
/// Fills `read_buf` completely, from the cache and (if needed) the file.
///
/// # Errors
/// Returns an index error when the request would run past end of file;
/// propagates underlying I/O errors.
pub fn read(&mut self, read_buf: &mut [u8]) -> ANNResult<()> {
let n_bytes = read_buf.len() as u64;
if n_bytes <= (self.cache_size - self.cur_off) {
// case 1: cache contains all data
read_buf.copy_from_slice(&self.cache_buf[(self.cur_off as usize)..(self.cur_off as usize + n_bytes as usize)]);
self.cur_off += n_bytes;
} else {
// case 2: cache contains some data
let cached_bytes = self.cache_size - self.cur_off;
// Reject reads that extend beyond what file + cache can supply.
if n_bytes - cached_bytes > self.fsize - self.reader.stream_position()? {
return Err(ANNError::log_index_error(format!(
"Reading beyond end of file, n_bytes: {} cached_bytes: {} fsize: {} current pos: {}",
n_bytes, cached_bytes, self.fsize, self.reader.stream_position()?))
);
}
// Drain whatever the cache still holds into the front of read_buf.
read_buf[..cached_bytes as usize].copy_from_slice(&self.cache_buf[self.cur_off as usize..]);
// go to disk and fetch more data
self.reader.read_exact(&mut read_buf[cached_bytes as usize..])?;
// reset cur off
self.cur_off = self.cache_size;
// Refill the cache only when a full cache_size worth of bytes remains.
let size_left = self.fsize - self.reader.stream_position()?;
if size_left >= self.cache_size {
self.reader.read_exact(&mut self.cache_buf)?;
self.cur_off = 0;
}
// note that if size_left < cache_size, then cur_off = cache_size,
// so subsequent reads will all be directly from file
}
Ok(())
}
/// Reads the next four bytes and decodes them as a little-endian `u32`.
pub fn read_u32(&mut self) -> ANNResult<u32> {
let mut bytes = [0u8; 4];
self.read(&mut bytes)?;
Ok(u32::from_le_bytes(bytes))
}
}
#[cfg(test)]
mod cached_reader_test {
use std::fs;
use super::*;
// Exercises all three read paths: fully-cached, cache-straddling, and
// direct-from-file, ending with the cache-exhausted state.
#[test]
fn cached_reader_works() {
let file_name = "cached_reader_works_test.bin";
//npoints=2, dim=8, 2 vectors [1.0;8] [2.0;8]
// NOTE(review): the header bytes below do not obviously encode the
// npoints/dim values in the comment — they appear to be arbitrary
// fixture bytes; confirm against the file format spec.
let data: [u8; 72] = [2, 0, 1, 2, 8, 0, 1, 3,
0x00, 0x01, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x40,
0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0xe0, 0x40, 0x00, 0x00, 0x00, 0x41,
0x00, 0x00, 0x10, 0x41, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x30, 0x41, 0x00, 0x00, 0x40, 0x41,
0x00, 0x00, 0x50, 0x41, 0x00, 0x00, 0x60, 0x41, 0x00, 0x00, 0x70, 0x41, 0x00, 0x11, 0x80, 0x41];
std::fs::write(file_name, data).expect("Failed to write sample file");
let mut reader = CachedReader::new(file_name, 8).unwrap();
assert_eq!(reader.get_file_size(), 72);
assert_eq!(reader.cache_size, 8);
// Read 4 of the 8 cached bytes: served entirely from the cache.
let mut all_from_cache_buf = vec![0; 4];
reader.read(all_from_cache_buf.as_mut_slice()).unwrap();
assert_eq!(all_from_cache_buf, [2, 0, 1, 2]);
assert_eq!(reader.cur_off, 4);
// Read 6 bytes: 4 from cache, 2 from disk; cache refills, cur_off resets.
let mut partial_from_cache_buf = vec![0; 6];
reader.read(partial_from_cache_buf.as_mut_slice()).unwrap();
assert_eq!(partial_from_cache_buf, [8, 0, 1, 3, 0x00, 0x01]);
assert_eq!(reader.cur_off, 0);
// Read 60 bytes: exceeds the cache, mostly served straight from disk.
let mut over_cache_size_buf = vec![0; 60];
reader.read(over_cache_size_buf.as_mut_slice()).unwrap();
assert_eq!(
over_cache_size_buf,
[0x80, 0x3f, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x40,
0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0xe0, 0x40, 0x00, 0x00, 0x00, 0x41,
0x00, 0x00, 0x10, 0x41, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x30, 0x41, 0x00, 0x00, 0x40, 0x41,
0x00, 0x00, 0x50, 0x41, 0x00, 0x00, 0x60, 0x41, 0x00, 0x00, 0x70, 0x41, 0x00, 0x11]
);
// Final 2 bytes leave the cache exhausted (cur_off == cache_size).
let mut remaining_less_than_cache_size_buf = vec![0; 2];
reader.read(remaining_less_than_cache_size_buf.as_mut_slice()).unwrap();
assert_eq!(remaining_less_than_cache_size_buf, [0x80, 0x41]);
assert_eq!(reader.cur_off, reader.cache_size);
fs::remove_file(file_name).expect("Failed to delete file");
}
// A 73-byte request exceeds the 72-byte file, so `read` must return an error
// whose message matches the expected panic text.
// NOTE(review): the fixture file is removed before the read; this relies on
// the already-open handle remaining readable after unlink (POSIX semantics) —
// confirm the test behaves on Windows.
#[test]
#[should_panic(expected = "n_bytes: 73 cached_bytes: 8 fsize: 72 current pos: 8")]
fn failed_for_reading_beyond_end_of_file() {
let file_name = "failed_for_reading_beyond_end_of_file_test.bin";
//npoints=2, dim=8, 2 vectors [1.0;8] [2.0;8]
let data: [u8; 72] = [2, 0, 1, 2, 8, 0, 1, 3,
0x00, 0x01, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x40,
0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0xe0, 0x40, 0x00, 0x00, 0x00, 0x41,
0x00, 0x00, 0x10, 0x41, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x30, 0x41, 0x00, 0x00, 0x40, 0x41,
0x00, 0x00, 0x50, 0x41, 0x00, 0x00, 0x60, 0x41, 0x00, 0x00, 0x70, 0x41, 0x00, 0x11, 0x80, 0x41];
std::fs::write(file_name, data).expect("Failed to write sample file");
let mut reader = CachedReader::new(file_name, 8).unwrap();
fs::remove_file(file_name).expect("Failed to delete file");
let mut over_size_buf = vec![0; 73];
reader.read(over_size_buf.as_mut_slice()).unwrap();
}
}
// --- file boundary (dataset extraction artifact; stray "|" removed) ---
/// What a single grid square holds: a blocker (black square) or entered
/// text plus an optional visual modifier.
#[derive(Clone)]
pub enum SquareContents {
Blocker,
TextContent(String, Option<SquareModifier>),
}
/// Visual decoration applied to a text square.
#[derive(Clone)]
pub enum SquareModifier {
Shading,
Circle,
}
/// One cell of the crossword grid, including its clue-linkage metadata.
#[derive(Clone)]
pub struct Square {
pub content: SquareContents,
/// Clue number shown in the square, assigned by `calculate_clues`.
pub label: Option<u32>,
/// Column (0-based).
pub x: u32,
/// Row (0-based).
pub y: u32,
/// Label of the across entry this square belongs to.
pub across_entry: Option<u32>,
/// Index (into the puzzle's `squares` vec) of the next square in the across entry.
pub next_across: Option<usize>,
/// Index of the previous square in the across entry.
pub prev_across: Option<usize>,
/// Clue text, kept only on the first square of an across entry.
pub across_clue_text: Option<String>,
/// Label of the down entry this square belongs to.
pub down_entry: Option<u32>,
/// Index of the next square in the down entry.
pub next_down: Option<usize>,
/// Index of the previous square in the down entry.
pub prev_down: Option<usize>,
/// Clue text, kept only on the first square of a down entry.
pub down_clue_text: Option<String>,
}
impl Square {
    /// Creates a blank text square at grid position (`x`, `y`): no label,
    /// no entry membership, no clue links and no clue text.
    fn new(x: u32, y: u32) -> Self {
        let content = SquareContents::TextContent(String::new(), None);
        Square {
            content,
            label: None,
            x,
            y,
            across_entry: None,
            next_across: None,
            prev_across: None,
            across_clue_text: None,
            down_entry: None,
            next_down: None,
            prev_down: None,
            down_clue_text: None,
        }
    }
}
/// Direction of a crossword entry.
#[derive(Clone, Copy, Debug)]
pub enum EntryVariant {
Across,
Down,
}
/// One numbered entry (a run of squares) plus its clue.
#[derive(Clone)]
pub struct PuzzleEntry {
/// Clue number as printed in the grid.
pub label: u32,
pub variant: EntryVariant,
/// Indices into the puzzle's `squares` vec, in reading order.
pub member_indices: Vec<usize>,
pub clue: String,
}
/// Puzzle size/shape variant. `Weekday` and `Sunday` enforce rotational
/// blocker symmetry (see `cycle_blocker`); `WeekdayAsymmetric` does not.
#[derive(Clone, Debug)]
pub enum PuzzleType {
Mini,
Weekday,
WeekdayAsymmetric,
Sunday,
}
/// A crossword grid plus its derived entry lists.
pub struct Puzzle {
pub title: String,
// Side length of the square grid; `squares` has dim * dim elements.
dim: usize,
pub variant: PuzzleType,
/// Grid cells in row-major order.
pub squares: Vec<Square>,
pub across_entries: Vec<PuzzleEntry>,
pub down_entries: Vec<PuzzleEntry>,
/// When true, structural edits (blockers, modifiers) are disabled.
pub fill_only: bool,
/// Hash of the solved grid, when known.
pub solved_hash: Option<u64>,
}
impl Puzzle {
/// Builds an empty puzzle for `variant`: a `dim * dim` grid of blank
/// squares with clue numbering computed once up front.
pub fn new(variant: PuzzleType) -> Self {
    // Grid side length is determined by the puzzle variant.
    let d = match_puzzle_dim(&variant);
    // Squares are stored in row-major order: index i is (x, y) = (i % d, i / d).
    let squares: Vec<Square> = (0..d * d)
        .map(|i| Square::new((i % d) as u32, (i / d) as u32))
        .collect();
    let mut puzzle = Puzzle {
        title: "New Puzzle".to_string(),
        dim: d,
        variant,
        squares,
        across_entries: Vec::new(),
        down_entries: Vec::new(),
        fill_only: false,
        solved_hash: None,
    };
    // Derive labels and entry lists for the (initially empty) grid.
    puzzle.calculate_clues();
    puzzle
}
/// Borrows the square at grid position (`x`, `y`).
pub fn at(&self, x: u32, y: u32) -> &Square {
    // Translate grid coordinates into the row-major index.
    &self.squares[self.xy_to_index(x, y)]
}
/// Toggles the square at (`x`, `y`) between blocker and empty text.
///
/// For symmetric variants (`Weekday`, `Sunday`) the 180-degree-rotated
/// partner square is toggled too, via a single level of recursion guarded
/// by `nested`. Clue numbering is recomputed afterwards. No-op when the
/// puzzle is in fill-only mode.
pub fn cycle_blocker(&mut self, x: u32, y: u32, nested: bool) {
if !self.fill_only {
let index = self.xy_to_index(x, y);
match self.squares[index].content {
SquareContents::Blocker => {
// Blocker -> empty text square.
self.squares[index].content = SquareContents::TextContent("".to_string(),None);
},
SquareContents::TextContent(_,_) => {
// Text -> blocker; a blocker cannot carry clue text.
self.squares[index].across_clue_text = None;
self.squares[index].down_clue_text = None;
self.squares[index].content = SquareContents::Blocker;
},
};
match self.variant {
PuzzleType::Weekday | PuzzleType::Sunday => {
// Also block symmetric piece.
if !nested {
// The exact center square is its own rotation partner; skip it.
if (y != self.dim as u32 / 2) || (x != self.dim as u32 / 2) {
self.cycle_blocker(self.dim as u32 - x - 1, self.dim as u32 - y - 1, true);
}
}
},
_ => (),
}
self.calculate_clues();
}
}
/// Cycles the visual modifier of the text square at (`x`, `y`):
/// none -> shading -> circle -> none. Returns `true` when a change was
/// made; blockers and fill-only puzzles return `false`.
pub fn cycle_modifier(&mut self, x: u32, y: u32) -> bool {
    // Modifiers are a construction-mode feature.
    if self.fill_only {
        return false;
    }
    let index = self.xy_to_index(x, y);
    if let SquareContents::TextContent(s, modifier) = &self.squares[index].content {
        let next = match modifier {
            None => Some(SquareModifier::Shading),
            Some(SquareModifier::Shading) => Some(SquareModifier::Circle),
            Some(SquareModifier::Circle) => None,
        };
        self.squares[index].content = SquareContents::TextContent(s.clone(), next);
        true
    } else {
        // Blockers carry no modifier.
        false
    }
}
/// Sets or appends character `c` in the text square at (`x`, `y`),
/// preserving any modifier. Blockers are left untouched.
pub fn modify_sq_contents(&mut self, x: u32, y: u32, c: char, append: bool) {
    let index = self.xy_to_index(x, y);
    if let SquareContents::TextContent(s, modifier) = &self.squares[index].content {
        let text = if append {
            // Extend the existing contents with the new character.
            let mut extended = s.clone();
            extended.push(c);
            extended
        } else {
            // Replace the contents outright.
            c.to_string()
        };
        self.squares[index].content = SquareContents::TextContent(text, modifier.clone());
    }
}
/// Empties the text of the square at (`x`, `y`), keeping its modifier.
/// Returns `true` when the square was already empty before the call;
/// blockers are untouched and report `false`.
pub fn clear_sq_contents(&mut self, x: u32, y: u32) -> bool {
    let index = self.xy_to_index(x, y);
    if let SquareContents::TextContent(s, modifier) = &self.squares[index].content {
        // Record emptiness before overwriting the contents.
        let was_empty = s.is_empty();
        self.squares[index].content = SquareContents::TextContent(String::new(), modifier.clone());
        was_empty
    } else {
        false
    }
}
/// Renumber clue labels and rebuild the across/down entry lists from the
/// current blocker layout. Does nothing for fill-only puzzles.
///
/// Steps:
/// 1. mark squares that start an across/down answer (non-blocker squares
///    at a grid edge or immediately after a blocker),
/// 2. assign sequential labels in row-major order,
/// 3. rebuild `across_entries` / `down_entries`, wiring each member
///    square's next/prev links; the canonical clue text lives on the
///    entry's first square.
pub fn calculate_clues(&mut self) {
    if !self.fill_only {
        // Pass 1: a square starts an across clue when it is not a blocker
        // and is either in column 0 or immediately right of a blocker.
        let mut start_of_across_clue: Vec<bool> = vec![false; self.dim * self.dim];
        for y in 0..self.dim as u32 {
            let start_index = self.xy_to_index(0, y);
            let mut was_blocker = match self.squares[start_index].content {
                SquareContents::Blocker => {
                    true
                },
                _ => {
                    start_of_across_clue[start_index] = true;
                    false
                },
            };
            for x in 1..self.dim as u32 {
                let index = self.xy_to_index(x, y);
                match self.squares[index].content {
                    SquareContents::Blocker => {
                        was_blocker = true;
                    },
                    _ => {
                        if was_blocker {
                            start_of_across_clue[index] = true;
                            was_blocker = false;
                        }
                    },
                }
            }
        }
        // Pass 2: same for down clues — not a blocker, and in row 0 or
        // directly below a blocker.
        let mut start_of_down_clue: Vec<bool> = vec![false; self.dim * self.dim];
        for x in 0..self.dim as u32 {
            let start_index = self.xy_to_index(x, 0);
            let mut was_blocker = match self.squares[start_index].content {
                SquareContents::Blocker => {
                    true
                },
                _ => {
                    start_of_down_clue[start_index] = true;
                    false
                },
            };
            for y in 1..self.dim as u32 {
                let index = self.xy_to_index(x, y);
                match self.squares[index].content {
                    SquareContents::Blocker => {
                        was_blocker = true;
                    },
                    _ => {
                        if was_blocker {
                            start_of_down_clue[index] = true;
                            was_blocker = false;
                        }
                    },
                }
            }
        }
        // Pass 3: assign labels 1, 2, 3, ... in row-major order; a square
        // starting a clue in either (or both) directions gets one label.
        let mut current_label = 0;
        for y in 0..self.dim as u32 {
            for x in 0..self.dim as u32 {
                let index = self.xy_to_index(x, y);
                let l = if start_of_across_clue[index] || start_of_down_clue[index] {
                    current_label += 1;
                    Some(current_label)
                } else {
                    None
                };
                self.squares[index].label = l;
            }
        }
        // Construct across entries
        self.across_entries.clear();
        for y in 0..self.dim as u32 {
            let mut current_across = 0;
            // Indices of squares accumulated for the run in progress.
            let mut entries: Vec<usize> = Vec::new();
            for x in 0..self.dim as u32 {
                let index = self.xy_to_index(x, y);
                // The first labelled square of a run provides the label.
                if let Some(l) = self.squares[index].label {
                    if entries.len() == 0 {
                        current_across = l;
                    }
                };
                match &self.squares[index].content {
                    SquareContents::Blocker => {
                        self.squares[index].across_entry = None;
                        // A blocker terminates the current run: finalize it.
                        if entries.len() > 0 {
                            // assign next squares
                            for e_index in 0..(entries.len()-1) {
                                self.squares[entries[e_index]].next_across = Some(entries[e_index+1]);
                            }
                            self.squares[*entries.last().unwrap()].next_across = None;
                            // assign prev squares
                            for e_index in 1..entries.len() {
                                self.squares[entries[e_index]].prev_across = Some(entries[e_index - 1]);
                            }
                            self.squares[entries[0]].prev_across = None;
                            // set clue texts: the first square holds the
                            // canonical text (created empty if missing)
                            let text = match &self.squares[entries[0]].across_clue_text {
                                Some(s) => { s.clone() }
                                None => { self.squares[entries[0]].across_clue_text = Some("".to_string());
                                    "".to_string()
                                }
                            };
                            for e_index in 1..entries.len() {
                                self.squares[entries[e_index]].across_clue_text = None;
                            }
                            // Push to across entries list
                            let e = PuzzleEntry {
                                label: current_across,
                                variant: EntryVariant::Across,
                                member_indices: entries,
                                clue: text,
                            };
                            self.across_entries.push(e);
                            entries = Vec::new();
                        }
                    }
                    SquareContents::TextContent(_,_) => {
                        self.squares[index].across_entry = Some(current_across);
                        entries.push(index);
                    }
                }
            }
            // Flush the run still open at the right edge of the row
            // (same finalization as the blocker case above).
            if entries.len() > 0 {
                // assign next squares
                for e_index in 0..(entries.len()-1) {
                    self.squares[entries[e_index]].next_across = Some(entries[e_index+1]);
                }
                self.squares[*entries.last().unwrap()].next_across = None;
                // assign prev squares
                for e_index in 1..entries.len() {
                    self.squares[entries[e_index]].prev_across = Some(entries[e_index - 1]);
                }
                self.squares[entries[0]].prev_across = None;
                // set clue texts
                let text = match &self.squares[entries[0]].across_clue_text {
                    Some(s) => { s.clone() }
                    None => { self.squares[entries[0]].across_clue_text = Some("".to_string());
                        "".to_string()
                    }
                };
                for e_index in 1..entries.len() {
                    self.squares[entries[e_index]].across_clue_text = None;
                }
                // Push to across entries list
                let e = PuzzleEntry {
                    label: current_across,
                    variant: EntryVariant::Across,
                    member_indices: entries,
                    clue: text,
                };
                self.across_entries.push(e);
            }
        }
        // Construct down entries (mirror of the across pass, walking
        // column by column).
        self.down_entries.clear();
        for x in 0..self.dim as u32 {
            let mut current_down = 0;
            let mut entries: Vec<usize> = Vec::new();
            for y in 0..self.dim as u32 {
                let index = self.xy_to_index(x, y);
                if let Some(l) = self.squares[index].label {
                    if entries.len() == 0 {
                        current_down = l;
                    }
                };
                match &self.squares[index].content {
                    SquareContents::Blocker => {
                        self.squares[index].down_entry = None;
                        if entries.len() > 0 {
                            // assign next squares
                            for e_index in 0..(entries.len()-1) {
                                self.squares[entries[e_index]].next_down = Some(entries[e_index+1]);
                            }
                            self.squares[*entries.last().unwrap()].next_down = None;
                            // assign prev squares
                            for e_index in 1..entries.len() {
                                self.squares[entries[e_index]].prev_down = Some(entries[e_index - 1]);
                            }
                            self.squares[entries[0]].prev_down = None;
                            // set clue texts
                            let text = match &self.squares[entries[0]].down_clue_text {
                                Some(s) => { s.clone() }
                                None => { self.squares[entries[0]].down_clue_text = Some("".to_string());
                                    "".to_string()
                                }
                            };
                            for e_index in 1..entries.len() {
                                self.squares[entries[e_index]].down_clue_text = None;
                            }
                            // Push to entries list
                            let e = PuzzleEntry {
                                label: current_down,
                                variant: EntryVariant::Down,
                                member_indices: entries,
                                clue: text,
                            };
                            self.down_entries.push(e);
                            entries = Vec::new();
                        }
                    }
                    SquareContents::TextContent(_,_) => {
                        self.squares[index].down_entry = Some(current_down);
                        entries.push(index);
                    }
                }
            };
            // Flush the run still open at the bottom edge of the column.
            if entries.len() > 0 {
                // assign next squares
                for e_index in 0..(entries.len()-1) {
                    self.squares[entries[e_index]].next_down = Some(entries[e_index+1]);
                }
                self.squares[*entries.last().unwrap()].next_down = None;
                // assign prev squares
                for e_index in 1..entries.len() {
                    self.squares[entries[e_index]].prev_down = Some(entries[e_index - 1]);
                }
                self.squares[entries[0]].prev_down = None;
                // set clue texts
                let text = match &self.squares[entries[0]].down_clue_text {
                    Some(s) => { s.clone() }
                    None => { self.squares[entries[0]].down_clue_text = Some("".to_string());
                        "".to_string()
                    }
                };
                for e_index in 1..entries.len() {
                    self.squares[entries[e_index]].down_clue_text = None;
                }
                // Push to entries list
                let e = PuzzleEntry {
                    label: current_down,
                    variant: EntryVariant::Down,
                    member_indices: entries,
                    clue: text,
                };
                self.down_entries.push(e);
            }
        }
        // Down entries won't be in increasing order, so sort them.
        self.down_entries.sort_by(|a,b| a.label.cmp(&b.label));
    }
}
/// Store `text` as the clue for the entry with the given `label` and
/// direction, both on the entry itself and on the entry's first square
/// (where `calculate_clues` keeps the canonical copy).
///
/// # Panics
/// Panics with a descriptive message if no entry with `label` exists for
/// the given variant (previously a bare `unwrap()`).
pub fn set_clue_text(&mut self, label: u32, variant: EntryVariant, text: String) {
    match variant {
        EntryVariant::Across => {
            let entry = self
                .across_entries
                .iter_mut()
                .find(|e| e.label == label)
                .expect("set_clue_text: no across entry with the given label");
            entry.clue = text.clone();
            self.squares[entry.member_indices[0]].across_clue_text = Some(text);
        },
        EntryVariant::Down => {
            let entry = self
                .down_entries
                .iter_mut()
                .find(|e| e.label == label)
                .expect("set_clue_text: no down entry with the given label");
            entry.clue = text.clone();
            self.squares[entry.member_indices[0]].down_clue_text = Some(text);
        },
    }
}
/// Map grid coordinates to the index of the square in the row-major
/// `squares` vector (row `y`, column `x`).
fn xy_to_index(&self, x: u32, y: u32) -> usize {
    (y * self.dim as u32 + x) as usize
}
/// Entries (across, down) that the square at (x, y) belongs to.
/// Blockers — and squares outside any entry — yield `None` in the
/// corresponding slot.
pub fn get_clue_entries(&self, x: u32, y: u32) -> (Option<&PuzzleEntry>,Option<&PuzzleEntry>) {
    let sq = self.at(x, y);
    if let SquareContents::TextContent(_, _) = &sq.content {
        let across = sq
            .across_entry
            .and_then(|label| self.across_entries.iter().find(|e| e.label == label));
        let down = sq
            .down_entry
            .and_then(|label| self.down_entries.iter().find(|e| e.label == label));
        (across, down)
    } else {
        (None, None)
    }
}
/// Human-readable clue strings for the square at (x, y), formatted as
/// `"<label>A: <clue>"` / `"<label>D: <clue>"`. A slot is the empty
/// string when the square has no entry in that direction (e.g. blockers).
pub fn get_square_clue_texts(&self, x: u32, y: u32) -> (String,String) {
    let (a, d) = self.get_clue_entries(x, y);
    // `format!` replaces the previous clone-then-concatenate sequence.
    let across = a.map_or_else(String::new, |entry| format!("{}A: {}", entry.label, entry.clue));
    let down = d.map_or_else(String::new, |entry| format!("{}D: {}", entry.label, entry.clue));
    (across, down)
}
/// Hash of the complete grid contents.
///
/// Builds a canonical string (square texts in storage order, blockers
/// encoded as '#') and hashes it with the std `DefaultHasher`, so two
/// grids with identical fill produce identical hashes.
pub fn get_puzzle_total_hash(&self) -> u64 {
    use std::hash::{Hash, Hasher};
    let mut combined = String::new();
    for sq in &self.squares {
        match &sq.content {
            SquareContents::Blocker => combined.push('#'),
            SquareContents::TextContent(text, _) => combined.push_str(text),
        }
    }
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    combined.hash(&mut hasher);
    hasher.finish()
}
/// True when the current grid hash matches the stored solution hash.
/// Always `false` when no solution hash has been recorded.
pub fn is_solved(&self) -> bool {
    match self.solved_hash {
        Some(h) => self.get_puzzle_total_hash() == h,
        None => false,
    }
}
}
/// Grid dimension (squares per side) for each puzzle type.
pub fn match_puzzle_dim(p: &PuzzleType) -> usize {
    match p {
        PuzzleType::Mini => 5,
        PuzzleType::Weekday | PuzzleType::WeekdayAsymmetric => 15,
        PuzzleType::Sunday => 21,
    }
}
|
use std::fs::read_to_string;
/// Read the day-9 puzzle input: one integer per whitespace-separated
/// token. Tokens that fail to parse are silently skipped.
fn get_input() -> Vec<i32> {
    read_to_string("../inputs/9.txt")
        .unwrap()
        .split_whitespace()
        .filter_map(|token| token.parse().ok())
        .collect()
}
/// Solve and print both parts of day 9 using the real input and a
/// 25-number preamble.
pub fn solve() {
    let numbers = get_input();
    // println! is the idiomatic form of print!("...\n", ...).
    println!("Day 9 part 1: {}", part_1(&numbers, 25));
    println!("Day 9 part 2: {}", part_2(&numbers, 25));
}
/// XMAS validation (AoC 2020 day 9, part 1): return the first number
/// after the preamble that is NOT the sum of any two of the
/// `preamble_length` numbers immediately before it.
///
/// Takes a slice instead of `&Vec<i32>`; existing `&Vec` callers still
/// work via deref coercion.
///
/// # Panics
/// Panics if every number is valid (the puzzle guarantees one is not).
fn part_1(numbers: &[i32], preamble_length: usize) -> i32 {
    numbers
        .iter()
        .enumerate()
        .skip(preamble_length)
        .find_map(|(num_index, &num)| {
            // The window of candidates directly preceding `num`.
            let window = &numbers[num_index - preamble_length..num_index];
            let has_pair = window
                .iter()
                .enumerate()
                .any(|(i, &a)| window[i + 1..].iter().any(|&b| a + b == num));
            // `find_map` keeps scanning on None, so a valid number (pair
            // found) yields None and the first invalid one is returned.
            if has_pair { None } else { Some(num) }
        })
        .expect("input contained no invalid number")
}
/// AoC 2020 day 9, part 2: find a contiguous run of numbers that sums to
/// the invalid number from part 1 and return min + max of that run.
///
/// Maintains a running sum per starting index instead of re-summing the
/// slice for every candidate range (O(n^2) instead of the previous
/// O(n^3)). The early `break` relies — as the original did — on the
/// inputs being non-negative, so an overshoot can never recover.
fn part_2(numbers: &[i32], preamble_length: usize) -> i32 {
    let invalid_num = part_1(numbers, preamble_length);
    for left in 0..numbers.len() - 1 {
        let mut sum = 0;
        for right in (left + 1)..numbers.len() {
            // `sum` now covers numbers[left..right], exactly the slice the
            // original version summed from scratch each iteration.
            sum += numbers[right - 1];
            if invalid_num < sum {
                break;
            }
            if invalid_num == sum {
                let range = &numbers[left..right];
                return range.iter().min().unwrap() + range.iter().max().unwrap();
            }
        }
    }
    unreachable!("No Range found")
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Example sequence from the day-9 puzzle description.
    fn example_numbers() -> Vec<i32> {
        vec![
            35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277,
            309, 576,
        ]
    }

    #[test]
    fn test_part1_with_example() {
        assert_eq!(part_1(&example_numbers(), 5), 127);
    }

    // The *_with_real tests require ../inputs/9.txt to be present.
    #[test]
    fn test_part1_with_real() {
        assert_eq!(part_1(&get_input(), 25), 15690279);
    }

    #[test]
    fn test_part2_with_example() {
        assert_eq!(part_2(&example_numbers(), 5), 62);
    }

    #[test]
    fn test_part2_with_real() {
        assert_eq!(part_2(&get_input(), 25), 2174232);
    }
}
|
use std::env;
use std::fs;
use std::fs::metadata;
use std::path::Path;
use std::path::PathBuf;
use std::process;
/// Resolved command-line configuration for the translator: the input
/// `.vm` file paths and the output `.asm` file path.
pub struct Setup {
    /// Paths of the input `.vm` files.
    pub input: Vec<String>,
    /// Path of the output `.asm` file.
    pub output: String,
}
impl Setup {
    /// Build a `Setup` from `std::env::args()`.
    ///
    /// If the single path argument is a directory, every `.vm` file
    /// directly inside it becomes an input and the output is
    /// `<dirname>.asm`; if it is a file, that file is the sole input and
    /// the output is the same path with an `.asm` extension.
    ///
    /// # Panics
    /// Exits or panics (via `check_input_validity` and the `unwrap`s
    /// below) when the argument is missing, does not exist, or a file
    /// argument lacks the `.vm` extension.
    pub fn generate() -> Setup {
        let args: Vec<String> = env::args().collect();
        check_input_validity(&args);
        let input_files: Vec<String>;
        let output_file;
        if metadata(&args[1]).unwrap().is_dir() {
            input_files = fs::read_dir(&args[1])
                .unwrap()
                .map(|entry| entry.unwrap().path())
                .filter(|path| path.is_file())
                // extension() is None for files without one (e.g. a
                // Makefile); the previous `.unwrap()` panicked on those
                // instead of skipping them.
                .filter(|path| path.extension().map_or(false, |ext| ext == "vm"))
                .map(|path| path.to_str().unwrap().to_string())
                .collect();
            let dir_name = Path::new(&args[1]).file_name().unwrap().to_str().unwrap();
            output_file = format!("{}.asm", dir_name);
        } else {
            input_files = vec![String::from(&args[1])];
            let mut input_file = PathBuf::from(&args[1]);
            input_file.set_extension("asm");
            output_file = String::from(input_file.to_str().unwrap());
        };
        Setup {
            input: input_files,
            output: output_file,
        }
    }
}
/// Validate the command-line arguments, exiting the process (status 1)
/// when no input path is given or the path does not exist.
///
/// # Panics
/// Panics when a file argument does not carry the `.vm` extension.
fn check_input_validity(args: &[String]) {
    // Check that input path is given
    if args.len() < 2 {
        println!("No input file/directory provided");
        process::exit(1);
    };
    // Check existence BEFORE calling metadata(): the previous order
    // unwrapped metadata() first, which panicked on a missing path and
    // made the friendly error message below unreachable.
    let input_path_raw = Path::new(&args[1]);
    if !input_path_raw.exists() {
        println!("Error. Provided input file/dir does not exist. Exiting...\n");
        process::exit(1);
    };
    let meta = metadata(input_path_raw).unwrap();
    if meta.is_file() {
        assert_eq!(input_path_raw.extension().unwrap(), "vm");
    } else if meta.is_dir() {
        println!("Loading directory.\n");
    }
}
|
// Auto-generated (svd2rust-style) accessors for the SYSCFG CFGR2 register.
// One `*_R` bit-reader and one `*_W` bit-writer alias per field; the
// writers' const parameter `O` is the field's bit offset in the register.
#[doc = "Register `CFGR2` reader"]
pub type R = crate::R<CFGR2_SPEC>;
#[doc = "Register `CFGR2` writer"]
pub type W = crate::W<CFGR2_SPEC>;
#[doc = "Field `LOCKUP_LOCK` reader - Cortex-M0+ LOCKUP bit enable bit"]
pub type LOCKUP_LOCK_R = crate::BitReader;
#[doc = "Field `LOCKUP_LOCK` writer - Cortex-M0+ LOCKUP bit enable bit"]
pub type LOCKUP_LOCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRAM_PARITY_LOCK` reader - SRAM parity lock bit"]
pub type SRAM_PARITY_LOCK_R = crate::BitReader;
#[doc = "Field `SRAM_PARITY_LOCK` writer - SRAM parity lock bit"]
pub type SRAM_PARITY_LOCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PVD_LOCK` reader - PVD lock enable bit"]
pub type PVD_LOCK_R = crate::BitReader;
#[doc = "Field `PVD_LOCK` writer - PVD lock enable bit"]
pub type PVD_LOCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ECC_LOCK` reader - ECC error lock bit"]
pub type ECC_LOCK_R = crate::BitReader;
#[doc = "Field `ECC_LOCK` writer - ECC error lock bit"]
pub type ECC_LOCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRAM_PEF` reader - SRAM parity error flag"]
pub type SRAM_PEF_R = crate::BitReader;
#[doc = "Field `SRAM_PEF` writer - SRAM parity error flag"]
pub type SRAM_PEF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Generated read accessors: each extracts one bit of the register
// snapshot held in `self.bits` (positions match the `#[doc]` text).
impl R {
    #[doc = "Bit 0 - Cortex-M0+ LOCKUP bit enable bit"]
    #[inline(always)]
    pub fn lockup_lock(&self) -> LOCKUP_LOCK_R {
        LOCKUP_LOCK_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - SRAM parity lock bit"]
    #[inline(always)]
    pub fn sram_parity_lock(&self) -> SRAM_PARITY_LOCK_R {
        SRAM_PARITY_LOCK_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - PVD lock enable bit"]
    #[inline(always)]
    pub fn pvd_lock(&self) -> PVD_LOCK_R {
        PVD_LOCK_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - ECC error lock bit"]
    #[inline(always)]
    pub fn ecc_lock(&self) -> ECC_LOCK_R {
        ECC_LOCK_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 8 - SRAM parity error flag"]
    #[inline(always)]
    pub fn sram_pef(&self) -> SRAM_PEF_R {
        SRAM_PEF_R::new(((self.bits >> 8) & 1) != 0)
    }
}
// Generated write accessors: each returns a single-bit field writer at
// that field's offset. `bits()` writes the raw register value and is
// declared `unsafe` by the generator because it bypasses the field API.
impl W {
    #[doc = "Bit 0 - Cortex-M0+ LOCKUP bit enable bit"]
    #[inline(always)]
    #[must_use]
    pub fn lockup_lock(&mut self) -> LOCKUP_LOCK_W<CFGR2_SPEC, 0> {
        LOCKUP_LOCK_W::new(self)
    }
    #[doc = "Bit 1 - SRAM parity lock bit"]
    #[inline(always)]
    #[must_use]
    pub fn sram_parity_lock(&mut self) -> SRAM_PARITY_LOCK_W<CFGR2_SPEC, 1> {
        SRAM_PARITY_LOCK_W::new(self)
    }
    #[doc = "Bit 2 - PVD lock enable bit"]
    #[inline(always)]
    #[must_use]
    pub fn pvd_lock(&mut self) -> PVD_LOCK_W<CFGR2_SPEC, 2> {
        PVD_LOCK_W::new(self)
    }
    #[doc = "Bit 3 - ECC error lock bit"]
    #[inline(always)]
    #[must_use]
    pub fn ecc_lock(&mut self) -> ECC_LOCK_W<CFGR2_SPEC, 3> {
        ECC_LOCK_W::new(self)
    }
    #[doc = "Bit 8 - SRAM parity error flag"]
    #[inline(always)]
    #[must_use]
    pub fn sram_pef(&mut self) -> SRAM_PEF_W<CFGR2_SPEC, 8> {
        SRAM_PEF_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "SYSCFG configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type carrying the register's metadata (width, modify bitmaps,
// reset value) through the generated trait impls below.
pub struct CFGR2_SPEC;
impl crate::RegisterSpec for CFGR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cfgr2::R`](R) reader structure"]
impl crate::Readable for CFGR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cfgr2::W`](W) writer structure"]
impl crate::Writable for CFGR2_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CFGR2 to value 0"]
impl crate::Resettable for CFGR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Auto-generated read-only accessor for the IC_CLR_RX_UNDER register
// (older svd2rust API style with type-parameterized `crate::R<u32, _>`).
#[doc = "Reader of register IC_CLR_RX_UNDER"]
pub type R = crate::R<u32, super::IC_CLR_RX_UNDER>;
#[doc = "Reader of field `CLR_RX_UNDER`"]
pub type CLR_RX_UNDER_R = crate::R<bool, bool>;
impl R {
    #[doc = "Bit 0 - Read this register to clear the RX_UNDER interrupt (bit 0) of the IC_RAW_INTR_STAT register.\\n\\n Reset value: 0x0"]
    #[inline(always)]
    pub fn clr_rx_under(&self) -> CLR_RX_UNDER_R {
        CLR_RX_UNDER_R::new((self.bits & 0x01) != 0)
    }
}
|
use proconio::input;
use std::cmp::min;
/// Reads a, b, x, y and prints the minimum cost.
fn main() {
    input! {
        a: u32,
        b: u32,
        x: u32,
        y: u32,
    }
    // Per-step cost: the cheaper of the direct move `y` and two `x`
    // moves — assumed from the original formula; confirm against the
    // problem statement.
    let per_step = min(y, 2 * x);
    let answer = if a == b || a == b + 1 {
        // Adjacent or equal: a single x move suffices.
        x
    } else if a < b {
        (b - a) * per_step + x
    } else {
        // Here a >= b + 2, so there are (a - b - 1) full steps plus the
        // final x move — identical to the original's (steps - 1) branch.
        (a - b - 1) * per_step + x
    };
    println!("{}", answer);
}
|
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
mod encodables;
mod primitives;
use std::marker::PhantomData;
use crate::error::DataError;
pub use encodables::*;
pub use primitives::*;
/// A typed database key: the encoded byte form of a key whose logical
/// type is `K` (see `Key::new`, which calls `EncodableKey::to_bytes`).
pub(crate) struct Key<K> {
    /// The encoded key bytes.
    bytes: Vec<u8>,
    /// Ties the otherwise-unused type parameter `K` to this struct.
    phantom: PhantomData<K>,
}
impl<K> AsRef<[u8]> for Key<K>
where
    K: EncodableKey,
{
    /// Expose the encoded key bytes as a plain byte slice.
    fn as_ref(&self) -> &[u8] {
        &self.bytes
    }
}
impl<K> Key<K>
where
    K: EncodableKey,
{
    /// Encode `k` into its byte representation and wrap it.
    /// Fails when the key cannot be serialized.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(k: &K) -> Result<Key<K>, DataError> {
        let bytes = k.to_bytes()?;
        Ok(Key {
            bytes,
            phantom: PhantomData,
        })
    }
}
|
// This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Telemetry utilities.
//!
//! Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`.
//! After that, calling `slog_scope::with_logger` will return a logger that sends information to
//! the telemetry endpoints. The `telemetry!` macro is a short-cut for calling
//! `slog_scope::with_logger` followed with `slog_log!`.
//!
//! Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at
//! the moment. Substrate may eventually be reworked to get proper `slog` support, including sending
//! information to the telemetry.
//!
//! The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a
//! background thread/task) in order for the telemetry to properly function. Dropping the object
//! will also deregister the global logger and replace it with a logger that discards messages.
//! The `Stream` generates [`TelemetryEvent`]s.
//!
//! > **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified
//! behaviour.
//!
//! # Example
//!
//! ```no_run
//! use futures::prelude::*;
//!
//! let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig {
//! endpoints: sc_telemetry::TelemetryEndpoints::new(vec![
//! // The `0` is the maximum verbosity level of messages to send to this endpoint.
//! ("wss://example.com".into(), 0)
//! ]).expect("Invalid URL or multiaddr provided"),
//! // Can be used to pass an external implementation of WebSockets.
//! wasm_external_transport: None,
//! });
//!
//! // The `telemetry` object implements `Stream` and must be processed.
//! std::thread::spawn(move || {
//! futures::executor::block_on(telemetry.for_each(|_| future::ready(())));
//! });
//!
//! // Sends a message on the telemetry.
//! sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test";
//! "foo" => "bar",
//! )
//! ```
use futures::{channel::mpsc, prelude::*};
use libp2p::{wasm_ext, Multiaddr};
use log::{error, warn};
use parking_lot::Mutex;
use serde::{Deserialize, Deserializer, Serialize};
use std::{
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use wasm_timer::Instant;
pub use libp2p::wasm_ext::ExtTransport;
pub use slog;
pub use slog_scope::with_logger;
mod async_record;
mod worker;
/// Configuration for telemetry.
///
/// Consumed by [`init_telemetry`].
pub struct TelemetryConfig {
    /// Collection of telemetry WebSocket servers with a corresponding verbosity level.
    pub endpoints: TelemetryEndpoints,
    /// Optional external implementation of a libp2p transport. Used in WASM contexts where we need
    /// some binding between the networking provided by the operating system or environment and
    /// libp2p.
    ///
    /// This parameter exists whatever the target platform is, but it is expected to be set to
    /// `Some` only when compiling for WASM.
    ///
    /// > **Important**: Each individual call to `write` corresponds to one message. There is no
    /// >                internal buffering going on. In the context of WebSockets, each `write`
    /// >                must be one individual WebSockets frame.
    pub wasm_external_transport: Option<wasm_ext::ExtTransport>,
}
/// List of telemetry servers we want to talk to. Contains the URL of the server, and the
/// maximum verbosity level.
///
/// The URL string can be either a URL or a multiaddress.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TelemetryEndpoints(
    // (address, max verbosity) pairs; deserialization also accepts plain
    // URL strings, converted by `url_or_multiaddr_deser`.
    #[serde(deserialize_with = "url_or_multiaddr_deser")] Vec<(Multiaddr, u8)>,
);
/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr.
fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result<Vec<(Multiaddr, u8)>, D::Error>
where
D: Deserializer<'de>,
{
Vec::<(String, u8)>::deserialize(deserializer)?
.iter()
.map(|e| Ok((url_to_multiaddr(&e.0).map_err(serde::de::Error::custom)?, e.1)))
.collect()
}
// Merged the two adjacent inherent `impl TelemetryEndpoints` blocks into
// one, as clippy's multiple_inherent_impl lint recommends.
impl TelemetryEndpoints {
    /// Parse `(url_or_multiaddr, max_verbosity)` pairs, failing on the
    /// first address that is neither a valid multiaddr nor a valid URL.
    pub fn new(endpoints: Vec<(String, u8)>) -> Result<Self, libp2p::multiaddr::Error> {
        let endpoints: Result<Vec<(Multiaddr, u8)>, libp2p::multiaddr::Error> =
            endpoints.iter().map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))).collect();
        endpoints.map(Self)
    }

    /// Return `true` if there are no telemetry endpoints, `false` otherwise.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
/// Parses a WebSocket URL into a libp2p `Multiaddr`.
///
/// Tries a direct `Multiaddr` parse first, then the `ws://path/url` form;
/// if both fail, the original multiaddr parse error is returned, since a
/// `Multiaddr` is what we were ultimately expecting.
fn url_to_multiaddr(url: &str) -> Result<Multiaddr, libp2p::multiaddr::Error> {
    match url.parse() {
        Ok(ma) => Ok(ma),
        Err(parse_error) => libp2p::multiaddr::from_url(url).map_err(|_| parse_error),
    }
}
/// Log levels.
///
/// String verbosity tags attached to telemetry records; per the module
/// docs, an endpoint only receives messages up to its configured maximum
/// verbosity, so "0" (INFO) reaches everyone and "9" is the chattiest.
pub const SUBSTRATE_DEBUG: &str = "9";
pub const SUBSTRATE_INFO: &str = "0";
pub const CONSENSUS_TRACE: &str = "9";
pub const CONSENSUS_DEBUG: &str = "5";
pub const CONSENSUS_WARN: &str = "4";
pub const CONSENSUS_INFO: &str = "1";
/// Telemetry object. Implements `Future` and must be polled regularly.
/// Contains an `Arc` and can be cloned and pass around. Only one clone needs to be polled
/// regularly and should be polled regularly.
/// Dropping all the clones unregisters the telemetry.
#[derive(Clone)]
pub struct Telemetry {
    /// Shared state; see `TelemetryInner`'s docs for why a `Mutex` is needed.
    inner: Arc<Mutex<TelemetryInner>>,
    /// Slog guard so that we don't get deregistered.
    _guard: Arc<slog_scope::GlobalLoggerGuard>,
}
/// Behind the `Mutex` in `Telemetry`.
///
/// Note that ideally we wouldn't have to make the `Telemetry` cloneable, as that would remove the
/// need for a `Mutex`. However there is currently a weird hack in place in `sc-service`
/// where we extract the telemetry registration so that it continues running during the shutdown
/// process.
struct TelemetryInner {
    /// Worker for the telemetry. `None` if it failed to initialize.
    worker: Option<worker::TelemetryWorker>,
    /// Receives log entries for them to be dispatched to the worker.
    /// Drained by the `Stream` implementation on `Telemetry`.
    receiver: mpsc::Receiver<async_record::AsyncRecord>,
}
/// Implements `slog::Drain`.
///
/// Forwards each serialized log record into the channel drained by
/// `TelemetryInner::receiver`.
struct TelemetryDrain {
    /// Sends log entries.
    /// NOTE(review): wrapped in `AssertUnwindSafe`, presumably to satisfy
    /// the global logger's unwind-safety bounds — confirm.
    sender: std::panic::AssertUnwindSafe<mpsc::Sender<async_record::AsyncRecord>>,
}
/// Initializes the telemetry. See the crate root documentation for more information.
///
/// Please be careful to not call this function twice in the same program. The `slog` crate
/// doesn't provide any way of knowing whether a global logger has already been registered.
pub fn init_telemetry(config: TelemetryConfig) -> Telemetry {
    // Build the list of telemetry endpoints.
    let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport);
    // Bounded channel carrying log records from the slog drain to the worker.
    let (sender, receiver) = mpsc::channel(16);
    let guard = {
        // Install a global slog logger that feeds records into `sender`.
        // Keeping `guard` alive keeps the logger registered.
        let logger = TelemetryDrain { sender: std::panic::AssertUnwindSafe(sender) };
        let root = slog::Logger::root(slog::Drain::fuse(logger), slog::o!());
        slog_scope::set_global_logger(root)
    };
    // A worker failure is not fatal: telemetry is disabled (worker = None)
    // but the rest of the program keeps running.
    let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) {
        Ok(w) => Some(w),
        Err(err) => {
            error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err);
            None
        },
    };
    Telemetry {
        inner: Arc::new(Mutex::new(TelemetryInner { worker, receiver })),
        _guard: Arc::new(guard),
    }
}
/// Event generated when polling the worker.
///
/// Currently the only variant; `poll_next` maps
/// `worker::TelemetryWorkerEvent::Connected` to it.
#[derive(Debug)]
pub enum TelemetryEvent {
    /// We have established a connection to one of the telemetry endpoint, either for the first
    /// time or after having been disconnected earlier.
    Connected,
}
// Drives the telemetry: forwards queued log entries to the worker and
// surfaces (re)connection events. Must be polled regularly (see the
// crate docs); only one clone of `Telemetry` should be polled.
impl Stream for Telemetry {
    type Item = TelemetryEvent;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        let before = Instant::now();
        // Because the `Telemetry` is cloneable, we need to put the actual fields behind a `Mutex`.
        // However, the user is only ever supposed to poll from one instance of `Telemetry`, while
        // the other instances are used only for RAII purposes.
        // We assume that the user is following this advice and therefore that the `Mutex` is only
        // ever locked once at a time.
        let mut inner = match self.inner.try_lock() {
            Some(l) => l,
            None => {
                warn!(
                    target: "telemetry",
                    "The telemetry seems to be polled multiple times simultaneously"
                );
                // Returning `Pending` here means that we may never get polled again, but this is
                // ok because we're in a situation where something else is actually currently doing
                // the polling.
                return Poll::Pending
            },
        };
        let mut has_connected = false;
        // The polling pattern is: poll the worker so that it processes its queue, then add one
        // message from the receiver (if possible), then poll the worker again, and so on.
        loop {
            if let Some(worker) = inner.worker.as_mut() {
                while let Poll::Ready(event) = worker.poll(cx) {
                    // Right now we only have one possible event. This line is here in order to not
                    // forget to handle any possible new event type.
                    let worker::TelemetryWorkerEvent::Connected = event;
                    has_connected = true;
                }
            }
            if let Poll::Ready(Some(log_entry)) =
                Stream::poll_next(Pin::new(&mut inner.receiver), cx)
            {
                if let Some(worker) = inner.worker.as_mut() {
                    log_entry.as_record_values(|rec, val| {
                        let _ = worker.log(rec, val);
                    });
                }
            } else {
                // No more queued log entries (or the channel is pending):
                // stop the dispatch loop for this poll.
                break
            }
        }
        if before.elapsed() > Duration::from_millis(200) {
            warn!(target: "telemetry", "Polling the telemetry took more than 200ms");
        }
        if has_connected {
            Poll::Ready(Some(TelemetryEvent::Connected))
        } else {
            Poll::Pending
        }
    }
}
// Bridges slog to the telemetry worker: each record is serialized and
// pushed onto the bounded channel; a record is dropped (with a warning)
// if the channel is full or disconnected.
impl slog::Drain for TelemetryDrain {
    type Ok = ();
    type Err = ();
    fn log(
        &self,
        record: &slog::Record,
        values: &slog::OwnedKVList,
    ) -> Result<Self::Ok, Self::Err> {
        let before = Instant::now();
        let serialized = async_record::AsyncRecord::from(record, values);
        // Note: interestingly, `try_send` requires a `&mut` because it modifies some internal
        // value, while `clone()` is lock-free.
        if let Err(err) = self.sender.clone().try_send(serialized) {
            warn!(target: "telemetry", "Ignored telemetry message because of error on channel: {:?}", err);
        }
        if before.elapsed() > Duration::from_millis(50) {
            warn!(target: "telemetry", "Writing a telemetry log took more than 50ms");
        }
        Ok(())
    }
}
/// Translates to `slog_scope::info`, but contains an additional verbosity
/// parameter which the log record is tagged with. Additionally the verbosity
/// parameter is added to the record as a key-value pair.
///
/// Usage (see the crate root example):
/// `telemetry!(VERBOSITY; "message.name"; "key" => value, ...)`.
#[macro_export]
macro_rules! telemetry {
    ( $a:expr; $b:expr; $( $t:tt )* ) => {
        $crate::with_logger(|l| {
            $crate::slog::slog_info!(l, #$a, $b; $($t)* )
        })
    }
}
#[cfg(test)]
mod telemetry_endpoints_tests {
    use super::url_to_multiaddr;
    use super::TelemetryEndpoints;
    use libp2p::Multiaddr;
    // Both a WebSocket URL and a raw multiaddr must be accepted and
    // converted to the same multiaddrs `url_to_multiaddr` yields.
    #[test]
    fn valid_endpoints() {
        let endp = vec![
            ("wss://telemetry.polkadot.io/submit/".into(), 3),
            ("/ip4/80.123.90.4/tcp/5432".into(), 4),
        ];
        let telem =
            TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid");
        let mut res: Vec<(Multiaddr, u8)> = vec![];
        for (a, b) in endp.iter() {
            res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b))
        }
        assert_eq!(telem.0, res);
    }
    // Construction must fail when every address is malformed...
    #[test]
    fn invalid_endpoints() {
        let endp = vec![
            ("/ip4/...80.123.90.4/tcp/5432".into(), 3),
            ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4),
        ];
        let telem = TelemetryEndpoints::new(endp);
        assert!(telem.is_err());
    }
    // ...and also when only one of them is malformed (all-or-nothing).
    #[test]
    fn valid_and_invalid_endpoints() {
        let endp = vec![
            ("/ip4/80.123.90.4/tcp/5432".into(), 3),
            ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4),
        ];
        let telem = TelemetryEndpoints::new(endp);
        assert!(telem.is_err());
    }
}
|
/*
Copyright 2020 <盏一 w@hidva.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use clap::{App, Arg};
use kuiba::{access::redo::redo, guc, init_log, postgres_main, LAST_INTERNAL_SESSID};
use std::net::TcpListener;
use std::thread;
/// Hand out the next session id, advancing `lastused`.
///
/// Ids at or below `LAST_INTERNAL_SESSID` are never valid results here,
/// so hitting one after the increment (e.g. after wrap-around) aborts.
fn new_sessid(lastused: &mut u32) -> u32 {
    *lastused += 1;
    let v = *lastused;
    // assert! panics with exactly the same message as the original
    // explicit panic! branch did.
    assert!(v > LAST_INTERNAL_SESSID, "new_sessid: unexpected sessid");
    v
}
/// Entry point: recover the database, bind the listener, and serve each
/// connection on its own thread.
fn main() {
    init_log();
    // Parse the single required -D/--datadir option.
    let matches = App::new("KuiBaDB(魁拔)")
        .version(kuiba::KB_VERSTR)
        .author("盏一 <w@hidva.com>")
        .about("KuiBaDB is another Postgresql written in Rust")
        .arg(
            Arg::with_name("datadir")
                .short("D")
                .long("datadir")
                .required(true)
                .takes_value(true),
        )
        .get_matches();
    let datadir = matches
        .value_of("datadir")
        .expect("You must specify the -D invocation option!");
    // Recover global state from the data directory before serving.
    let global_state = redo(&datadir).expect("redo failed");
    let port = guc::get_int(&global_state.gucstate, guc::Port) as u16;
    let listener = TcpListener::bind(("127.0.0.1", port)).unwrap();
    log::info!("listen. port={}", port);
    let mut lastused_sessid = LAST_INTERNAL_SESSID;
    // One thread per client connection, each with a fresh session id.
    for stream in listener.incoming() {
        let stream = stream.unwrap();
        let global_state = global_state.clone();
        let sessid = new_sessid(&mut lastused_sessid);
        thread::spawn(move || {
            postgres_main(global_state, stream, sessid);
        });
    }
}
|
pub mod attributes;
pub mod events;
mod elements;
pub use self::elements::*;
use std::cell::RefCell;
use std::rc::Rc;
use wasm_bindgen::closure::Closure;
/// A virtual-DOM node: either an element or a plain text node.
#[derive(Clone, Debug)]
pub enum Html<Msg> {
    /// An element node with attributes and children.
    Element(Element<Msg>),
    /// A text node.
    Text(String),
}
/// Anything `ToString` converts into a text node, so string-ish values
/// can be used directly where `Html` is expected.
impl<T: ToString, Msg> From<T> for Html<Msg> {
    fn from(t: T) -> Html<Msg> {
        Html::Text(t.to_string())
    }
}
/// A virtual-DOM element node.
#[derive(Clone, Debug)]
pub struct Element<Msg> {
    /// Tag name, e.g. "div".
    pub name: String,
    /// Attributes, including event listeners and the optional `Key`.
    pub attrs: Vec<Attribute<Msg>>,
    /// Child nodes, or the self-closing marker.
    pub children: Children<Msg>,
}
/// Wrap an element as an `Html` node.
impl<Msg> From<Element<Msg>> for Html<Msg> {
    fn from(el: Element<Msg>) -> Html<Msg> {
        Html::Element(el)
    }
}
/// The children of an element.
#[derive(Clone, Debug)]
pub enum Children<Msg> {
    /// The element is self-closing and holds no children.
    SelfClosing,
    /// Ordered list of child nodes.
    Nodes(Vec<Html<Msg>>),
}
impl<Msg> Element<Msg> {
    /// The value of this element's `Key` attribute, if one is present.
    pub fn key(&self) -> Option<&str> {
        self.attrs.iter().find_map(|attr| match attr {
            Attribute::Key(key) => Some(key.as_str()),
            _ => None,
        })
    }
}
/// An attribute attached to an element.
#[derive(Clone, Debug, PartialEq)]
pub enum Attribute<Msg> {
    /// Name/value pair — order assumed to be (name, value); confirm at
    /// construction sites.
    Text(String, String),
    /// Attribute carrying only a name (e.g. HTML boolean attributes).
    Bool(String),
    /// Identity key; surfaced via `Element::key`.
    Key(String),
    /// An event subscription.
    Event(EventListener<Msg>),
}
/// A DOM event subscription attached to an element.
#[derive(Clone, Debug, PartialEq)]
pub struct EventListener<Msg> {
    /// Handle to the installed JS closure (empty until one is attached).
    pub js_closure: JsClosure,
    /// Event type string (presumably e.g. "click" — confirm where
    /// listeners are registered).
    pub type_: String,
    /// Stop the event's propagation — assumed from the name.
    pub stop_propagation: bool,
    /// Prevent the event's default action — assumed from the name.
    pub prevent_default: bool,
    /// How to turn the DOM event into an application message.
    pub to_message: EventToMessage<Msg>,
}
/// Shared, optional handle to a `wasm_bindgen` closure wrapping a DOM
/// event handler; `None` until a closure has been installed.
#[derive(Clone, Default)]
// `dyn Fn(..)` replaces the deprecated bare-trait-object syntax.
pub struct JsClosure(pub Rc<RefCell<Option<Closure<dyn Fn(web_sys::Event)>>>>);
impl std::fmt::Debug for JsClosure {
    /// Debug output only reveals whether a closure has been installed.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let state = if self.0.borrow().is_some() {
            "HAS A CLOSURE"
        } else {
            "NO CLOSURE"
        };
        write!(f, "{}", state)
    }
}
impl std::cmp::PartialEq for JsClosure {
    // Closures are deliberately ignored when comparing attributes, so any two
    // `JsClosure`s compare equal. (Not a sound basis for implementing `Eq`.)
    fn eq(&self, _: &JsClosure) -> bool {
        true
    }
}
/// Strategies for turning a fired DOM event into an application message.
#[derive(Clone, Debug, PartialEq)]
pub enum EventToMessage<Msg> {
    /// Always produce the same fixed message, ignoring the event payload.
    StaticMsg(Msg),
}
/// Convenience constructor for a text node.
pub fn text<Msg, S: Into<String>>(inner: S) -> Html<Msg> {
    let content: String = inner.into();
    Html::Text(content)
}
|
use std::io;
use docker::Docker;
use strategy::CleanupStrategy;
/// Removes Docker images selected by a `CleanupStrategy`.
pub struct ImageCleanup{
    // Docker API client used for listing and deleting images.
    docker: Docker
}
impl ImageCleanup {
    /// Creates a cleanup helper that owns the given Docker client.
    pub fn new(docker: Docker) -> ImageCleanup{
        ImageCleanup { docker }
    }
    /// Deletes every image tag selected by `strategy`.
    ///
    /// Lists the current (non-dangling) images, lets the strategy pick the
    /// victims, then deletes each tag in turn. Propagates the first I/O
    /// error encountered. (Uses `?` instead of the deprecated `try!` macro.)
    pub fn cleanup(&self, strategy: &CleanupStrategy) -> Result<(), io::Error>{
        let images = self.docker.get_images(false)?;
        let remove = strategy.filter(images)?;
        for image in remove {
            for tag in image.RepoTags {
                info!("Remove {}", tag);
                let status = self.docker.delete_image(tag, false, false)?;
                debug!("{:?}", status);
            }
        }
        Ok(())
    }
}
|
/// Searches for the largest palindromic product of two 3-digit numbers
/// (Project Euler #4-style), printing each new best as it is found.
fn main() {
    // We're going to decrement these in a clever way
    let mut num1 = 999;
    let mut num2 = 999;
    // Keep track of the largest product so far
    let mut largest = 0;
    // The threshold past which you stop checking
    let mut limit = 0;
    // Iterate through all (899 choose 2 = 403651) possible pairings
    // of 3 digit numbers. 899 because that's how many numbers there
    // are between 100 and 999. Or is it? Off-by-one errors are hard.
    // NOTE(review): there are 900 numbers in 100..=999, so this bound may
    // undercount pairs slightly; the `limit` early-exit below usually stops
    // the loop first — confirm. (The parentheses around the range are
    // redundant and trigger an `unused_parens` warning.)
    for complement in (1..403651) {
        // If you reach this point, there's no need to check the rest.
        if num1 < limit {
            break
        }
        // We want to do one better than a brute-force search of
        // pairings. In particular, we probably want to search by
        // going from large three digit numbers to small ones. But
        // we can also be fairly confident that things like 999 * 1
        // won't yield the largest products. So, we want to search
        // by taking
        //
        // 999 * 999, then
        // 998 * 999, 998 * 998, then
        // 997 * 999, 997 * 998, 997 * 997, etc.
        //
        // We can do this by decrementing num1 whenever complement
        // is a triangle number, i.e. whenever complement is of the
        // form n(n+1)/2. n has to be a natural number, so solving for
        // n, this happens whenever (8*complement) + 1 is a perfect
        // square.
        let determiner = (8.0*(complement as f64) + 1.0).sqrt();
        // If it's a perfect square, then the square root of its float
        // value will equal the floor, and we decrement num1 by 1, and
        // reset num2 to 999. else, we decrement num2.
        if determiner == determiner.floor() {
            num1 = num1 - 1;
            num2 = 999;
        } else {
            num2 = num2 - 1;
        }
        // uncomment this to see the number pairing stuff in action.
        // println!("{}, {}", num1, num2);
        // The rest is pretty simple. We take the product,
        let product = num1 * num2;
        // check if it's larger than the previous largest palindrome
        if product > largest {
            // if so, we'll invest the time into checking whether
            // it's a palindrome
            if is_palindrome(product) {
                println!("Largest product so far is {} = {} x {}",
                         product, num1, num2);
                largest = product; // update the largest
                // Any num1 below largest/999 cannot beat `largest` even
                // when paired with 999, so it becomes the stopping point.
                limit = largest / 999 // and our stopping point.
            }
        }
    }
}
/// Returns true when `n`'s decimal representation reads the same forwards
/// and backwards.
fn is_palindrome(n:u32) -> bool {
    let digits: String = n.to_string();
    // Pair the digits with their mirror-image counterparts; all pairs must
    // agree for a palindrome.
    digits
        .chars()
        .zip(digits.chars().rev())
        .all(|(front, back)| front == back)
}
|
use crate::serv_auth::Preauthed;
use crate::serv_conn::ServiceConnection;
use crate::util::ArcExt;
use acme_lib::Certificate;
use serde::{Deserialize, Serialize};
use std::sync::Weak;
// It's important the peek doesn't expect more than the smallest possible request.
// The smallest possible HTTP request would be about 18 bytes
// "GET / HTTP/1.0\r\n\r\n"
// we use "lolb<8 bytes>" to indicate a service connection (12 bytes).
/// Number of bytes peeked to detect a service-connection preamble.
pub(crate) const PREAUTH_LEN: usize = 12;
/// Magic prefix identifying a service connection (followed by 8 more bytes).
pub(crate) const PREAUTH_PREFIX: &[u8] = b"lolb";
/// Holder of all defined services.
#[derive(Debug, Default)]
pub(crate) struct Services {
    /// All configured service domains.
    domains: Vec<ServiceDomain>,
}
/// Domain to be serviced by a load balancer.
#[derive(Debug)]
pub(crate) struct ServiceDomain {
    /// The dns name of the domain serviced. Something like `example.com`.
    domain: String,
    /// Auth to use when adding service connections to this domain.
    auth: ServiceAuth,
    /// The current set of hosts serviced under the domain. A host
    /// would be something like `myservice.example.com`.
    hosts: Vec<ServiceHost>,
}
/// Kinds of authentications for authenticating connections added to the service domain.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ServiceAuth {
    /// Some secret string shared between the load balancer and the service.
    PresharedKey(String),
}
impl ServiceAuth {
    /// Returns true when `secret` matches the configured credential.
    fn is_valid(&self, secret: &str) -> bool {
        match self {
            ServiceAuth::PresharedKey(expected) => secret == expected,
        }
    }
}
/// Service host gathers a bunch of routes for that host. It is possible to route
/// `https://myservice.example.com/something/*` to one set of hosts, and
/// `https://myservice.example.com/other/*` to another.
#[derive(Debug)]
pub(crate) struct ServiceHost {
    /// The service host name. Something like `myservice.example.com`.
    host: String,
    /// TLS certificate needed to service this domain. If known.
    cert: Option<Certificate>,
    /// Current routes for the host. When selecting route the longest route prefix
    /// wins.
    routes: Vec<ServiceRoute>,
}
/// A route under a service host.
#[derive(Debug)]
pub(crate) struct ServiceRoute {
    /// Route under service. I.e. `/something` or `/other`. Empty string is not
    /// a valid route, instead we use `/`.
    prefix: String,
    /// Current connections servicing this route. The strong reference is held by the
    /// closure driving the connection.
    connections: Vec<Weak<ServiceConnection>>,
}
impl Services {
    /// Creates an empty registry with no configured domains.
    pub fn new() -> Self {
        Services {
            ..Default::default()
        }
    }
    /// Checks `secret` against the auth configured for the domain matching `p`.
    /// Returns `false` when no configured domain matches at all.
    pub fn is_valid_secret(&self, p: &Preauthed, secret: &str) -> bool {
        let service = self.domains.iter().find(|s| p.is_same_domain(s));
        if let Some(service) = service {
            return service.auth.is_valid(secret);
        } else {
            // XXX log something.
        }
        false
    }
    /// Registers an authenticated service connection under its (already
    /// configured) domain.
    pub fn add_preauthed(&mut self, p: Preauthed, c: Weak<ServiceConnection>) {
        let service = self
            .domains
            .iter_mut()
            .find(|s| p.is_same_domain(s))
            // we shouldn't be able to have created a preauthed instance for
            // a domain that doesn't exist, so this is a fault.
            .expect("Preauthed for not configured domain");
        service.add_preauthed(p, c);
    }
    /// Route the request to a service.
    ///
    /// Picks the longest matching domain suffix, then an exact host match,
    /// then the longest matching route prefix, and finally any still-alive
    /// connection on that route.
    pub fn route<X>(&mut self, req: &http::Request<X>) -> Option<ServiceConnection> {
        let uri = req.uri();
        let host = uri.authority_part()?.host();
        let path = uri.path_and_query().map(|p| p.path()).unwrap_or("/");
        // find something that matches domain ending i.e: `a.b.c.com` might match
        // `b.c.com` and `c.com`. Longest wins.
        let mut domains: Vec<&mut ServiceDomain> = self
            .domains
            .iter_mut()
            .filter(|s| s.domain.ends_with(host))
            .collect();
        domains.as_mut_slice().sort_by_key(|d| d.domain.len());
        // the "best" domain is last.
        let domain = domains.last_mut()?;
        // the host must be an exact match.
        let host = domain.hosts.iter_mut().find(|h| h.host == host)?;
        // find all routes that has a prefix that matches the incoming request path.
        let mut routes: Vec<&mut ServiceRoute> = host
            .routes
            .iter_mut()
            .filter(|r| path.starts_with(&r.prefix))
            .collect();
        routes.as_mut_slice().sort_by_key(|r| r.prefix.len());
        // the "best" is the last.
        let route = routes.last_mut()?;
        // find a connection that is alive.
        // TODO sticky logic
        let conn = loop {
            // prune dead connections.
            route.connections.retain(|c| c.upgrade().is_some());
            if route.connections.is_empty() {
                break None;
            }
            // find first connection that is not dead
            if let Some(s) = route
                .connections
                .iter()
                .find(|c| c.upgrade().is_some())
                .and_then(|c| c.upgrade())
            {
                // ServiceConnection contains a h2 SendRequest, that we must clone to
                // get "our own" instance to send requests to.
                //
                // At this point we hold a _strong_ reference
                // to Arc<ServiceConnection> and it will not be gone by connection disconnecting.
                // Whether it will work to send requests to is a whole other matter.
                break Some(s.clone_contained());
            }
        }?;
        Some(conn)
    }
}
impl ServiceDomain {
    /// The dns name serviced by this domain.
    pub fn domain(&self) -> &str {
        &self.domain
    }
    /// add/create a routing entry for a preauthed service connection.
    pub fn add_preauthed(&mut self, p: Preauthed, c: Weak<ServiceConnection>) {
        let idx = match self.hosts.iter().position(|h| p.is_same_host(h)) {
            Some(found) => found,
            None => {
                // First connection for this host: create the entry.
                self.hosts.push(ServiceHost::new(p.host()));
                self.hosts.len() - 1
            }
        };
        self.hosts[idx].add_preauthed(p, c);
    }
}
impl ServiceHost {
    /// Creates a host entry with no certificate and no routes yet.
    fn new(host: &str) -> Self {
        Self {
            host: host.to_owned(),
            cert: None,
            routes: Vec::new(),
        }
    }
    /// The host name, e.g. `myservice.example.com`.
    pub fn host(&self) -> &str {
        &self.host
    }
    /// add/create a routing entry for a preauthed service connection.
    pub fn add_preauthed(&mut self, p: Preauthed, c: Weak<ServiceConnection>) {
        let idx = match self.routes.iter().position(|r| p.is_same_prefix(r)) {
            Some(found) => found,
            None => {
                // First connection for this prefix: create the route.
                self.routes.push(ServiceRoute::new(p.prefix()));
                self.routes.len() - 1
            }
        };
        self.routes[idx].add_connection(c);
    }
}
impl ServiceRoute {
pub fn new(prefix: &str) -> Self {
ServiceRoute {
prefix: prefix.to_string(),
connections: vec![],
}
}
pub fn prefix(&self) -> &str {
&self.prefix
}
pub fn add_connection(&mut self, c: Weak<ServiceConnection>) {
self.connections.push(c);
}
}
|
use std::env;
use std::error::Error;
use std::result;
/// Convenience alias: any boxed error.
type Result<T> = result::Result<T, Box<dyn Error>>;
/// Infinite recipe scoreboard (AoC 2018 day 14 style).
///
/// Yields the scoreboard digits in order, growing the board lazily as the
/// two elves combine their current recipes.
struct Scores {
    /// All digits on the scoreboard so far.
    scores: Vec<usize>,
    /// Current board positions of the two elves.
    elves: [usize; 2],
    /// Digits already on the board but not yet yielded (popped LIFO).
    hold: Vec<usize>,
}
impl Iterator for Scores {
    type Item = usize;
    fn next(&mut self) -> Option<usize> {
        // Drain buffered digits first (initial seeds / second digit of a sum).
        if let Some(buffered) = self.hold.pop() {
            return Some(buffered);
        }
        let total: usize = self.elves.iter().map(|&e| self.scores[e]).sum();
        let emitted = if total > 9 {
            assert!(total < 20);
            let (tens, ones) = (total / 10, total % 10);
            self.scores.push(tens);
            self.scores.push(ones);
            // Yield the tens digit now; the ones digit comes out next call.
            self.hold.push(ones);
            tens
        } else {
            self.scores.push(total);
            total
        };
        // Each elf steps forward 1 + its current recipe score, wrapping.
        let len = self.scores.len();
        for elf in self.elves.iter_mut() {
            *elf = (*elf + self.scores[*elf] + 1) % len;
        }
        Some(emitted)
    }
}
/// Usage: `<score0> <score1> <input>` — the two starting recipe scores and
/// the puzzle input, used both as an iteration count (part 1) and as a
/// digit pattern to locate on the scoreboard (part 2).
fn main() -> Result<()> {
    let args: Vec<_> = env::args().skip(1).collect();
    let (score0, score1, input) = match &args.as_slice() {
        &[a, b, c] => (a, b, c),
        _ => return Err("expected 3 arguments".into()),
    };
    let score0: usize = score0.parse()?;
    let score1: usize = score1.parse()?;
    let iterations: usize = input.parse()?;
    // Part 1: print the ten scores after `iterations` recipes.
    // `hold` is pre-seeded in reverse because digits are popped off the end.
    let scores = Scores {
        scores: vec![score0, score1],
        elves: [0, 1],
        hold: vec![score1, score0],
    };
    for score in scores.skip(iterations).take(10) {
        print!("{}", score);
    }
    println!();
    // Part 2: a fresh iterator, scanned for the first occurrence of the
    // input digits; prints how many recipes precede that occurrence.
    let scores = Scores {
        scores: vec![score0, score1],
        elves: [0, 1],
        hold: vec![score1, score0],
    };
    let score_pattern: Vec<usize> = input
        .as_bytes()
        .into_iter()
        .map(|c| (c - b'0') as usize)
        .collect();
    let mut matched = 0;
    for (i, score) in scores.enumerate() {
        if score == score_pattern[matched] {
            matched += 1;
            if matched == score_pattern.len() {
                println!("{}", i - matched + 1);
                break;
            }
        } else {
            // NOTE(review): on mismatch only the first pattern digit is
            // re-checked, so matches whose prefix overlaps a failed partial
            // match (e.g. pattern "112" in "1112") can be missed — confirm
            // this is acceptable for the given inputs.
            matched = if score == score_pattern[0] { 1 } else { 0 };
        }
    }
    Ok(())
}
|
use serde::Deserialize;
use serde_json::Value;
use std::mem;
use std::ops::Range;
/// One line of text plus its per-line render state.
#[derive(Clone)]
pub struct Line {
    text: String,
    /// List of carets, in units of utf-16 code units.
    cursor: Vec<usize>,
    // Style spans applied to this line, in utf-16 units.
    styles: Vec<StyleSpan>,
    // True when the line must be re-rendered (set by `apply_update`).
    invalid: bool,
    // Line index this line maps to after the most recent `apply_update`.
    new_ln: usize,
}
/// A style definition received from core (deserialized from JSON).
#[derive(Deserialize)]
pub struct Style {
    pub id: usize,
    pub fg_color: Option<u32>,
    italic: Option<bool>,
}
/// A styled region of a line.
#[derive(Debug, Clone)]
pub struct StyleSpan {
    pub style_id: usize,
    /// Range of span, in units of utf-16 code units
    pub range: Range<usize>,
}
impl Line {
    /// Builds a `Line` from an xi-core JSON line object, converting the
    /// utf-8 byte offsets in the payload to utf-16 code units.
    pub fn from_json(v: &Value) -> Line {
        let text = v["text"].as_str().unwrap().to_owned();
        let mut cursor = Vec::new();
        if let Some(arr) = v["cursor"].as_array() {
            for c in arr {
                // Caret positions arrive as utf-8 byte offsets.
                let offset_utf8 = c.as_u64().unwrap() as usize;
                cursor.push(count_utf16(&text[..offset_utf8]));
            }
        }
        let mut styles = Vec::new();
        if let Some(arr) = v["styles"].as_array() {
            // Styles arrive as flat triples (start delta, length, style id),
            // each start being relative to the previous span's end.
            let mut ix: i64 = 0;
            for triple in arr.chunks(3) {
                let start = ix + triple[0].as_i64().unwrap();
                let end = start + triple[1].as_i64().unwrap();
                // TODO: count utf from last end, if <=
                let start_utf16 = count_utf16(&text[..start as usize]);
                let end_utf16 = start_utf16 + count_utf16(&text[start as usize..end as usize]);
                let style_id = triple[2].as_u64().unwrap() as usize;
                let style_span = StyleSpan {
                    style_id,
                    range: start_utf16..end_utf16,
                };
                styles.push(style_span);
                ix = end;
            }
        }
        // New lines start out invalid so they get (re)drawn.
        Line {
            text,
            cursor,
            styles,
            invalid: true,
            new_ln: 0,
        }
    }
    /// The line's text content.
    pub fn text(&self) -> &str {
        &self.text
    }
    /// Caret offsets on this line, in utf-16 code units.
    pub fn cursor(&self) -> &[usize] {
        &self.cursor
    }
    /// Style spans of this line.
    pub fn styles(&self) -> &[StyleSpan] {
        &self.styles
    }
    /// Line index this (old) line maps to after the latest `apply_update`.
    pub fn new_ln(&self) -> usize {
        self.new_ln
    }
}
/// An annotation (e.g. selection/find highlight) received from core.
#[derive(Deserialize, Debug, Clone)]
pub struct Annotation {
    #[serde(rename = "type")]
    pub kind: String,
    /// Covered regions as `[start_line, start_col, end_line, end_col]`,
    /// columns in utf-16 code units.
    pub ranges: Vec<[usize; 4]>,
    pub payloads: Option<()>,
    pub n: usize,
}
impl Annotation {
    /// Returns the column span `(start, end)` (utf-16 code units) that this
    /// annotation covers on line `ln`, or `None` when no range touches it.
    ///
    /// Ranges are `[start_line, start_col, end_line, end_col]`; a line that
    /// lies strictly inside a multi-line range is covered end to end.
    pub fn check_line(&self, ln: usize, line: &Line) -> Option<(usize, usize)> {
        let len = count_utf16(line.text());
        for range in &self.ranges {
            let start_line = range[0];
            let start_col = range[1];
            let end_line = range[2];
            let end_col = range[3];
            // Fix: the old code `return None`d (or returned unconditionally)
            // on the first range, so ranges after the first were never
            // examined. Skip non-covering ranges and keep looking instead.
            if start_line > ln || end_line < ln {
                continue;
            }
            if start_line < ln && ln < end_line {
                // Strictly inside the range: the whole line is covered.
                return Some((0, len));
            }
            let mut start = 0;
            let mut end = 0;
            if start_line == ln {
                start = start_col;
                if end_line > ln {
                    // Range continues past this line: cover to end of line.
                    end = len;
                }
            }
            if end_line == ln {
                end = end_col;
            }
            return Some((start, end));
        }
        None
    }
}
/// Client-side cache of the document's lines, kept in sync via updates.
pub struct LineCache {
    // Current generation of lines (`None` = invalid/unknown placeholder).
    lines: Vec<Option<Line>>,
    // Previous generation, kept so old lines can be mapped to new indices.
    old_lines: Vec<Option<Line>>,
    // Annotations from the most recent update.
    annotations: Vec<Annotation>,
}
impl LineCache {
    /// Creates an empty cache.
    pub fn new() -> LineCache {
        LineCache {
            lines: Vec::new(),
            old_lines: Vec::new(),
            annotations: Vec::new(),
        }
    }
    /// Appends one entry to the new line list (`None` = invalid placeholder).
    fn push_opt_line(&mut self, line: Option<Line>) {
        self.lines.push(line);
    }
    /// Applies an xi-core "update" notification to the cache.
    ///
    /// Rebuilds `lines` from the "ins"/"copy"/"skip"/"invalidate" ops, keeps
    /// the previous generation in `old_lines` (tagging each kept old line
    /// with the new index it maps to via `new_ln`), and refreshes the
    /// annotations. Returns `(first, last)` indices of invalid lines.
    /// NOTE(review): when no line is invalid, `start`/`end` stay `-1` and the
    /// final `as usize` casts wrap to huge values — confirm callers treat
    /// that as "nothing to redraw".
    pub fn apply_update(&mut self, update: &Value) -> (usize, usize) {
        // Swap in a fresh cache; the previous lines are kept for index mapping.
        let old_cache = mem::replace(self, LineCache::new());
        let mut old_lines = old_cache.lines;
        // i: position in old_lines; ln: position in the rebuilt lines.
        let mut i = 0;
        let mut ln = 0;
        let mut pending_skip = 0;
        let mut pending_invalidate = 0;
        let mut previous_insert = 0;
        for op in update["ops"].as_array().unwrap() {
            let op_type = &op["op"];
            if op_type == "ins" {
                // Newly inserted lines: parse and append each one.
                let lines = op["lines"].as_array().unwrap();
                pending_skip += lines.len();
                for (j, line) in lines.iter().enumerate() {
                    let line = Line::from_json(line);
                    self.push_opt_line(Some(line));
                    ln += 1;
                }
                previous_insert += lines.len();
            } else if op_type == "copy" {
                // Carry the next n old lines over unchanged.
                pending_skip = 0;
                pending_invalidate = 0;
                previous_insert = 0;
                let n = op["n"].as_u64().unwrap();
                for _ in 0..n {
                    // A copied line only needs redrawing if its index moved.
                    let line = match old_lines.get(i).unwrap_or(&None).clone() {
                        Some(mut line) => {
                            if i != ln {
                                line.invalid = true;
                            } else {
                                line.invalid = false;
                            }
                            Some(line)
                        }
                        None => None,
                    };
                    self.push_opt_line(line);
                    // Record where the old line ended up in the new list.
                    match old_lines.get(i).unwrap_or(&None).clone() {
                        Some(mut old_line) => {
                            old_line.new_ln = ln;
                            mem::replace(&mut old_lines[i], Some(old_line));
                        }
                        None => (),
                    };
                    i += 1;
                    ln += 1;
                    // self.push_opt_line(old_iter.next().unwrap_or_default());
                }
            } else if op_type == "skip" {
                // Drop the next n old lines from the new list, but still tag
                // them with a plausible new index for diffing.
                let n = op["n"].as_u64().unwrap() as usize;
                for j in 0..n {
                    let new_ln = if j > pending_skip - 1 {
                        ln
                    } else {
                        ln - pending_skip + j
                    };
                    match old_lines.get(i).unwrap_or(&None).clone() {
                        Some(mut old_line) => {
                            old_line.new_ln = new_ln;
                            mem::replace(&mut old_lines[i], Some(old_line));
                        }
                        None => (),
                    };
                    i += 1;
                }
                pending_skip = 0;
                pending_invalidate = 0;
                previous_insert = 0;
            } else if op_type == "invalidate" {
                // n lines whose content core did not send; reuse old content
                // where available, marking moved lines invalid.
                let n = op["n"].as_u64().unwrap() as usize;
                for j in 0..n {
                    let old_index = i + j + pending_invalidate + previous_insert;
                    let line = match old_lines.get(old_index).unwrap_or(&None).clone() {
                        Some(mut line) => {
                            if old_index != ln {
                                line.invalid = true;
                            } else {
                                line.invalid = false;
                            }
                            Some(line)
                        }
                        None => None,
                    };
                    self.push_opt_line(line);
                    ln += 1;
                }
                pending_skip += n;
                pending_invalidate += n;
            }
        }
        // Tag any remaining old lines that were implicitly skipped at the end.
        if pending_skip > 0 {
            for j in 0..pending_skip {
                let new_ln = if j > pending_skip - 1 {
                    ln
                } else {
                    ln - pending_skip + j
                };
                match old_lines.get(i).unwrap_or(&None).clone() {
                    Some(mut old_line) => {
                        old_line.new_ln = new_ln;
                        mem::replace(&mut old_lines[i], Some(old_line));
                    }
                    None => (),
                };
                i += 1;
            }
        }
        self.old_lines = old_lines;
        if let Ok(annotations) =
            serde_json::from_value::<Vec<Annotation>>(update["annotations"].clone())
        {
            self.annotations = annotations;
        }
        // Compute the first/last invalid line indices (-1 = none found).
        let mut start = -1;
        let mut end = -1;
        let mut n = 0;
        for line in &self.lines {
            match line {
                Some(line) => {
                    if line.invalid {
                        if start == -1 {
                            start = n;
                        }
                        if n > end {
                            end = n;
                        }
                    }
                }
                None => (),
            }
            n += 1;
        }
        (start as usize, end as usize)
    }
    /// Number of lines currently in the cache.
    pub fn height(&self) -> usize {
        self.lines.len()
    }
    /// Line at index `ix` in the current generation, if known.
    pub fn get_line(&self, ix: usize) -> Option<&Line> {
        if ix < self.lines.len() {
            self.lines[ix].as_ref()
        } else {
            None
        }
    }
    /// Line at index `ix` in the previous generation, if known.
    pub fn get_old_line(&self, ix: usize) -> Option<&Line> {
        if ix < self.old_lines.len() {
            self.old_lines[ix].as_ref()
        } else {
            None
        }
    }
    /// Annotations from the latest update (cloned).
    pub fn annotations(&self) -> Vec<Annotation> {
        self.annotations.clone()
    }
}
/// Counts the number of utf-16 code units in the given string.
///
/// Every scalar value costs one unit, and supplementary-plane characters
/// (4 utf-8 bytes) cost a second one for their surrogate pair — i.e. this
/// is the sum of `char::len_utf16` over all chars.
pub fn count_utf16(s: &str) -> usize {
    s.chars().map(char::len_utf16).sum()
}
|
fn main() {
    // A closure-based "bank account": each account captures its own balance.
    // `dyn` is required here — the bare `Box<FnMut...>` trait-object syntax
    // is deprecated (rust_2018_idioms) and rejected by the 2021 edition.
    type Account = Box<dyn FnMut(i32) -> Result<i32, &'static str>>;
    /// Returns a withdrawal closure owning an independent `balance`.
    /// On success, yields the remaining balance.
    fn make_withdraw(mut balance: i32) -> Account {
        Box::new(move |amount: i32| {
            if balance >= amount {
                balance -= amount;
                Ok(balance)
            } else {
                Err("Insufficient funds")
            }
        })
    }
    let mut w1 = make_withdraw(100);
    let mut w2 = make_withdraw(100);
    // w1 and w2 hold independent state: draining w1 does not affect w2.
    println!("{:?}", w1(10));
    println!("{:?}", w1(20));
    println!("{:?}", w1(80));
    println!("{:?}", w2(40));
}
|
use pretty_assertions::assert_eq;
use crate::{
bson::doc,
bson_util,
cmap::StreamDescription,
concern::{Acknowledgment, WriteConcern},
error::{ErrorKind, WriteConcernError, WriteError, WriteFailure},
operation::{test::handle_response_test, Delete, Operation},
options::DeleteOptions,
Namespace,
};
#[test]
fn build_many() {
    // A Delete built without a limit must produce `"limit": 0` (delete all
    // matching documents) and carry the write concern into the command body.
    let ns = Namespace {
        db: "test_db".to_string(),
        coll: "test_coll".to_string(),
    };
    let filter = doc! { "x": { "$gt": 1 } };
    let wc = WriteConcern {
        w: Some(Acknowledgment::Majority),
        ..Default::default()
    };
    let options = DeleteOptions::builder().write_concern(wc).build();
    let mut op = Delete::new(ns, filter.clone(), None, Some(options));
    let description = StreamDescription::new_testing();
    let mut cmd = op.build(&description).unwrap();
    assert_eq!(cmd.name.as_str(), "delete");
    assert_eq!(cmd.target_db.as_str(), "test_db");
    let mut expected_body = doc! {
        "delete": "test_coll",
        "deletes": [
            {
                "q": filter,
                "limit": 0,
            }
        ],
        "writeConcern": {
            "w": "majority"
        },
        "ordered": true,
    };
    // Key order is not significant; sort both documents before comparing.
    bson_util::sort_document(&mut cmd.body);
    bson_util::sort_document(&mut expected_body);
    assert_eq!(cmd.body, expected_body);
}
#[test]
fn build_one() {
    // A Delete built with an explicit limit of 1 must produce `"limit": 1`
    // (delete at most one matching document).
    let ns = Namespace {
        db: "test_db".to_string(),
        coll: "test_coll".to_string(),
    };
    let filter = doc! { "x": { "$gt": 1 } };
    let wc = WriteConcern {
        w: Some(Acknowledgment::Majority),
        ..Default::default()
    };
    let options = DeleteOptions::builder().write_concern(wc).build();
    let mut op = Delete::new(ns, filter.clone(), Some(1), Some(options));
    let description = StreamDescription::new_testing();
    let mut cmd = op.build(&description).unwrap();
    assert_eq!(cmd.name.as_str(), "delete");
    assert_eq!(cmd.target_db.as_str(), "test_db");
    let mut expected_body = doc! {
        "delete": "test_coll",
        "deletes": [
            {
                "q": filter,
                "limit": 1,
            }
        ],
        "writeConcern": {
            "w": "majority"
        },
        "ordered": true,
    };
    // Key order is not significant; sort both documents before comparing.
    bson_util::sort_document(&mut cmd.body);
    bson_util::sort_document(&mut expected_body);
    assert_eq!(cmd.body, expected_body);
}
#[test]
fn handle_success() {
    // A successful reply maps `n` to the reported deleted count.
    let op = Delete::empty();
    let response = doc! {
        "ok": 1.0,
        "n": 3
    };
    let result = handle_response_test(&op, response).expect("should succeed");
    assert_eq!(result.deleted_count, 3);
}
#[test]
fn handle_invalid_response() {
    // A reply missing the expected fields must be rejected.
    let op = Delete::empty();
    let response = doc! {
        "ok": 1.0,
        "asffasdf": 123123
    };
    handle_response_test(&op, response).expect_err("should fail");
}
#[test]
fn handle_write_failure() {
    // A per-document `writeErrors` entry in the reply must surface as
    // `ErrorKind::Write(WriteFailure::WriteError(..))` with its fields intact.
    let op = Delete::empty();
    let write_error_response = doc! {
        "ok": 1.0,
        "n": 0,
        "writeErrors": [
            {
                "index": 0,
                "code": 1234,
                "errmsg": "my error string"
            }
        ]
    };
    let write_error = handle_response_test(&op, write_error_response).unwrap_err();
    match *write_error.kind {
        ErrorKind::Write(WriteFailure::WriteError(ref error)) => {
            let expected_err = WriteError {
                code: 1234,
                code_name: None,
                message: "my error string".to_string(),
                details: None,
            };
            assert_eq!(error, &expected_err);
        }
        ref e => panic!("expected write error, got {:?}", e),
    };
}
#[test]
fn handle_write_concern_failure() {
    // A `writeConcernError` in the reply must surface as
    // `ErrorKind::Write(WriteFailure::WriteConcernError(..))`, preserving
    // code, codeName, message and the errInfo details document.
    let op = Delete::empty();
    let wc_error_response = doc! {
        "ok": 1.0,
        "n": 0,
        "writeConcernError": {
            "code": 456,
            "codeName": "wcError",
            "errmsg": "some message",
            "errInfo": {
                "writeConcern": {
                    "w": 2,
                    "wtimeout": 0,
                    "provenance": "clientSupplied"
                }
            }
        }
    };
    let wc_error = handle_response_test(&op, wc_error_response)
        .expect_err("should fail with write concern error");
    match *wc_error.kind {
        ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => {
            let expected_wc_err = WriteConcernError {
                code: 456,
                code_name: "wcError".to_string(),
                message: "some message".to_string(),
                details: Some(doc! { "writeConcern": {
                    "w": 2,
                    "wtimeout": 0,
                    "provenance": "clientSupplied"
                } }),
                labels: vec![],
            };
            assert_eq!(wc_error, &expected_wc_err);
        }
        ref e => panic!("expected write concern error, got {:?}", e),
    }
}
|
#![warn(rust_2018_idioms, clippy::all)]
mod helpers;
mod services;
use simple_logger::SimpleLogger;
use anyhow::{Result, Error};
use log::{
warn,
LevelFilter
};
use clap::{Arg, App, SubCommand, ArgMatches};
use yansi::Paint;
use crate::services::{
model::{EnsurableEntity, RemovableEntity},
docker::Docker,
k3d::K3d,
kubectl::Kubectl,
pip3::Pip3,
kfp::Kfp,
kfp_service::KfpService,
port_forward::PortForward,
k3d_service::K3dService,
curl::Curl,
git::Git,
kfctl::Kfctl,
kf_service::KfService,
k9s::K9s
};
// TODO:
// * Make all of the version, yaml location, blah, blah, blah, configurable.
// * Pass through options to k3d.
// * Fix `kfpl service` bug.
#[tokio::main]
async fn main() -> Result<()> {
    // Set up logging. Fix: the builder's result was previously discarded, so
    // no logger was ever registered and all `log` output was silently lost;
    // `.init()` installs it (and `?` propagates a double-init error).
    SimpleLogger::new().with_level(LevelFilter::Info).init()?;
    let init_help = &*format!("Ensures the dependencies are met ({}).", Paint::yellow("may need to be run as sudo"));
    // CLI definition: `init`, `service start|stop`, and `ui` subcommands.
    let app = App::new("kfpl")
        .version("1.2.0")
        .author("Aaron Roney")
        .about("Automates running KubeFlow Pipelines (KFP) locally.")
        .arg(Arg::with_name("skip_confirm")
            .short("-y")
            .long("yes")
            .help("Answers all of the prompts with 'yes', resulting in a no-touch execution."))
        .subcommand(SubCommand::with_name("init")
            .about(init_help))
        .subcommand(SubCommand::with_name("service")
            .about("Commands to interact with the k3d cluster, and the KFP service.")
            .subcommand(SubCommand::with_name("start")
                .about("Starts the k8s cluster, and the KFP service.")
                .arg(Arg::with_name("kfp_only")
                    .long("kfp-only")
                    .help("Deploys only KubeFlow Pipelines (KFP), rather than all of KubeFlow."))
                .arg(Arg::with_name("k3d_cluster_name")
                    .short("n")
                    .long("k3d-cluster-name")
                    .takes_value(true)
                    .default_value("kfp-local")
                    .help("The `name` assigned to the cluster created by k3d."))
                .arg(Arg::with_name("k3d_image")
                    .short("i")
                    .long("k3d-image")
                    .takes_value(true)
                    .default_value("rancher/k3s:v1.20.2-k3s1")
                    .help("The `k3s` image used to serve the k3d cluster."))
                .arg(Arg::with_name("k3d_api_address")
                    .short("a")
                    .long("k3d-api-address")
                    .takes_value(true)
                    .default_value("0.0.0.0")
                    .help("The address to which the k3d load balancer for the kubernetes API is bound (e.g., if you don't want outside connections, use `127.0.0.1`)."))
                .arg(Arg::with_name("k3d_api_port")
                    .short("p")
                    .long("k3d-api-port")
                    .takes_value(true)
                    .default_value("6443")
                    .help("The port to which the k3d load balancer for the kubernetes API is bound."))
                .arg(Arg::with_name("kfp_version")
                    .long("kfp-version")
                    .takes_value(true)
                    .default_value("1.0.4")
                    .help("The specific version of KFP to install (only works with the `--kfp-only` option)."))
                .arg(Arg::with_name("kf_yaml")
                    .long("kf-yaml")
                    .takes_value(true)
                    .default_value("https://raw.githubusercontent.com/kubeflow/manifests/v1.2-branch/kfdef/kfctl_k8s_istio.v1.2.0.yaml")
                    .help("The specific YAML manifest used to deploy KF (is ignored when `--kfp-only` is set).")))
            .subcommand(SubCommand::with_name("stop")
                .about("Stops the k8s cluster, and the KFP service.")
                .arg(Arg::with_name("k3d_cluster_name")
                    .short("n")
                    .long("k3d-cluster-name")
                    .takes_value(true)
                    .default_value("kfp-local")
                    .help("The `name` assigned to the cluster created by k3d."))))
        .subcommand(SubCommand::with_name("ui")
            .about("Starts the port forwarding to the KFP UI via `kubectl`.")
            .arg(Arg::with_name("kfp_only")
                .long("kfp-only")
                .help("Port forwards only the KubeFlow Pipelines (KFP) UI, rather than the KubeFlow UI."))
            .arg(Arg::with_name("port")
                .short("p")
                .long("port")
                .takes_value(true)
                .default_value("8080")
                .help("The localhost port to which you want to bind the port forward."))
            .arg(Arg::with_name("address")
                .short("a")
                .long("address")
                .takes_value(true)
                .default_value("0.0.0.0")
                .help("The address to which the port forwarding proxy is bound (e.g., if you don't want outside connections, use `127.0.0.1`).")));
    execute(app).await?;
    println!();
    Ok(())
}
/// Dispatches to the matching subcommand handler; prints the long help when
/// no (or an unknown) subcommand was given.
async fn execute(mut app: App<'_, '_>) -> Result<()> {
    // `get_matches` consumes the app, so parse a clone and keep the original
    // around for `print_long_help` in the fallback arm.
    let args = app.clone().get_matches();
    // Prompts default to on; `-y/--yes` answers them all with 'yes'.
    let confirm = !args.is_present("skip_confirm");
    let (sub_name, sub_matches) = args.subcommand();
    match sub_name {
        "init" => init(confirm).await,
        "service" => service(confirm, sub_matches.unwrap()).await,
        "ui" => ui(confirm, sub_matches.unwrap()).await,
        _ => app.print_long_help().map_err(|e| e.into())
    }
}
/// Ensures every tool kfpl depends on is installed, stopping at the first
/// failure. NOTE(review): the sequence appears intentional (e.g. curl/git
/// before the tools fetched with them) — confirm before reordering.
async fn init(confirm: bool) -> Result<()> {
    println!("Ensuring proper {} ...", Paint::blue("dependencies"));
    Curl::default().ensure(confirm).await?;
    Git::default().ensure(confirm).await?;
    K3d::default().ensure(confirm).await?;
    Kubectl::default().ensure(confirm).await?;
    Kfctl::default().ensure(confirm).await?;
    K9s::default().ensure(confirm).await?;
    Pip3::default().ensure(confirm).await?;
    Kfp::default().ensure(confirm).await?;
    Docker::default().ensure(confirm).await?;
    Ok(())
}
/// Handles `kfpl service start|stop`: brings the k3d cluster plus KFP/KF
/// deployment up, or tears the cluster down.
async fn service(confirm: bool, args: &ArgMatches<'_>) -> Result<()> {
    let (sub_name, sub_args_option) = args.subcommand();
    let sub_args = sub_args_option.unwrap();
    match sub_name {
        "start" => {
            println!("Ensuring {} are running ...", Paint::blue("services"));
            // SAFETY: unwrap is safe because it has a default value.
            let k3d_cluster_name = sub_args.value_of("k3d_cluster_name").unwrap();
            let k3d_image = sub_args.value_of("k3d_image").unwrap();
            let k3d_api_address = sub_args.value_of("k3d_api_address").unwrap();
            let k3d_api_port = sub_args.value_of("k3d_api_port").unwrap();
            let kfp_version = sub_args.value_of("kfp_version").unwrap();
            let kf_yaml = sub_args.value_of("kf_yaml").unwrap();
            // The cluster must exist before anything is deployed onto it.
            K3dService::default()
                .with_k3d_cluster_name(k3d_cluster_name)
                .with_k3d_image(k3d_image)
                .with_k3d_api_address(k3d_api_address)
                .with_k3d_api_port(k3d_api_port)
                .ensure(confirm).await?;
            // `--kfp-only` deploys just KFP; otherwise the full KubeFlow
            // manifest is applied.
            if sub_args.is_present("kfp_only") {
                KfpService::default()
                    .with_kfp_version(kfp_version)
                    .ensure(confirm).await?;
            } else {
                KfService::default()
                    .with_kf_yaml(kf_yaml)
                    .ensure(confirm).await?;
            }
        },
        "stop" => {
            println!("Stopping {} ...", Paint::blue("services"));
            // SAFETY: unwrap is safe because it has a default value.
            let k3d_cluster_name = sub_args.value_of("k3d_cluster_name").unwrap();
            K3dService::default()
                .with_k3d_cluster_name(k3d_cluster_name)
                .remove(confirm).await?;
        },
        _ => return Err(Error::msg("Please use a subcommand (check out `kfpl service -h` for help)."))
    }
    Ok(())
}
/// Starts the `kubectl` port forward to the KFP (or full KubeFlow) UI.
async fn ui(confirm: bool, args: &ArgMatches<'_>) -> Result<()> {
    println!("Starting the {} to the UI ...", Paint::blue("port forward"));
    // SAFETY: unwrap is safe because it has a default value.
    let kfp_only = args.is_present("kfp_only");
    let port = args.value_of("port").unwrap();
    let address = args.value_of("address").unwrap();
    PortForward::default()
        .with_kfp_only(kfp_only)
        .with_port(port)
        .with_address(address)
        .ensure(confirm).await?;
    Ok(())
}
use crate::Pred;
use crate::Pred2;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt::Result as FmtResult;
std_prelude!();
/// Yields each element of `iter` paired with its successor:
/// `[a, b, c]` -> `(a, b), (b, c)`. Empty and single-element inputs yield
/// nothing.
fn iter_adjacent<T: Copy, I: Iterator<Item = T>>(mut iter: I) -> impl Iterator<Item = (T, T)> {
    let mut prev = iter.next();
    std::iter::from_fn(move || {
        let cur = iter.next()?;
        // `prev` is always Some once `cur` exists (seeded above, refreshed here).
        let last = prev.replace(cur)?;
        Some((last, cur))
    })
}
/// Accepts iterators with all two adjacent elements accepted by a given pair predicate.
#[derive(Clone, Copy, Default)]
pub struct AllAdj<P>(pub P);
impl<P: Debug> Debug for AllAdj<P> {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "AllAdj<{:?}>", self.0)
    }
}
impl<T: ?Sized, U: ?Sized, P: Pred2<U>> Pred<T> for AllAdj<P>
where
    for<'a> &'a T: IntoIterator<Item = &'a U>,
{
    fn accept(t: &T) -> bool {
        // Short-circuit as soon as one adjacent pair fails the predicate.
        for pair in iter_adjacent(t.into_iter()) {
            if !P::accept_tup(pair) {
                return false;
            }
        }
        true
    }
}
/// Accepts iterators with any two adjacent elements accepted by a given pair predicate.
#[derive(Clone, Copy, Default)]
pub struct AnyAdj<P>(pub P);
impl<P: Debug> Debug for AnyAdj<P> {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "AnyAdj<{:?}>", self.0)
    }
}
impl<T: ?Sized, U: ?Sized, P: Pred2<U>> Pred<T> for AnyAdj<P>
where
    for<'a> &'a T: IntoIterator<Item = &'a U>,
{
    fn accept(t: &T) -> bool {
        // Short-circuit as soon as one adjacent pair passes the predicate.
        for pair in iter_adjacent(t.into_iter()) {
            if P::accept_tup(pair) {
                return true;
            }
        }
        false
    }
}
/// Accepts iterators with all elements accepted by a given predicate.
#[derive(Clone, Copy, Default)]
pub struct All<P>(pub P);
impl<P: Debug> Debug for All<P> {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "All<{:?}>", self.0)
    }
}
impl<T: ?Sized, U: ?Sized, P: Pred<U>> Pred<T> for All<P>
where
    for<'a> &'a T: IntoIterator<Item = &'a U>,
{
    fn accept(t: &T) -> bool {
        // Short-circuit as soon as one element fails the predicate.
        for item in t.into_iter() {
            if !P::accept(item) {
                return false;
            }
        }
        true
    }
}
/// Accepts iterators with any element accepted by a given predicate.
#[derive(Clone, Copy, Default)]
pub struct Any<P>(pub P);
impl<P: Debug> Debug for Any<P> {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "Any<{:?}>", self.0)
    }
}
impl<T: ?Sized, U: ?Sized, P: Pred<U>> Pred<T> for Any<P>
where
    for<'a> &'a T: IntoIterator<Item = &'a U>,
{
    fn accept(t: &T) -> bool {
        // Short-circuit as soon as one element passes the predicate.
        for item in t.into_iter() {
            if P::accept(item) {
                return true;
            }
        }
        false
    }
}
|
pub mod builder;
mod nfa;
pub use self::nfa::*;
|
//! Data structures used by the JSON-RPC API methods.
use crate::core::{StarknetBlockHash, StarknetBlockNumber};
use serde::{Deserialize, Serialize};
/// Special tag used when specifying the `latest` or `pending` block.
///
/// Always (de)serialized as the lowercase strings below.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub enum Tag {
    /// The most recent fully constructed block
    ///
    /// Represented as the JSON string `"latest"` when passed as an RPC method argument,
    /// for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["latest"]}`
    #[serde(rename = "latest")]
    Latest,
    /// Currently constructed block
    ///
    /// Represented as the JSON string `"pending"` when passed as an RPC method argument,
    /// for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["pending"]}`
    #[serde(rename = "pending")]
    Pending,
}
impl std::fmt::Display for Tag {
    /// Writes the same lowercase string used in the JSON representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let text = match self {
            Tag::Latest => "latest",
            Tag::Pending => "pending",
        };
        f.write_str(text)
    }
}
/// A wrapper that contains either a [Hash](self::BlockHashOrTag::Hash) or a [Tag](self::BlockHashOrTag::Tag).
///
/// `untagged`: serialized as either the hash hex string or the tag string.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum BlockHashOrTag {
    /// Hash of a block
    ///
    /// Represented as a `0x`-prefixed hex JSON string of length from 1 up to 64 characters
    /// when passed as an RPC method argument, for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["0x7d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"]}`
    Hash(StarknetBlockHash),
    /// Special [Tag](crate::rpc::types::Tag) describing a block
    Tag(Tag),
}
impl std::fmt::Display for BlockHashOrTag {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // Hashes render via the inner value's hex form.
            BlockHashOrTag::Hash(StarknetBlockHash(h)) => f.write_str(&h.to_hex_str()),
            // Tags delegate to `Tag`'s Display ("latest"/"pending").
            BlockHashOrTag::Tag(t) => std::fmt::Display::fmt(t, f),
        }
    }
}
/// A wrapper that contains either a block [Number](self::BlockNumberOrTag::Number) or a [Tag](self::BlockNumberOrTag::Tag).
// serde(untagged): the variant is chosen purely from the value's shape.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum BlockNumberOrTag {
/// Number (height) of a block
Number(StarknetBlockNumber),
/// Special [Tag](crate::rpc::types::Tag) describing a block
Tag(Tag),
}
/// Displays either the block height as a decimal number or the tag keyword.
impl std::fmt::Display for BlockNumberOrTag {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Number(StarknetBlockNumber(number)) => std::fmt::Display::fmt(number, f),
            Self::Tag(tag) => std::fmt::Display::fmt(tag, f),
        }
    }
}
/// Groups all strictly input types of the RPC API.
pub mod request {
use crate::{
core::{
CallParam, CallSignatureElem, ContractAddress, EntryPoint, EventKey, Fee,
TransactionVersion,
},
rpc::serde::{CallSignatureElemAsDecimalStr, FeeAsHexStr, TransactionVersionAsHexStr},
};
use serde::Deserialize;
use serde_with::{serde_as, skip_serializing_none};
/// Contains parameters passed to `starknet_call`.
///
/// `signature`, `max_fee` and `version` are optional on the wire and fall
/// back to the defaults defined on [`Call`] below.
#[serde_as]
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
pub struct Call {
pub contract_address: ContractAddress,
pub calldata: Vec<CallParam>,
pub entry_point_selector: EntryPoint,
/// EstimateFee hurry: it doesn't make any sense to use decimal numbers for one field
#[serde(default)]
#[serde_as(as = "Vec<CallSignatureElemAsDecimalStr>")]
pub signature: Vec<CallSignatureElem>,
/// EstimateFee hurry: max fee is needed if there's a signature
#[serde_as(as = "FeeAsHexStr")]
#[serde(default = "call_default_max_fee")]
pub max_fee: Fee,
/// EstimateFee hurry: transaction version might be interesting, might not be around for
/// long
#[serde_as(as = "TransactionVersionAsHexStr")]
#[serde(default = "call_default_version")]
pub version: TransactionVersion,
}
// serde `default = "..."` requires a free function path, hence these shims.
const fn call_default_max_fee() -> Fee {
Call::DEFAULT_MAX_FEE
}
const fn call_default_version() -> TransactionVersion {
Call::DEFAULT_VERSION
}
impl Call {
// Zero max-fee / zero version are the wire defaults when omitted.
pub const DEFAULT_MAX_FEE: Fee = Fee(web3::types::H128::zero());
pub const DEFAULT_VERSION: TransactionVersion =
TransactionVersion(web3::types::H256::zero());
}
/// This is what [`Call`] used to be, but is used in
/// [`crate::rpc::api::RpcApi::add_invoke_transaction`] for example.
///
/// It might be that [`Call`] and arguments of `addInvokeTransaction` could be unified in the
/// future when the dust has settled on the implementation.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
pub struct ContractCall {
pub contract_address: ContractAddress,
pub calldata: Vec<CallParam>,
pub entry_point_selector: EntryPoint,
}
/// Contains event filter parameters passed to `starknet_getEvents`.
#[skip_serializing_none]
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
#[serde(deny_unknown_fields)]
pub struct EventFilter {
#[serde(default, rename = "fromBlock")]
pub from_block: Option<crate::core::BlockId>,
#[serde(default, rename = "toBlock")]
pub to_block: Option<crate::core::BlockId>,
#[serde(default)]
pub address: Option<ContractAddress>,
#[serde(default)]
pub keys: Vec<EventKey>,
// These are inlined here because serde flatten and deny_unknown_fields
// don't work together.
pub page_size: usize,
pub page_number: usize,
}
}
/// Groups all strictly output types of the RPC API.
pub mod reply {
// At the moment both reply types are the same for get_code, hence the re-export
use crate::{
core::{
CallParam, ClassHash, ConstructorParam, ContractAddress, ContractAddressSalt,
EntryPoint, EventData, EventKey, Fee, GlobalRoot, SequencerAddress, StarknetBlockHash,
StarknetBlockNumber, StarknetBlockTimestamp, StarknetTransactionHash, TransactionNonce,
TransactionSignatureElem, TransactionVersion,
},
rpc::{
api::{BlockResponseScope, RawBlock},
serde::{FeeAsHexStr, TransactionVersionAsHexStr},
},
sequencer,
};
use serde::Serialize;
use serde_with::{serde_as, skip_serializing_none};
use stark_hash::StarkHash;
use std::convert::From;
/// L2 Block status as returned by the RPC API.
///
/// Serialized as the SCREAMING_CASE strings mandated by the spec, see the
/// `serde(rename)` attributes.
#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub enum BlockStatus {
#[serde(rename = "PENDING")]
Pending,
#[serde(rename = "ACCEPTED_ON_L2")]
AcceptedOnL2,
#[serde(rename = "ACCEPTED_ON_L1")]
AcceptedOnL1,
#[serde(rename = "REJECTED")]
Rejected,
}
impl From<sequencer::reply::Status> for BlockStatus {
    /// Collapses the sequencer's richer status set onto the four RPC variants:
    /// anything not (yet) accepted and not pending maps to `Rejected`.
    // TODO verify this mapping with Starkware
    fn from(status: sequencer::reply::Status) -> Self {
        use sequencer::reply::Status;
        match status {
            Status::AcceptedOnL1 => Self::AcceptedOnL1,
            Status::AcceptedOnL2 => Self::AcceptedOnL2,
            Status::Pending | Status::Received => Self::Pending,
            Status::NotReceived
            | Status::Rejected
            | Status::Reverted
            | Status::Aborted => Self::Rejected,
        }
    }
}
/// Wrapper for transaction data returned in block related queries,
/// chosen variant depends on [crate::rpc::api::BlockResponseScope](crate::rpc::api::BlockResponseScope).
// serde(untagged): both variants serialize as a bare JSON array; the element
// shape (object vs hash string) distinguishes them.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
#[serde(untagged)]
pub enum Transactions {
Full(Vec<Transaction>),
HashesOnly(Vec<StarknetTransactionHash>),
}
/// L2 Block as returned by the RPC API.
///
/// `block_hash`, `block_number` and `new_root` are `None` for pending blocks,
/// and `skip_serializing_none` drops them from the JSON in that case.
#[serde_as]
#[skip_serializing_none]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct Block {
pub status: BlockStatus,
pub block_hash: Option<StarknetBlockHash>,
pub parent_hash: StarknetBlockHash,
pub block_number: Option<StarknetBlockNumber>,
pub new_root: Option<GlobalRoot>,
pub timestamp: StarknetBlockTimestamp,
pub sequencer_address: SequencerAddress,
pub transactions: Transactions,
}
/// Hash and height of a block; fields are renamed to the spec's
/// `block_hash` / `block_number` JSON keys.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct BlockHashAndNumber {
#[serde(rename = "block_hash")]
pub hash: StarknetBlockHash,
#[serde(rename = "block_number")]
pub number: StarknetBlockNumber,
}
impl Block {
/// Constructs [Block] from [RawBlock]
// All optional fields are populated: a RawBlock always describes a
// non-pending block.
pub fn from_raw(block: RawBlock, transactions: Transactions) -> Self {
Self {
status: block.status,
block_hash: Some(block.hash),
parent_hash: block.parent_hash,
block_number: Some(block.number),
new_root: Some(block.root),
timestamp: block.timestamp,
sequencer_address: block.sequencer,
transactions,
}
}
/// Constructs [Block] from [sequencer's block representation](crate::sequencer::reply::Block)
///
/// `scope` controls whether full transactions or only their hashes are
/// embedded in the result.
pub fn from_sequencer_scoped(
block: sequencer::reply::MaybePendingBlock,
scope: BlockResponseScope,
) -> Self {
// Build the transaction list first, while `block` is still whole.
let transactions = match scope {
BlockResponseScope::TransactionHashes => {
let hashes = block.transactions().iter().map(|t| t.hash()).collect();
Transactions::HashesOnly(hashes)
}
BlockResponseScope::FullTransactions => {
let transactions = block.transactions().iter().map(|t| t.into()).collect();
Transactions::Full(transactions)
}
};
use sequencer::reply::MaybePendingBlock;
match block {
MaybePendingBlock::Block(block) => Self {
status: block.status.into(),
block_hash: Some(block.block_hash),
parent_hash: block.parent_block_hash,
block_number: Some(block.block_number),
new_root: Some(block.state_root),
timestamp: block.timestamp,
sequencer_address: block
.sequencer_address
// Default value for cairo <0.8.0 is 0
.unwrap_or(SequencerAddress(StarkHash::ZERO)),
transactions,
},
// Pending blocks have no hash, number or state root yet.
MaybePendingBlock::Pending(pending) => Self {
status: pending.status.into(),
block_hash: None,
parent_hash: pending.parent_hash,
block_number: None,
new_root: None,
timestamp: pending.timestamp,
sequencer_address: pending.sequencer_address,
transactions,
},
}
}
}
/// Starkware specific RPC error codes.
// TODO verify with Starkware how `sequencer::reply::starknet::ErrorCode` should
// map to the values below in all JSON-RPC API methods. Also verify if
// the mapping should be method-specific or common for all methods.
// The explicit discriminants are the wire codes; they are consumed as `i32`
// by the `TryFrom<i32>` and `From<ErrorCode> for jsonrpsee::...::Error`
// impls further below in this module.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ErrorCode {
FailedToReceiveTransaction = 1,
ContractNotFound = 20,
InvalidMessageSelector = 21,
InvalidCallData = 22,
InvalidBlockId = 24,
InvalidTransactionHash = 25,
InvalidTransactionIndex = 27,
InvalidContractClassHash = 28,
PageSizeTooBig = 31,
NoBlocks = 32,
ContractError = 40,
}
/// We can have this equality and should have it in order to use it for tests. It is meant to
/// be used when expecting that the rpc result is an error. The rpc result should first be
/// accessed with [`Result::unwrap_err`], then compared to the expected [`ErrorCode`] with
/// [`assert_eq!`].
#[cfg(test)]
impl PartialEq<jsonrpsee::core::error::Error> for ErrorCode {
fn eq(&self, other: &jsonrpsee::core::error::Error) -> bool {
use jsonrpsee::core::error::Error;
use jsonrpsee::types::error::CallError;
if let Error::Call(CallError::Custom(custom)) = other {
// this is quite ackward dance to go back to error level then come back to the
// custom error object. it however allows not having the json structure in two
// places, and leaning on ErrorObject partialeq impl.
let repr = match self {
// PageSizeTooBig cannot go through `Error::from(ErrorCode)` because
// that conversion deliberately panics for it in debug builds (the
// error data would be lost); build it from the storage error instead.
ErrorCode::PageSizeTooBig => {
Error::from(crate::storage::EventFilterError::PageSizeTooBig(
crate::storage::StarknetEventsTable::PAGE_SIZE_LIMIT,
))
}
other => Error::from(*other),
};
let repr = match repr {
Error::Call(CallError::Custom(repr)) => repr,
unexpected => unreachable!("using pathfinders ErrorCode to create jsonrpsee did not create a custom error: {unexpected:?}")
};
&repr == custom
} else {
// Only custom call errors can ever correspond to an ErrorCode.
false
}
}
}
impl TryFrom<i32> for ErrorCode {
type Error = i32;
fn try_from(code: i32) -> Result<ErrorCode, Self::Error> {
use ErrorCode::*;
Ok(match code {
1 => FailedToReceiveTransaction,
20 => ContractNotFound,
21 => InvalidMessageSelector,
22 => InvalidCallData,
24 => InvalidBlockId,
25 => InvalidTransactionHash,
27 => InvalidTransactionIndex,
28 => InvalidContractClassHash,
31 => PageSizeTooBig,
32 => NoBlocks,
40 => ContractError,
x => return Err(x),
})
}
}
impl ErrorCode {
/// Returns the message specified in the openrpc api spec.
fn as_str(&self) -> &'static str {
match self {
ErrorCode::FailedToReceiveTransaction => "Failed to write transaction",
ErrorCode::ContractNotFound => "Contract not found",
ErrorCode::InvalidMessageSelector => "Invalid message selector",
ErrorCode::InvalidCallData => "Invalid call data",
ErrorCode::InvalidBlockId => "Invalid block id",
ErrorCode::InvalidTransactionHash => "Invalid transaction hash",
ErrorCode::InvalidTransactionIndex => "Invalid transaction index in a block",
ErrorCode::InvalidContractClassHash => {
"The supplied contract class hash is invalid or unknown"
}
ErrorCode::PageSizeTooBig => "Requested page size is too big",
ErrorCode::ContractError => "Contract error",
ErrorCode::NoBlocks => "There are no blocks",
}
}
}
/// Renders the spec-mandated error message (see [`ErrorCode::as_str`]).
///
/// Implemented as `Display` rather than a hand-written `ToString`:
/// implementing `ToString` directly is a known anti-pattern (clippy's
/// `to_string_trait_impl`), and the blanket `impl<T: Display> ToString for T`
/// keeps every existing `ecode.to_string()` caller working while also making
/// `ErrorCode` usable in `format!`/`write!`.
impl std::fmt::Display for ErrorCode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
// Converts an [`ErrorCode`] into a jsonrpsee custom call error carrying the
// numeric code and the spec message, but no error `data`.
impl From<ErrorCode> for jsonrpsee::core::error::Error {
fn from(ecode: ErrorCode) -> Self {
use jsonrpsee::core::error::Error;
use jsonrpsee::types::error::{CallError, ErrorObject};
if ecode == ErrorCode::PageSizeTooBig {
// Debug-only guard: PageSizeTooBig must be built from
// EventFilterError so the page-size limit ends up in the error data;
// this path would silently drop it. Release builds fall through.
#[cfg(debug_assertions)]
panic!("convert jsonrpsee::...::Error from EventFilterError to get error data");
}
// The enum discriminant doubles as the wire error code.
let error = ecode as i32;
Error::Call(CallError::Custom(ErrorObject::owned(
error,
ecode.to_string(),
// this is insufficient in every situation (PageSizeTooBig)
None::<()>,
)))
}
}
/// L2 state update as returned by the [RPC API v0.1.0](https://github.com/starkware-libs/starknet-specs/blob/30e5bafcda60c31b5fb4021b4f5ddcfc18d2ff7d/api/starknet_api_openrpc.json#L846).
///
/// # Serialization
///
/// This structure derives [serde::Deserialize] without depending
/// on the `rpc-full-serde` feature because state updates are
/// stored in the DB as compressed raw JSON bytes.
#[skip_serializing_none]
#[derive(Clone, Debug, serde::Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct StateUpdate {
/// None for `pending`
#[serde(default)]
pub block_hash: Option<StarknetBlockHash>,
// State tree root after this update was applied.
pub new_root: GlobalRoot,
// State tree root this update builds on.
pub old_root: GlobalRoot,
pub state_diff: state_update::StateDiff,
}
impl From<sequencer::reply::StateUpdate> for StateUpdate {
fn from(x: sequencer::reply::StateUpdate) -> Self {
Self {
block_hash: x.block_hash,
new_root: x.new_root,
old_root: x.old_root,
state_diff: x.state_diff.into(),
}
}
}
/// State update related substructures.
///
/// # Serialization
///
/// All structures in this module derive [serde::Deserialize] without depending
/// on the `rpc-full-serde` feature because state updates are
/// stored in the DB as compressed raw JSON bytes.
pub mod state_update {
use crate::core::{
ClassHash, ContractAddress, ContractNonce, StorageAddress, StorageValue,
};
use crate::sequencer;
use serde::{Deserialize, Serialize};
/// L2 state diff.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct StateDiff {
pub storage_diffs: Vec<StorageDiff>,
pub declared_contracts: Vec<DeclaredContract>,
pub deployed_contracts: Vec<DeployedContract>,
pub nonces: Vec<Nonce>,
}
impl From<sequencer::reply::state_update::StateDiff> for StateDiff {
fn from(x: sequencer::reply::state_update::StateDiff) -> Self {
Self {
// The sequencer groups storage diffs per contract; flatten that
// map into one list, stamping each entry with its contract address.
storage_diffs: x
.storage_diffs
.into_iter()
.flat_map(|(contract_address, storage_diffs)| {
storage_diffs.into_iter().map(move |x| StorageDiff {
address: contract_address,
key: x.key,
value: x.value,
})
})
.collect(),
declared_contracts: x
.declared_contracts
.into_iter()
.map(|class_hash| DeclaredContract { class_hash })
.collect(),
deployed_contracts: x
.deployed_contracts
.into_iter()
.map(|deployed_contract| DeployedContract {
address: deployed_contract.address,
class_hash: deployed_contract.class_hash,
})
.collect(),
// FIXME once the sequencer API provides the nonces
nonces: vec![],
}
}
}
/// L2 storage diff of a contract.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct StorageDiff {
pub address: ContractAddress,
pub key: StorageAddress,
pub value: StorageValue,
}
// impl From<sequencer::reply::state_update::StorageDiff> for StorageItem {
//     fn from(x: sequencer::reply::state_update::StorageDiff) -> Self {
//         Self {
//             key: x.key,
//             value: x.value,
//         }
//     }
// }
/// L2 state diff declared contract item.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct DeclaredContract {
pub class_hash: ClassHash,
}
/// L2 state diff deployed contract item.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct DeployedContract {
pub address: ContractAddress,
pub class_hash: ClassHash,
}
/// L2 state diff nonce item.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct Nonce {
pub contract_address: ContractAddress,
pub nonce: ContractNonce,
}
}
/// L2 transaction as returned by the RPC API.
///
// Internally tagged: the JSON object carries a `"type"` discriminator field
// whose value is one of the renamed strings below.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(tag = "type")]
pub enum Transaction {
#[serde(rename = "DECLARE")]
Declare(DeclareTransaction),
#[serde(rename = "INVOKE")]
Invoke(InvokeTransaction),
#[serde(rename = "DEPLOY")]
Deploy(DeployTransaction),
}
impl Transaction {
pub fn hash(&self) -> StarknetTransactionHash {
match self {
Transaction::Declare(declare) => declare.common.hash,
Transaction::Invoke(invoke) => invoke.common.hash,
Transaction::Deploy(deploy) => deploy.hash,
}
}
}
/// Fields shared by declare and invoke transactions; flattened into the
/// enclosing transaction objects.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonTransactionProperties {
#[serde(rename = "transaction_hash")]
pub hash: StarknetTransactionHash,
#[serde_as(as = "FeeAsHexStr")]
pub max_fee: Fee,
#[serde_as(as = "TransactionVersionAsHexStr")]
pub version: TransactionVersion,
pub signature: Vec<TransactionSignatureElem>,
pub nonce: TransactionNonce,
}
/// `DECLARE` transaction payload as returned by the RPC API.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeclareTransaction {
#[serde(flatten)]
pub common: CommonTransactionProperties,
// Hash of the class being declared.
pub class_hash: ClassHash,
pub sender_address: ContractAddress,
}
/// `INVOKE` transaction payload as returned by the RPC API.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct InvokeTransaction {
#[serde(flatten)]
pub common: CommonTransactionProperties,
pub contract_address: ContractAddress,
pub entry_point_selector: EntryPoint,
pub calldata: Vec<CallParam>,
}
/// `DEPLOY` transaction payload as returned by the RPC API.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeployTransaction {
// This part is a subset of CommonTransactionProperties
// (deploy transactions have no max_fee, signature or nonce).
#[serde(rename = "transaction_hash")]
pub hash: StarknetTransactionHash,
#[serde_as(as = "TransactionVersionAsHexStr")]
pub version: TransactionVersion,
pub contract_address: ContractAddress,
pub contract_address_salt: ContractAddressSalt,
pub class_hash: ClassHash,
pub constructor_calldata: Vec<ConstructorParam>,
}
impl TryFrom<sequencer::reply::Transaction> for Transaction {
    type Error = anyhow::Error;
    /// Fails when the sequencer wrapper carries no transaction payload.
    fn try_from(txn: sequencer::reply::Transaction) -> Result<Self, Self::Error> {
        match txn.transaction {
            Some(inner) => Ok(inner.into()),
            None => Err(anyhow::anyhow!("Transaction not found.")),
        }
    }
}
impl From<sequencer::reply::transaction::Transaction> for Transaction {
    /// Delegates to the by-reference conversion below.
    fn from(txn: sequencer::reply::transaction::Transaction) -> Self {
        (&txn).into()
    }
}
// Converts a sequencer transaction (by reference) into the RPC shape.
// Fields absent from a given sequencer variant are filled with zeroed
// defaults, see the per-variant comments.
impl From<&sequencer::reply::transaction::Transaction> for Transaction {
fn from(txn: &sequencer::reply::transaction::Transaction) -> Self {
match txn {
sequencer::reply::transaction::Transaction::Invoke(txn) => {
Self::Invoke(InvokeTransaction {
common: CommonTransactionProperties {
hash: txn.transaction_hash,
max_fee: txn.max_fee,
// no `version` in invoke transactions
version: TransactionVersion(Default::default()),
signature: txn.signature.clone(),
// no `nonce` in invoke transactions
nonce: TransactionNonce(Default::default()),
},
contract_address: txn.contract_address,
entry_point_selector: txn.entry_point_selector,
calldata: txn.calldata.clone(),
})
}
sequencer::reply::transaction::Transaction::Declare(txn) => {
// Declare carries the full common set; copied over verbatim.
Self::Declare(DeclareTransaction {
common: CommonTransactionProperties {
hash: txn.transaction_hash,
max_fee: txn.max_fee,
version: txn.version,
signature: txn.signature.clone(),
nonce: txn.nonce,
},
class_hash: txn.class_hash,
sender_address: txn.sender_address,
})
}
sequencer::reply::transaction::Transaction::Deploy(txn) => {
Self::Deploy(DeployTransaction {
hash: txn.transaction_hash,
// no `version` in deploy transactions
version: TransactionVersion(Default::default()),
contract_address: txn.contract_address,
contract_address_salt: txn.contract_address_salt,
class_hash: txn.class_hash,
constructor_calldata: txn.constructor_calldata.clone(),
})
}
}
}
}
/// L2 transaction receipt as returned by the RPC API.
// serde(untagged): the variant is recovered from which fields are present in
// the JSON; see the inline notes on the ambiguous/pending cases.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(untagged)]
pub enum TransactionReceipt {
Invoke(InvokeTransactionReceipt),
// We can't differentiate between declare and deploy in an untagged enum: they
// have the same properties in the JSON.
DeclareOrDeploy(DeclareOrDeployTransactionReceipt),
// Pending receipts don't have status, status_data, block_hash, block_number fields
PendingInvoke(PendingInvokeTransactionReceipt),
PendingDeclareOrDeploy(PendingDeclareOrDeployTransactionReceipt),
}
impl TransactionReceipt {
    /// Hash of the transaction this receipt belongs to, for any variant.
    pub fn hash(&self) -> StarknetTransactionHash {
        match self {
            Self::Invoke(receipt) => receipt.common.transaction_hash,
            Self::DeclareOrDeploy(receipt) => receipt.common.transaction_hash,
            Self::PendingInvoke(receipt) => receipt.common.transaction_hash,
            Self::PendingDeclareOrDeploy(receipt) => receipt.common.transaction_hash,
        }
    }
}
/// Receipt for an `INVOKE` transaction included in a block.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct InvokeTransactionReceipt {
#[serde(flatten)]
pub common: CommonTransactionReceiptProperties,
pub messages_sent: Vec<transaction_receipt::MessageToL1>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub l1_origin_message: Option<transaction_receipt::MessageToL2>,
pub events: Vec<transaction_receipt::Event>,
}
/// Receipt fields shared by all non-pending receipt variants; flattened into
/// the enclosing receipt objects.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonTransactionReceiptProperties {
pub transaction_hash: StarknetTransactionHash,
#[serde_as(as = "FeeAsHexStr")]
pub actual_fee: Fee,
pub status: TransactionStatus,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status_data: Option<String>,
pub block_hash: StarknetBlockHash,
pub block_number: StarknetBlockNumber,
}
/// Receipt for a `DECLARE` or `DEPLOY` transaction (indistinguishable in the
/// untagged JSON representation).
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeclareOrDeployTransactionReceipt {
#[serde(flatten)]
pub common: CommonTransactionReceiptProperties,
}
/// Receipt for an `INVOKE` transaction that is still pending (no block data).
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct PendingInvokeTransactionReceipt {
#[serde(flatten)]
pub common: CommonPendingTransactionReceiptProperties,
pub messages_sent: Vec<transaction_receipt::MessageToL1>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub l1_origin_message: Option<transaction_receipt::MessageToL2>,
pub events: Vec<transaction_receipt::Event>,
}
/// Receipt fields shared by pending receipt variants (no status or block data
/// yet); flattened into the enclosing receipt objects.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonPendingTransactionReceiptProperties {
pub transaction_hash: StarknetTransactionHash,
#[serde_as(as = "FeeAsHexStr")]
pub actual_fee: Fee,
}
/// Receipt for a pending `DECLARE` or `DEPLOY` transaction.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct PendingDeclareOrDeployTransactionReceipt {
#[serde(flatten)]
pub common: CommonPendingTransactionReceiptProperties,
}
impl TransactionReceipt {
/// Builds a pending receipt variant from a sequencer receipt.
///
/// `transaction` is only consulted to pick the variant (invoke vs
/// declare/deploy); a missing `actual_fee` is reported as zero.
pub fn pending_from(
receipt: sequencer::reply::transaction::Receipt,
transaction: &sequencer::reply::transaction::Transaction,
) -> Self {
match transaction {
sequencer::reply::transaction::Transaction::Declare(_)
| sequencer::reply::transaction::Transaction::Deploy(_) => {
Self::PendingDeclareOrDeploy(PendingDeclareOrDeployTransactionReceipt {
common: CommonPendingTransactionReceiptProperties {
transaction_hash: receipt.transaction_hash,
actual_fee: receipt
.actual_fee
.unwrap_or_else(|| Fee(Default::default())),
},
})
}
sequencer::reply::transaction::Transaction::Invoke(_) => {
Self::PendingInvoke(PendingInvokeTransactionReceipt {
common: CommonPendingTransactionReceiptProperties {
transaction_hash: receipt.transaction_hash,
actual_fee: receipt
.actual_fee
.unwrap_or_else(|| Fee(Default::default())),
},
messages_sent: receipt
.l2_to_l1_messages
.into_iter()
.map(transaction_receipt::MessageToL1::from)
.collect(),
l1_origin_message: receipt
.l1_to_l2_consumed_message
.map(transaction_receipt::MessageToL2::from),
events: receipt
.events
.into_iter()
.map(transaction_receipt::Event::from)
.collect(),
})
}
}
}
/// Builds a non-pending receipt variant from a sequencer receipt plus the
/// block data (`status`, `block_hash`, `block_number`) the receipt
/// belongs to.
///
/// As in [`Self::pending_from`], `transaction` only selects the variant.
pub fn with_block_data(
receipt: sequencer::reply::transaction::Receipt,
status: BlockStatus,
block_hash: StarknetBlockHash,
block_number: StarknetBlockNumber,
transaction: &sequencer::reply::transaction::Transaction,
) -> Self {
match transaction {
sequencer::reply::transaction::Transaction::Declare(_)
| sequencer::reply::transaction::Transaction::Deploy(_) => {
Self::DeclareOrDeploy(DeclareOrDeployTransactionReceipt {
common: CommonTransactionReceiptProperties {
transaction_hash: receipt.transaction_hash,
actual_fee: receipt
.actual_fee
.unwrap_or_else(|| Fee(Default::default())),
status: status.into(),
// TODO: at the moment not available in sequencer replies
status_data: None,
block_hash,
block_number,
},
})
}
sequencer::reply::transaction::Transaction::Invoke(_) => {
Self::Invoke(InvokeTransactionReceipt {
common: CommonTransactionReceiptProperties {
transaction_hash: receipt.transaction_hash,
actual_fee: receipt
.actual_fee
.unwrap_or_else(|| Fee(Default::default())),
status: status.into(),
// TODO: at the moment not available in sequencer replies
status_data: None,
block_hash,
block_number,
},
messages_sent: receipt
.l2_to_l1_messages
.into_iter()
.map(transaction_receipt::MessageToL1::from)
.collect(),
l1_origin_message: receipt
.l1_to_l2_consumed_message
.map(transaction_receipt::MessageToL2::from),
events: receipt
.events
.into_iter()
.map(transaction_receipt::Event::from)
.collect(),
})
}
}
}
}
/// Transaction receipt related substructures.
pub mod transaction_receipt {
use crate::{
core::{
ContractAddress, EthereumAddress, EventData, EventKey, L1ToL2MessagePayloadElem,
L2ToL1MessagePayloadElem,
},
rpc::serde::EthereumAddressAsHexStr,
sequencer::reply::transaction::{L1ToL2Message, L2ToL1Message},
};
use serde::Serialize;
use serde_with::serde_as;
use std::convert::From;
/// Message sent from L2 to L1.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct MessageToL1 {
#[serde_as(as = "EthereumAddressAsHexStr")]
pub to_address: EthereumAddress,
pub payload: Vec<L2ToL1MessagePayloadElem>,
}
// Direct field-for-field copy from the sequencer representation.
impl From<L2ToL1Message> for MessageToL1 {
fn from(msg: L2ToL1Message) -> Self {
Self {
to_address: msg.to_address,
payload: msg.payload,
}
}
}
/// Message sent from L1 to L2.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct MessageToL2 {
#[serde_as(as = "EthereumAddressAsHexStr")]
pub from_address: EthereumAddress,
pub payload: Vec<L1ToL2MessagePayloadElem>,
}
// Direct field-for-field copy from the sequencer representation.
impl From<L1ToL2Message> for MessageToL2 {
fn from(msg: L1ToL2Message) -> Self {
Self {
from_address: msg.from_address,
payload: msg.payload,
}
}
}
/// Event emitted as a part of a transaction.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct Event {
pub from_address: ContractAddress,
pub keys: Vec<EventKey>,
pub data: Vec<EventData>,
}
// Direct field-for-field copy from the sequencer representation.
impl From<crate::sequencer::reply::transaction::Event> for Event {
fn from(e: crate::sequencer::reply::transaction::Event) -> Self {
Self {
from_address: e.from_address,
keys: e.keys,
data: e.data,
}
}
}
}
/// Represents transaction status.
///
/// Serialized as the SCREAMING_CASE strings mandated by the spec; mirrors
/// [BlockStatus], from which it can be converted.
#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub enum TransactionStatus {
#[serde(rename = "PENDING")]
Pending,
#[serde(rename = "ACCEPTED_ON_L2")]
AcceptedOnL2,
#[serde(rename = "ACCEPTED_ON_L1")]
AcceptedOnL1,
#[serde(rename = "REJECTED")]
Rejected,
}
impl From<BlockStatus> for TransactionStatus {
fn from(status: BlockStatus) -> Self {
match status {
BlockStatus::Pending => TransactionStatus::Pending,
BlockStatus::AcceptedOnL2 => TransactionStatus::AcceptedOnL2,
BlockStatus::AcceptedOnL1 => TransactionStatus::AcceptedOnL1,
BlockStatus::Rejected => TransactionStatus::Rejected,
}
}
}
/// Describes Starknet's syncing status RPC reply.
// serde(untagged): serializes either as the JSON boolean `false` (not
// syncing) or as a status object.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(untagged)]
pub enum Syncing {
False(bool),
Status(syncing::Status),
}
impl std::fmt::Display for Syncing {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Syncing::False(_) => f.write_str("false"),
Syncing::Status(status) => {
write!(f, "{}", status)
}
}
}
}
/// Starknet's syncing status substructures.
pub mod syncing {
use crate::{
core::{StarknetBlockHash, StarknetBlockNumber},
rpc::serde::StarknetBlockNumberAsHexStr,
};
use serde::Serialize;
use serde_with::serde_as;
/// Represents Starknet node syncing status.
// Each NumberedBlock is flattened with a field-name prefix, producing the
// spec's starting_/current_/highest_ block_hash and block_num keys.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct Status {
#[serde(flatten, with = "prefix_starting")]
pub starting: NumberedBlock,
#[serde(flatten, with = "prefix_current")]
pub current: NumberedBlock,
#[serde(flatten, with = "prefix_highest")]
pub highest: NumberedBlock,
}
// Generates the serde prefix adapters used by the `with = ...` above.
serde_with::with_prefix!(prefix_starting "starting_");
serde_with::with_prefix!(prefix_current "current_");
serde_with::with_prefix!(prefix_highest "highest_");
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"starting: {:?}, current: {:?}, highest: {:?}",
self.starting, self.current, self.highest,
)
}
}
/// Block hash and a number, for `starknet_syncing` response only.
#[serde_as]
#[derive(Clone, Copy, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct NumberedBlock {
#[serde(rename = "block_hash")]
pub hash: StarknetBlockHash,
#[serde_as(as = "StarknetBlockNumberAsHexStr")]
#[serde(rename = "block_num")]
pub number: StarknetBlockNumber,
}
// Compact `(hash, number)` rendering used by Status's Display above.
impl std::fmt::Debug for NumberedBlock {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(fmt, "({}, {})", self.hash.0, self.number.0)
}
}
impl From<(StarknetBlockHash, StarknetBlockNumber)> for NumberedBlock {
fn from((hash, number): (StarknetBlockHash, StarknetBlockNumber)) -> Self {
NumberedBlock { hash, number }
}
}
/// Helper to make it a bit less painful to write examples.
// Panics on an invalid hex string; acceptable in test-only code.
#[cfg(test)]
impl<'a> From<(&'a str, u64)> for NumberedBlock {
fn from((h, n): (&'a str, u64)) -> Self {
use stark_hash::StarkHash;
NumberedBlock {
hash: StarknetBlockHash(StarkHash::from_hex_str(h).unwrap()),
number: StarknetBlockNumber(n),
}
}
}
}
// Round-trip check for Syncing's untagged serde representation: each example
// must deserialize to the expected value and serialize back byte-identically.
#[test]
fn roundtrip_syncing() {
use syncing::NumberedBlock;
// (source line, JSON input, expected parsed value)
let examples = [
(line!(), "false", Syncing::False(false)),
// this shouldn't exist but it exists now
(line!(), "true", Syncing::False(true)),
(
line!(),
r#"{"starting_block_hash":"0xa","starting_block_num":"0x1","current_block_hash":"0xb","current_block_num":"0x2","highest_block_hash":"0xc","highest_block_num":"0x3"}"#,
Syncing::Status(syncing::Status {
starting: NumberedBlock::from(("a", 1)),
current: NumberedBlock::from(("b", 2)),
highest: NumberedBlock::from(("c", 3)),
}),
),
];
for (line, input, expected) in examples {
let parsed = serde_json::from_str::<Syncing>(input).unwrap();
let output = serde_json::to_string(&parsed).unwrap();
assert_eq!(parsed, expected, "example from line {}", line);
assert_eq!(&output, input, "example from line {}", line);
}
}
/// Describes an emitted event returned by starknet_getEvents
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct EmittedEvent {
pub data: Vec<EventData>,
pub keys: Vec<EventKey>,
// Address of the contract that emitted the event.
pub from_address: ContractAddress,
/// [None] for pending events.
pub block_hash: Option<StarknetBlockHash>,
/// [None] for pending events.
pub block_number: Option<StarknetBlockNumber>,
pub transaction_hash: StarknetTransactionHash,
}
impl From<crate::storage::StarknetEmittedEvent> for EmittedEvent {
fn from(event: crate::storage::StarknetEmittedEvent) -> Self {
Self {
data: event.data,
keys: event.keys,
from_address: event.from_address,
block_hash: Some(event.block_hash),
block_number: Some(event.block_number),
transaction_hash: event.transaction_hash,
}
}
}
/// Result type for starknet_getEvents
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct GetEventsResult {
    /// Events matching the query.
    pub events: Vec<EmittedEvent>,
    /// Index of the returned page.
    pub page_number: usize,
    /// True when no further pages are available.
    pub is_last_page: bool,
}
/// Result type for starknet_addInvokeTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct InvokeTransactionResult {
    /// Hash assigned to the submitted transaction.
    pub transaction_hash: StarknetTransactionHash,
}
/// Result type for starknet_addDeclareTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct DeclareTransactionResult {
    /// Hash assigned to the submitted transaction.
    pub transaction_hash: StarknetTransactionHash,
    /// Hash of the declared class.
    pub class_hash: ClassHash,
}
/// Result type for starknet_addDeployTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct DeployTransactionResult {
    /// Hash assigned to the submitted transaction.
    pub transaction_hash: StarknetTransactionHash,
    /// Address of the newly deployed contract.
    pub contract_address: ContractAddress,
}
/// Return type of transaction fee estimation
#[serde_as]
#[derive(Clone, Debug, serde::Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct FeeEstimate {
    /// The Ethereum gas cost of the transaction
    // Serialized on the wire as "gas_consumed".
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    #[serde(rename = "gas_consumed")]
    pub consumed: web3::types::H256,
    /// The gas price (in gwei) that was used in the cost estimation (input to fee estimation)
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    pub gas_price: web3::types::H256,
    /// The estimated fee for the transaction (in gwei), product of gas_consumed and gas_price
    // Serialized on the wire as "overall_fee".
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    #[serde(rename = "overall_fee")]
    pub fee: web3::types::H256,
}
#[cfg(test)]
mod tests {
// Loads a JSON fixture and strips spaces/newlines so fixtures can be
// pretty-printed on disk while comparisons use compact JSON.
macro_rules! fixture {
    ($file_name:literal) => {
        include_str!(concat!("../../fixtures/rpc/0.31.0/", $file_name))
            .replace(&[' ', '\n'], "")
    };
}
/// The aim of these tests is to check if serialization works correctly
/// **without resorting to deserialization to prepare the test data**,
/// which in itself could contain an "opposite phase" bug that cancels out.
///
/// Deserialization is tested btw, because the fixture and the data is already available.
///
/// These tests were added due to recurring regressions stemming from, among others:
/// - `serde(flatten)` and it's side-effects (for example when used in conjunction with `skip_serializing_none`),
/// - `*AsDecimalStr*` creeping in from `sequencer::reply` as opposed to spec.
mod serde {
use super::super::*;
use crate::starkhash;
use pretty_assertions::assert_eq;
#[test]
fn block_and_transaction() {
    // Test-only constructor: a fully populated block exercising all three
    // transaction variants (declare, invoke, deploy).
    impl Block {
        pub fn test_data() -> Self {
            let common = CommonTransactionProperties {
                hash: StarknetTransactionHash(starkhash!("04")),
                max_fee: Fee(web3::types::H128::from_low_u64_be(0x5)),
                version: TransactionVersion(web3::types::H256::from_low_u64_be(0x6)),
                signature: vec![TransactionSignatureElem(starkhash!("07"))],
                nonce: TransactionNonce(starkhash!("08")),
            };
            Self {
                status: BlockStatus::AcceptedOnL1,
                block_hash: Some(StarknetBlockHash(starkhash!("00"))),
                parent_hash: StarknetBlockHash(starkhash!("01")),
                block_number: Some(StarknetBlockNumber::GENESIS),
                new_root: Some(GlobalRoot(starkhash!("02"))),
                timestamp: StarknetBlockTimestamp(1),
                sequencer_address: SequencerAddress(starkhash!("03")),
                transactions: Transactions::Full(vec![
                    Transaction::Declare(DeclareTransaction {
                        common: common.clone(),
                        class_hash: ClassHash(starkhash!("09")),
                        sender_address: ContractAddress(starkhash!("0a")),
                    }),
                    Transaction::Invoke(InvokeTransaction {
                        common,
                        contract_address: ContractAddress(starkhash!("0b")),
                        entry_point_selector: EntryPoint(starkhash!("0c")),
                        calldata: vec![CallParam(starkhash!("0d"))],
                    }),
                    Transaction::Deploy(DeployTransaction {
                        hash: StarknetTransactionHash(starkhash!("0e")),
                        version: TransactionVersion(
                            web3::types::H256::from_low_u64_be(1),
                        ),
                        contract_address: ContractAddress(starkhash!("0f")),
                        contract_address_salt: ContractAddressSalt(starkhash!("ee")),
                        class_hash: ClassHash(starkhash!("10")),
                        constructor_calldata: vec![ConstructorParam(starkhash!("11"))],
                    }),
                ]),
            }
        }
    }
    let data = vec![
        // All fields populated
        Block::test_data(),
        // All optional are None
        Block {
            block_hash: None,
            block_number: None,
            new_root: None,
            transactions: Transactions::HashesOnly(vec![StarknetTransactionHash(
                starkhash!("04"),
            )]),
            ..Block::test_data()
        },
    ];
    // Serialize hand-built data against the fixture, then deserialize the
    // fixture and compare — catches "opposite phase" serde bugs (see module docs).
    assert_eq!(
        serde_json::to_string(&data).unwrap(),
        fixture!("block.json")
    );
    assert_eq!(
        serde_json::from_str::<Vec<Block>>(&fixture!("block.json")).unwrap(),
        data
    );
}
#[test]
fn receipt() {
    // Test-only constructors for each receipt flavour; values are distinct
    // small constants so fixture mismatches are easy to spot.
    impl CommonTransactionReceiptProperties {
        pub fn test_data() -> Self {
            Self {
                transaction_hash: StarknetTransactionHash(starkhash!("00")),
                actual_fee: Fee(web3::types::H128::from_low_u64_be(0x1)),
                status: TransactionStatus::AcceptedOnL1,
                status_data: Some("blah".to_string()),
                block_hash: StarknetBlockHash(starkhash!("0aaa")),
                block_number: StarknetBlockNumber(3),
            }
        }
    }
    impl CommonPendingTransactionReceiptProperties {
        pub fn test_data() -> Self {
            Self {
                transaction_hash: StarknetTransactionHash(starkhash!("01")),
                actual_fee: Fee(web3::types::H128::from_low_u64_be(0x2)),
            }
        }
    }
    impl InvokeTransactionReceipt {
        pub fn test_data() -> Self {
            Self {
                common: CommonTransactionReceiptProperties::test_data(),
                messages_sent: vec![transaction_receipt::MessageToL1 {
                    to_address: crate::core::EthereumAddress(
                        web3::types::H160::from_low_u64_be(0x2),
                    ),
                    payload: vec![crate::core::L2ToL1MessagePayloadElem(starkhash!(
                        "03"
                    ))],
                }],
                l1_origin_message: Some(transaction_receipt::MessageToL2 {
                    from_address: crate::core::EthereumAddress(
                        web3::types::H160::from_low_u64_be(0x4),
                    ),
                    payload: vec![crate::core::L1ToL2MessagePayloadElem(starkhash!(
                        "05"
                    ))],
                }),
                events: vec![transaction_receipt::Event {
                    from_address: ContractAddress(starkhash!("06")),
                    keys: vec![EventKey(starkhash!("07"))],
                    data: vec![EventData(starkhash!("08"))],
                }],
            }
        }
    }
    impl PendingInvokeTransactionReceipt {
        pub fn test_data() -> Self {
            Self {
                common: CommonPendingTransactionReceiptProperties::test_data(),
                messages_sent: vec![transaction_receipt::MessageToL1 {
                    to_address: crate::core::EthereumAddress(
                        web3::types::H160::from_low_u64_be(0x5),
                    ),
                    payload: vec![crate::core::L2ToL1MessagePayloadElem(starkhash!(
                        "06"
                    ))],
                }],
                l1_origin_message: Some(transaction_receipt::MessageToL2 {
                    from_address: crate::core::EthereumAddress(
                        web3::types::H160::from_low_u64_be(0x77),
                    ),
                    payload: vec![crate::core::L1ToL2MessagePayloadElem(starkhash!(
                        "07"
                    ))],
                }),
                events: vec![transaction_receipt::Event {
                    from_address: ContractAddress(starkhash!("a6")),
                    keys: vec![EventKey(starkhash!("a7"))],
                    data: vec![EventData(starkhash!("a8"))],
                }],
            }
        }
    }
    let data = vec![
        // All fields populated
        TransactionReceipt::Invoke(InvokeTransactionReceipt::test_data()),
        // All optional are None
        TransactionReceipt::Invoke(InvokeTransactionReceipt {
            common: CommonTransactionReceiptProperties {
                status_data: None,
                ..CommonTransactionReceiptProperties::test_data()
            },
            l1_origin_message: None,
            events: vec![],
            ..InvokeTransactionReceipt::test_data()
        }),
        // Somewhat redundant, but want to exhaust the variants
        TransactionReceipt::DeclareOrDeploy(DeclareOrDeployTransactionReceipt {
            common: CommonTransactionReceiptProperties::test_data(),
        }),
        TransactionReceipt::PendingInvoke(PendingInvokeTransactionReceipt::test_data()),
        TransactionReceipt::PendingDeclareOrDeploy(
            PendingDeclareOrDeployTransactionReceipt {
                common: CommonPendingTransactionReceiptProperties::test_data(),
            },
        ),
    ];
    // Serialize against the fixture, then deserialize the fixture back.
    assert_eq!(
        serde_json::to_string(&data).unwrap(),
        fixture!("receipt.json")
    );
    assert_eq!(
        serde_json::from_str::<Vec<TransactionReceipt>>(&fixture!("receipt.json"))
            .unwrap(),
        data
    );
}
}
}
}
|
//! Tests auto-converted from "sass-spec/spec/non_conformant/sass/semicolon"
#[allow(unused)]
use super::rsass;
// From "sass-spec/spec/non_conformant/sass/semicolon/at_rule.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/content.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/debug.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/declaration.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/error.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/import.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/include.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/return.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/variable.hrx"
// From "sass-spec/spec/non_conformant/sass/semicolon/warn.hrx"
|
use minifb;
// Thin wrapper around a minifb window for a monochrome framebuffer display
// (CHIP-8 style: bool pixels, hex keypad input).
pub struct Window {
    window: minifb::Window,
    // Framebuffer dimensions in logical pixels (before minifb scaling).
    width: usize,
    height: usize,
}
impl Window {
    /// Opens an untitled window of the given logical size, scaled 8x.
    ///
    /// Panics if the native window cannot be created.
    pub fn new(width: usize, height: usize) -> Window {
        let options = {
            let mut o = minifb::WindowOptions::default();
            o.scale = minifb::Scale::X8;
            o
        };
        let window = minifb::Window::new("", width, height, options).unwrap();
        Window {
            window,
            width,
            height,
        }
    }
    /// Replaces the window title.
    pub fn set_title(&mut self, title: &str) {
        self.window.set_title(title);
    }
    /// Pushes a monochrome framebuffer to the screen: `true` pixels are drawn
    /// in light grey (0x00ECF0F1), `false` pixels in black.
    pub fn update(&mut self, buffer: &[bool]) {
        let pixels: Vec<u32> = buffer
            .iter()
            .map(|&on| if on { 0x00ECF0F1 } else { 0 })
            .collect();
        self.window
            .update_with_buffer(&pixels, self.width, self.height)
            .unwrap();
    }
    /// Whether the window is still open.
    pub fn is_open(&self) -> bool {
        self.window.is_open()
    }
    /// Returns the currently held keys mapped to keypad values, or `None` when
    /// no mappable key is held.
    pub fn get_keys_pressed(&mut self) -> Option<Vec<u8>> {
        let keys: Vec<u8> = self
            .window
            .get_keys()?
            .iter()
            .filter_map(Self::decode_key)
            .collect();
        if keys.is_empty() {
            None
        } else {
            Some(keys)
        }
    }
    /// Maps the physical 4x4 block (1234/QWER/ASDF/ZXCV) onto the hex keypad
    /// layout; unmapped keys yield `None`.
    fn decode_key(key: &minifb::Key) -> Option<u8> {
        use minifb::Key::*;
        let value = match key {
            Key1 => 0x1,
            Key2 => 0x2,
            Key3 => 0x3,
            Key4 => 0xC,
            Q => 0x4,
            W => 0x5,
            E => 0x6,
            R => 0xD,
            A => 0x7,
            S => 0x8,
            D => 0x9,
            F => 0xE,
            Z => 0xA,
            X => 0x0,
            C => 0xB,
            V => 0xF,
            _ => return None,
        };
        Some(value)
    }
}
|
//! Traits and basic implementations of packet encoding/decoding functionality.
//!
//! This module exports two main traits: `FromPacketBytes` and `ToPacketBytes`.
//! These are used to specify how various types are encoded/decoded in the ROTMG
//! network protocol. Implementations are also provided for primitives and other
//! fundamental types, which are then composed to provide implementations for
//! more complex types - the packets themselves.
mod option;
mod primitives;
mod string;
mod unit;
mod vec;
use crate::raw::RawPacket;
use std::fmt::Display;
use std::marker::PhantomData;
use std::string::FromUtf8Error;
/// An error reading or writing a packet.
#[derive(Debug, Clone, thiserror::Error)]
pub enum PacketFormatError {
    /// The end of the packet was encountered while more data was expected.
    #[error("Expected at least {0} more bytes")]
    UnexpectedEnd(usize),
    /// A string contained invalid UTF-8.
    #[error("Invalid UTF-8: {0}")]
    Utf8Error(#[from] FromUtf8Error),
    /// A field was too large to be encoded/decoded within the constraints of
    /// the required integer type.
    #[error("Field too large: cannot convert {length} to {repr}")]
    FieldTooLarge {
        /// The length that was being encoded.
        // Pre-rendered to a String so the variant is not generic over the value type.
        length: String,
        /// The type that was being used to encode the length.
        repr: &'static str,
    },
    /// An unrecognized `StatType` was encountered, making it impossible to
    /// determine how to parse the remaining data.
    #[error("No known StatType associated with value {0}")]
    UnknownStatType(u8),
    /// Couldn't identify the `PacketType` for a packet due to incomplete
    /// mappings.
    #[error("No known mapping for packet ID {0}")]
    UnmappedID(u8),
}
impl PacketFormatError {
    /// Builds a [`PacketFormatError::FieldTooLarge`] for a length that does
    /// not fit in the wire integer type `T`.
    fn too_large<T>(length: &dyn Display) -> Self {
        let repr = std::any::type_name::<T>();
        Self::FieldTooLarge {
            length: length.to_string(),
            repr,
        }
    }
}
/// A simple interface for reading bytes from a raw packet.
pub struct PacketReader<'a> {
    // Unconsumed suffix of the packet payload; shrinks as bytes are taken.
    remaining: &'a [u8],
}
impl<'a> PacketReader<'a> {
    /// Create a new reader over the payload of the given packet.
    pub fn new(packet: &'a RawPacket) -> Self {
        Self {
            remaining: packet.payload(),
        }
    }
    /// Check whether there are any unparsed bytes remaining.
    pub fn is_empty(&self) -> bool {
        self.remaining.is_empty()
    }
    /// Get the number of remaining unparsed bytes.
    pub fn len(&self) -> usize {
        self.remaining.len()
    }
    /// Attempt to take the next `n` bytes from this reader, returning an error
    /// if there aren't enough remaining.
    pub fn take(&mut self, n: usize) -> Result<&'a [u8], Box<PacketFormatError>> {
        if n <= self.remaining.len() {
            let (head, tail) = self.remaining.split_at(n);
            self.remaining = tail;
            Ok(head)
        } else {
            Err(Box::new(PacketFormatError::UnexpectedEnd(n)))
        }
    }
    /// Take all remaining bytes from this reader, leaving it empty.
    ///
    /// If no bytes remain, returns an empty slice.
    pub fn take_all(&mut self) -> &'a [u8] {
        std::mem::take(&mut self.remaining)
    }
}
/// Data that can be read from a packet.
///
/// Note that the type this is implemented on need not match the actual returned
/// type.
// e.g. `WithLen<u16, String>` implements this with `Output = String`.
pub trait FromPacketBytes {
    /// The output type of this decoder.
    type Output: Sized + 'static;
    /// Read data from the given packet.
    fn from_packet(reader: &mut PacketReader) -> Result<Self::Output, Box<PacketFormatError>>;
}
/// Data that can be written to a packet.
pub trait ToPacketBytes<T> {
    //noinspection RsSelfConvention
    /// Write data to the given packet, appending encoded bytes to `packet`.
    fn to_packet(value: T, packet: &mut Vec<u8>) -> Result<(), Box<PacketFormatError>>;
}
/// A dummy type indicating that a dynamically sized type is prefixed with a
/// length field.
// `N` is the integer type of the length prefix, `T` the payload codec.
pub struct WithLen<N, T>(PhantomData<N>, PhantomData<T>);
/// A dummy type indicating that a dynamically sized type should capture all
/// remaining bytes.
pub struct CaptureRemaining<T>(PhantomData<T>);
#[cfg(test)]
mod tests {
    use super::*;
    // Generates one #[test] per entry: encode the value, decode it back, and
    // assert the round trip is lossless.
    macro_rules! roundtrip_tests {
        ( $( $name:ident < $type:ty > ( $init:expr ) ; )* ) => {
            $(
                #[test]
                fn $name() {
                    let original: <$type as FromPacketBytes>::Output = $init;
                    let mut packet = vec![];
                    <$type as ToPacketBytes<_>>::to_packet(original.clone(), &mut packet).unwrap();
                    let mut reader = PacketReader { remaining: &packet };
                    let parsed = <$type as FromPacketBytes>::from_packet(&mut reader).unwrap();
                    assert_eq!(
                        original,
                        parsed,
                        "expected {:?}, got {:?} with encoded repr {:#x?}",
                        original,
                        parsed,
                        packet
                    );
                }
            )*
        }
    }
    roundtrip_tests! {
        // primitives
        test_roundtrip_bool<bool>(rand::random());
        test_roundtrip_u8<u8>(rand::random());
        test_roundtrip_u16<u16>(rand::random());
        test_roundtrip_u32<u32>(rand::random());
        test_roundtrip_u64<u64>(rand::random());
        test_roundtrip_i8<i8>(rand::random());
        test_roundtrip_i16<i16>(rand::random());
        test_roundtrip_i32<i32>(rand::random());
        test_roundtrip_i64<i64>(rand::random());
        // option
        test_roundtrip_none<Option<i32>>(None);
        test_roundtrip_some_i32<Option<i32>>(Some(rand::random()));
        // str
        test_roundtrip_str_u16<WithLen<u16, String>>("hello world".to_string());
        test_roundtrip_str_u32<WithLen<u32, String>>("hello world".to_string());
        // vec
        test_roundtrip_vec_u16<WithLen<u16, Vec<i32>>>(vec![1, 3, -42]);
        test_roundtrip_vec_u32<WithLen<u32, Vec<i64>>>(vec![i64::MAX, 42, 8]);
        test_roundtrip_vec_remaining<CaptureRemaining<Vec<u8>>>(b"hello world".to_vec());
        // nested dynamically sized types
        test_roundtrip_complex_none<Option<WithLen<u16, Vec<WithLen<u32, String>>>>>(None);
        test_roundtrip_complex_some<Option<WithLen<u16, Vec<WithLen<u32, String>>>>>(Some(vec!["hello".to_string(), "world".to_string()]));
    }
}
|
#[doc = "Register `APB2RSTR` reader"]
pub type R = crate::R<APB2RSTR_SPEC>;
#[doc = "Register `APB2RSTR` writer"]
pub type W = crate::W<APB2RSTR_SPEC>;
#[doc = "Field `SYSCFGRST` reader - System configuration (SYSCFG) reset"]
pub type SYSCFGRST_R = crate::BitReader;
#[doc = "Field `SYSCFGRST` writer - System configuration (SYSCFG) reset"]
pub type SYSCFGRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SDMMCRST` reader - SDMMC reset"]
pub type SDMMCRST_R = crate::BitReader;
#[doc = "Field `SDMMCRST` writer - SDMMC reset"]
pub type SDMMCRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM1RST` reader - TIM1 timer reset"]
pub type TIM1RST_R = crate::BitReader;
#[doc = "Field `TIM1RST` writer - TIM1 timer reset"]
pub type TIM1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SPI1RST` reader - SPI1 reset"]
pub type SPI1RST_R = crate::BitReader;
#[doc = "Field `SPI1RST` writer - SPI1 reset"]
pub type SPI1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM8RST` reader - TIM8 timer reset"]
pub type TIM8RST_R = crate::BitReader;
#[doc = "Field `TIM8RST` writer - TIM8 timer reset"]
pub type TIM8RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USART1RST` reader - USART1 reset"]
pub type USART1RST_R = crate::BitReader;
#[doc = "Field `USART1RST` writer - USART1 reset"]
pub type USART1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM15RST` reader - TIM15 timer reset"]
pub type TIM15RST_R = crate::BitReader;
#[doc = "Field `TIM15RST` writer - TIM15 timer reset"]
pub type TIM15RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM16RST` reader - TIM16 timer reset"]
pub type TIM16RST_R = crate::BitReader;
#[doc = "Field `TIM16RST` writer - TIM16 timer reset"]
pub type TIM16RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TIM17RST` reader - TIM17 timer reset"]
pub type TIM17RST_R = crate::BitReader;
#[doc = "Field `TIM17RST` writer - TIM17 timer reset"]
pub type TIM17RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SAI1RST` reader - Serial audio interface 1 (SAI1) reset"]
pub type SAI1RST_R = crate::BitReader;
#[doc = "Field `SAI1RST` writer - Serial audio interface 1 (SAI1) reset"]
pub type SAI1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SAI2RST` reader - Serial audio interface 2 (SAI2) reset"]
pub type SAI2RST_R = crate::BitReader;
#[doc = "Field `SAI2RST` writer - Serial audio interface 2 (SAI2) reset"]
pub type SAI2RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DFSDMRST` reader - Digital filters for sigma-delata modulators (DFSDM) reset"]
pub type DFSDMRST_R = crate::BitReader;
#[doc = "Field `DFSDMRST` writer - Digital filters for sigma-delata modulators (DFSDM) reset"]
pub type DFSDMRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each method extracts one field bit from the cached register
// value (`self.bits`). Generated code — regenerate rather than edit.
impl R {
    #[doc = "Bit 0 - System configuration (SYSCFG) reset"]
    #[inline(always)]
    pub fn syscfgrst(&self) -> SYSCFGRST_R {
        SYSCFGRST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 10 - SDMMC reset"]
    #[inline(always)]
    pub fn sdmmcrst(&self) -> SDMMCRST_R {
        SDMMCRST_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - TIM1 timer reset"]
    #[inline(always)]
    pub fn tim1rst(&self) -> TIM1RST_R {
        TIM1RST_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - SPI1 reset"]
    #[inline(always)]
    pub fn spi1rst(&self) -> SPI1RST_R {
        SPI1RST_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - TIM8 timer reset"]
    #[inline(always)]
    pub fn tim8rst(&self) -> TIM8RST_R {
        TIM8RST_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - USART1 reset"]
    #[inline(always)]
    pub fn usart1rst(&self) -> USART1RST_R {
        USART1RST_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 16 - TIM15 timer reset"]
    #[inline(always)]
    pub fn tim15rst(&self) -> TIM15RST_R {
        TIM15RST_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - TIM16 timer reset"]
    #[inline(always)]
    pub fn tim16rst(&self) -> TIM16RST_R {
        TIM16RST_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - TIM17 timer reset"]
    #[inline(always)]
    pub fn tim17rst(&self) -> TIM17RST_R {
        TIM17RST_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 21 - Serial audio interface 1 (SAI1) reset"]
    #[inline(always)]
    pub fn sai1rst(&self) -> SAI1RST_R {
        SAI1RST_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - Serial audio interface 2 (SAI2) reset"]
    #[inline(always)]
    pub fn sai2rst(&self) -> SAI2RST_R {
        SAI2RST_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 24 - Digital filters for sigma-delata modulators (DFSDM) reset"]
    #[inline(always)]
    pub fn dfsdmrst(&self) -> DFSDMRST_R {
        DFSDMRST_R::new(((self.bits >> 24) & 1) != 0)
    }
}
// Write accessors: each method returns a bit-writer proxy positioned at the
// field's bit offset (the const generic). Generated code — regenerate rather
// than edit.
impl W {
    #[doc = "Bit 0 - System configuration (SYSCFG) reset"]
    #[inline(always)]
    #[must_use]
    pub fn syscfgrst(&mut self) -> SYSCFGRST_W<APB2RSTR_SPEC, 0> {
        SYSCFGRST_W::new(self)
    }
    #[doc = "Bit 10 - SDMMC reset"]
    #[inline(always)]
    #[must_use]
    pub fn sdmmcrst(&mut self) -> SDMMCRST_W<APB2RSTR_SPEC, 10> {
        SDMMCRST_W::new(self)
    }
    #[doc = "Bit 11 - TIM1 timer reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim1rst(&mut self) -> TIM1RST_W<APB2RSTR_SPEC, 11> {
        TIM1RST_W::new(self)
    }
    #[doc = "Bit 12 - SPI1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn spi1rst(&mut self) -> SPI1RST_W<APB2RSTR_SPEC, 12> {
        SPI1RST_W::new(self)
    }
    #[doc = "Bit 13 - TIM8 timer reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim8rst(&mut self) -> TIM8RST_W<APB2RSTR_SPEC, 13> {
        TIM8RST_W::new(self)
    }
    #[doc = "Bit 14 - USART1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn usart1rst(&mut self) -> USART1RST_W<APB2RSTR_SPEC, 14> {
        USART1RST_W::new(self)
    }
    #[doc = "Bit 16 - TIM15 timer reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim15rst(&mut self) -> TIM15RST_W<APB2RSTR_SPEC, 16> {
        TIM15RST_W::new(self)
    }
    #[doc = "Bit 17 - TIM16 timer reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim16rst(&mut self) -> TIM16RST_W<APB2RSTR_SPEC, 17> {
        TIM16RST_W::new(self)
    }
    #[doc = "Bit 18 - TIM17 timer reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim17rst(&mut self) -> TIM17RST_W<APB2RSTR_SPEC, 18> {
        TIM17RST_W::new(self)
    }
    #[doc = "Bit 21 - Serial audio interface 1 (SAI1) reset"]
    #[inline(always)]
    #[must_use]
    pub fn sai1rst(&mut self) -> SAI1RST_W<APB2RSTR_SPEC, 21> {
        SAI1RST_W::new(self)
    }
    #[doc = "Bit 22 - Serial audio interface 2 (SAI2) reset"]
    #[inline(always)]
    #[must_use]
    pub fn sai2rst(&mut self) -> SAI2RST_W<APB2RSTR_SPEC, 22> {
        SAI2RST_W::new(self)
    }
    #[doc = "Bit 24 - Digital filters for sigma-delata modulators (DFSDM) reset"]
    #[inline(always)]
    #[must_use]
    pub fn dfsdmrst(&mut self) -> DFSDMRST_W<APB2RSTR_SPEC, 24> {
        DFSDMRST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "APB2 peripheral reset register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb2rstr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb2rstr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct APB2RSTR_SPEC;
impl crate::RegisterSpec for APB2RSTR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`apb2rstr::R`](R) reader structure"]
impl crate::Readable for APB2RSTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb2rstr::W`](W) writer structure"]
impl crate::Writable for APB2RSTR_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB2RSTR to value 0"]
impl crate::Resettable for APB2RSTR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use std::{collections::HashMap, time::Instant};
pub fn main() {
    let start = Instant::now();
    // Sanity-check against the worked examples from the puzzle text before
    // running the real input.
    assert_eq!(part_1_test(), 436);
    assert_eq!(part_1_test_2(), 1836);
    // println!("part_1 {:?}", part_1());
    // assert_eq!(part_2_test(), 175594);
    println!("part_2 {:?}", part_2());
    let duration = start.elapsed();
    println!("Finished after {:?}", duration);
}
fn part_2() -> u32 {
    // Puzzle input [2, 20, 0, 4, 1, 17]: the first five numbers seed the map
    // with the turn they were spoken on; 17 is the next number to speak.
    let numbers_to_turn: HashMap<u32, u32> = [(2, 1), (20, 2), (0, 3), (4, 4), (1, 5)]
        .iter()
        .cloned()
        .collect();
    find_last_number_spoken(&numbers_to_turn, 17, 6, 30000000)
}
fn part_2_test() -> u32 {
    // Example input [0, 3, 6], played out to turn 30,000,000.
    let numbers_to_turn: HashMap<u32, u32> = [(0, 1), (3, 2)].iter().cloned().collect();
    find_last_number_spoken(&numbers_to_turn, 6, 3, 30000000)
}
fn part_1() -> u32 {
    // Same seed as part_2 but only played out to turn 2020.
    let numbers_to_turn: HashMap<u32, u32> = [(2, 1), (20, 2), (0, 3), (4, 4), (1, 5)]
        .iter()
        .cloned()
        .collect();
    find_last_number_spoken(&numbers_to_turn, 17, 6, 2020)
}
fn part_1_test_2() -> u32 {
    // Example input [3, 1, 2] — expected 2020th number is 1836.
    let numbers_to_turn: HashMap<u32, u32> = [(3, 1), (1, 2)].iter().cloned().collect();
    find_last_number_spoken(&numbers_to_turn, 2, 3, 2020)
}
fn part_1_test() -> u32 {
    // Example input [0, 3, 6] — expected 2020th number is 436.
    let numbers_to_turn: HashMap<u32, u32> = [(0, 1), (3, 2)].iter().cloned().collect();
    find_last_number_spoken(&numbers_to_turn, 6, 3, 2020)
}
/// Plays the "memory game" (Van Eck sequence): on each turn, the number spoken
/// is 0 if the previous number was new, otherwise the gap between the last two
/// turns it was spoken on.
///
/// * `initial_numbers_to_turn` — numbers already spoken, mapped to the turn
///   they were last spoken on.
/// * `initial_number_spoken` — the number spoken on turn `starting_turn`.
/// * Returns the number spoken on turn `ending_turn`.
fn find_last_number_spoken(
    initial_numbers_to_turn: &HashMap<u32, u32>,
    initial_number_spoken: u32,
    starting_turn: u32,
    ending_turn: u32,
) -> u32 {
    let mut numbers_to_turn = initial_numbers_to_turn.clone();
    let mut number_spoken = initial_number_spoken;
    for turn in starting_turn..ending_turn {
        // `insert` records the current turn and hands back the previous turn
        // this number was spoken on (if any) — a single hash lookup instead of
        // the former get / get_mut / insert triple (and no clone of a Copy u32).
        number_spoken = match numbers_to_turn.insert(number_spoken, turn) {
            Some(previous_turn) => turn - previous_turn,
            None => 0,
        };
    }
    number_spoken
}
|
// list of math submodules
pub mod bundle_adjustment;
pub mod linear_algebra;
pub mod ransac; |
extern crate nalgebra as na;
extern crate rand;
extern crate image;
mod parser;
mod render;
mod utils;
use parser::Model;
use parser::Texture;
use render::Img;
use na::Vector2;
use na::Vector3;
//use na::Cross;
//use na::Norm;
//use na::Dot;
// Output image dimensions in pixels, and the z-buffer depth resolution.
const WIDTH: u32 = 800;
const HEIGHT: u32 = 800;
const DEPTH: u32 = 250;
/// Loads the model and diffuse texture, renders all faces, and writes both the
/// z-buffer image and the final color image; any step's error short-circuits
/// the chain and is reported at the end.
fn main() {
    let mut img = render::Img::create(WIDTH, HEIGHT);
    let result = parser::parse("african_head.obj")
        // Pass the function directly instead of the redundant closure
        // `|e| image::ImageError::from(e)` (clippy::redundant_closure).
        .map_err(image::ImageError::from)
        .and_then(|model| {
            parser::texture("african_head_diffuse.png").map(|texture| (model, texture))
        })
        .map(|(model, texture)| render_all(&model, &texture, &mut img))
        .and_then(|_| img.save_zbuf("out_zbuf.png"))
        .and_then(|_| img.save("out.png"));
    match result {
        Ok(()) => println!("done!"),
        Err(x) => println!("error: {}", x),
    }
}
// Renders every face of `model` into `img`, texture-mapping from `texture`
// and applying flat shading from a fixed light direction.
fn render_all(model: &Model, texture: &Texture, img: &mut Img) {
    let ref vs = model.vertices;
    let ref vt = model.vt;
    let w = WIDTH as f32;
    let h = HEIGHT as f32;
    let d = DEPTH as f32;
    // Maps normalized device coordinates in [-1, 1] to pixel / z-buffer space.
    let to_screen_coord = |vertex: &Vector3<f32>| -> Vector3<i32> {
        let x = (vertex.x + 1.0) * w / 2.0;
        let y = (vertex.y + 1.0) * h / 2.0;
        let z = (vertex.z + 1.0) * d / 2.0;
        Vector3::new(x as i32, y as i32, z as i32)
    };
    // Maps UV coordinates in [0, 1] to texel positions.
    let to_texture_pos = |vec: Vector2<f32>| -> Vector2<u32> {
        let texture_x = (vec.x * texture.width as f32) as u32;
        let texture_y = (vec.y * texture.height as f32) as u32;
        Vector2::new(texture_x, texture_y)
    };
    for face in &model.faces {
        // Gather the triangle's projected screen positions, original vertices,
        // and texture coordinates.
        let mut screen_coords = Vec::with_capacity(3);
        let mut world_coords = Vec::with_capacity(3);
        let mut vt_coords = Vec::with_capacity(3);
        for i in 0..3 {
            let ref vertex = vs[face.ps[i]];
            let vector = to_screen_coord(&project(vertex));
            screen_coords.push(vector);
            world_coords.push(vertex);
            let vt_pos = vt[face.vt[i]];
            let texture_pos = to_texture_pos(vt_pos);
            vt_coords.push(texture_pos);
        }
        // Face normal from two edge vectors; shading intensity is its
        // alignment with the fixed light direction (0, 0, -1).
        let ref x1: Vector3<f32> = world_coords[2] - world_coords[0];
        let ref x2: Vector3<f32> = world_coords[1] - world_coords[0];
        let n = x1.cross(x2).normalize();
        let ref light_dir = Vector3::new(-0., 0., -1.).normalize();
        let intensity = n.dot(light_dir);
        // Skip faces pointing away from the light (also culls back faces).
        if intensity > 0. {
            img.triangle(screen_coords[0], screen_coords[1], screen_coords[2],
                vt_coords[0], vt_coords[1], vt_coords[2],
                &texture, intensity);
        }
    }
}
/// Simple perspective projection: x and y are halved and divided by a
/// z-dependent factor; z itself is passed through unchanged.
fn project(v: &Vector3<f32>) -> Vector3<f32> {
    let divisor = 1. - v.z / 3.;
    Vector3::new(v.x * 0.5 / divisor, v.y * 0.5 / divisor, v.z)
}
|
use super::population::Population;
use super::*;
#[derive(Default)]
pub struct Optimizer {
    // Available stock lengths, in scaled integer units (see `multiplier`).
    repos_lengths: Vec<usize>,
    // Requested cut pieces, in scaled integer units.
    demand_pieces: Vec<DemandPiece>,
    // Saw blade width (material lost per cut), in scaled integer units.
    spacing: usize,
    // Seed for the deterministic RNG driving the genetic search.
    random_seed: u64,
    // Scale factor converting f64 lengths to integer units (10^decimal_places).
    multiplier: f64,
}
impl Optimizer {
    /// Creates an optimizer whose lengths are tracked with `decimal_places`
    /// digits of precision (lengths are stored internally as scaled integers).
    pub fn new(decimal_places: usize) -> Self {
        Optimizer {
            // powi is exact and cheaper than powf for an integer exponent.
            multiplier: 10.0f64.powi(decimal_places as i32),
            ..Default::default()
        }
    }
    /// Registers one available stock length.
    pub fn add_stock_length(&mut self, repos_length: f64) -> &mut Self {
        self.repos_lengths.push(self.length_to_usize(repos_length));
        self
    }
    /// Registers several available stock lengths.
    pub fn add_stock_lengths(&mut self, repos_lengths: &[f64]) -> &mut Self {
        for &repos_length in repos_lengths {
            self.add_stock_length(repos_length);
        }
        self
    }
    /// Registers one required cut length; pieces are identified by insertion order.
    pub fn add_cut_length(&mut self, demand_length: f64) -> &mut Self {
        let demand_piece = DemandPiece {
            id: self.demand_pieces.len(),
            length: self.length_to_usize(demand_length),
        };
        self.demand_pieces.push(demand_piece);
        self
    }
    /// Registers several required cut lengths.
    pub fn add_cut_lengths(&mut self, demand_lengths: &[f64]) -> &mut Self {
        for &demand_length in demand_lengths {
            self.add_cut_length(demand_length);
        }
        self
    }
    /// Sets the saw blade width (material lost between adjacent cuts).
    pub fn set_blade_width(&mut self, blade_width: f64) -> &mut Self {
        self.spacing = self.length_to_usize(blade_width);
        self
    }
    /// Sets the RNG seed so results are reproducible.
    pub fn set_random_seed(&mut self, seed: u64) -> &mut Self {
        self.random_seed = seed;
        self
    }
    /// Converts a user-facing length to internal integer units.
    fn length_to_usize(&self, length: f64) -> usize {
        // Round instead of truncating: with 2 decimal places, 1.15 * 100.0 is
        // 114.999...; a plain `as usize` cast would silently drop it to 114.
        (length * self.multiplier).round() as usize
    }
    /// Converts an internal integer length back to user-facing units.
    fn length_to_f64(&self, length: usize) -> f64 {
        length as f64 / self.multiplier
    }
    /// Runs the genetic search and returns the best cutting plan found.
    ///
    /// Returns `Err(())` when the initial random population cannot be built.
    pub fn optimize(&self) -> Result<Solution, ()> {
        const POPULATION_SIZE: usize = 100;
        let units: Vec<BinPackerUnit> = generate_random_units(
            &self.repos_lengths,
            &self.demand_pieces,
            self.spacing,
            POPULATION_SIZE,
            self.random_seed,
        )
        .ok_or(())?;
        let mut result_units = Population::new(units)
            .set_size(POPULATION_SIZE)
            .set_rand_seed(self.random_seed)
            .set_breed_factor(0.3)
            .set_survival_factor(0.5)
            // .epochs_parallel(1000, 4) // 4 CPU cores
            .epochs(1000)
            .finish();
        // NOTE(review): assumes the returned population is ordered best-first
        // — confirm against Population's documentation.
        let best_unit = &mut result_units[0];
        let mut used_repos_pieces = Vec::with_capacity(best_unit.bins.len());
        for bin in best_unit.bins.iter_mut() {
            // Lay out the longest pieces first within each stock piece.
            bin.demand_pieces.sort_by(|a, b| b.length.cmp(&a.length));
            let mut used_demand_pieces = Vec::with_capacity(bin.demand_pieces.len());
            let mut location = 0;
            for demand_piece in &bin.demand_pieces {
                let used_demand_piece = CutPiece {
                    location: self.length_to_f64(location),
                    length: self.length_to_f64(demand_piece.length),
                };
                used_demand_pieces.push(used_demand_piece);
                // Advance past the piece plus the blade width.
                location += demand_piece.length + bin.spacing;
            }
            let used_repos_piece = StockPiece {
                length: self.length_to_f64(bin.length),
                demand_pieces: used_demand_pieces,
            };
            used_repos_pieces.push(used_repos_piece);
        }
        Ok(Solution {
            fitness: best_unit.fitness(),
            repos_pieces: used_repos_pieces,
        })
    }
}
// Best cutting plan found by `Optimizer::optimize`.
pub struct Solution {
    // Fitness score of the winning unit.
    pub fitness: f64,
    // One entry per stock piece actually used.
    pub repos_pieces: Vec<StockPiece>,
}
// A used stock piece together with the cuts placed on it.
pub struct StockPiece {
    pub length: f64,
    pub demand_pieces: Vec<CutPiece>,
}
// A single cut: where it starts along the stock piece and how long it is.
pub struct CutPiece {
    pub location: f64,
    pub length: f64,
}
// Builds `num_units` bin-packer units, each packing the demand pieces in a
// freshly shuffled order; returns None if any unit cannot be constructed.
fn generate_random_units<'a, 'b>(
    repos_lengths: &'b [usize],
    demand_pieces: &'a [DemandPiece],
    spacing: usize,
    num_units: usize,
    random_seed: u64,
) -> Option<Vec<BinPackerUnit<'a, 'b>>> {
    // Seeded RNG keeps runs reproducible for a given `random_seed`.
    let mut rng: StdRng = SeedableRng::seed_from_u64(random_seed);
    let mut demand_piece_refs: Vec<&DemandPiece> = demand_pieces.iter().collect();
    let mut units = Vec::with_capacity(num_units);
    for _ in 0..num_units {
        demand_piece_refs.shuffle(&mut rng);
        units.push(BinPackerUnit::new(
            repos_lengths,
            &demand_piece_refs,
            spacing,
            &mut rng,
        )?);
    }
    Some(units)
}
|
#![feature(proc_macro_hygiene, decl_macro)]
#[macro_use] extern crate rocket;
mod database;
use regex::Regex;
use rocket::fairing::AdHoc;
use rocket::request::LenientForm;
use rocket::Request;
use rocket::response::NamedFile;
use rocket::State;
use rocket_contrib::templates::Template;
use std::collections::HashMap;
use std::env;
use std::path::Path;
use std::path::PathBuf;
// Newtype for the configured assets directory path, kept in Rocket's managed state.
struct AssetsDir(String);
/// Serve a static file from the configured assets directory.
/// Returns `None` (which Rocket turns into a 404) if the file cannot be opened.
#[get("/<asset..>")]
fn assets(asset: PathBuf, assets_dir: State<AssetsDir>) -> Option<NamedFile> {
 let full_path = Path::new(&assets_dir.0).join(asset);
 NamedFile::open(full_path).ok()
}
// Query parameters accepted by the `/blog` route.
#[derive(FromForm)]
struct Blog {
 // Article category (required).
 category: String,
 // Optional entry number within the category.
 entry: Option<u8>
}
/// Blog route: echoes the requested category and, when present, the entry
/// number; reports "wrong input" when the form fails to parse.
#[get("/blog?<article..>")]
fn blog(article: Option<LenientForm<Blog>>) -> String {
 match article {
  None => "wrong input".into(),
  Some(form) => match form.entry {
   Some(entry) => format!("category: {} entry: {}", form.category, entry),
   None => format!("category: {} (articles enlisted here:)", form.category),
  },
 }
}
#[get("/")]
fn index() -> Template {
let mut context = HashMap::new();
context.insert(0u32, 'x');
Template::render("index", context)
}
#[get("/data")]
fn data() -> Template {
let mut context = HashMap::new();
context.insert(0u32, 'x');
Template::render("data", context)
}
/// 404 catcher: reports the path that failed to match any route.
#[catch(404)]
fn not_found(req: &Request) -> String {
 let uri = req.uri();
 format!("Sorry, '{}' is not a valid path.", uri)
}
fn launch_web() -> u32 {
rocket::ignite().mount(
"/",
routes![
assets,
blog,
data,
index,
]
)
.attach(Template::fairing())
.attach(AdHoc::on_attach(
"Assets Config",
|rocket| {
let assets_dir = rocket
.config ()
.get_str ("assets_dir")
.unwrap_or("assets/")
.to_string();
Ok(rocket.manage(AssetsDir(assets_dir)))
}
))
.launch();
return 0;
}
/// Print CLI usage information to stdout; always returns 0.
fn help() -> u32 {
 let usage = [
  "",
  "rustweb --type=w ... launch webserver",
  "rustweb --type=help ... show help menu",
  "",
 ];
 for line in usage.iter() {
  println!("{}", line);
 }
 0
}
/// Entry point: parse `--key=value` flags and dispatch on `--type`.
///
/// Flags that do not match `--\w+=\w+` are ignored. `--type=w` launches
/// the web server, `--type=d` creates the database table, anything else
/// (including no flag at all) prints the help text.
fn main() {
 // args[0] is the binary name; it never matches the flag regex.
 let args: Vec<String> = env::args().collect();
 println!("args: {:?}", args);
 let re = Regex::new(r"^--(\w)+=(\w)+$").unwrap();
 let mut hash_map = HashMap::new();
 // Idiomatic for-loop replaces the original `loop { match iter.next() }`.
 for arg in args.iter().filter(|x| re.is_match(x)) {
  // The regex guarantees exactly one '=' (\w cannot match '='),
  // so the split yields exactly [key, value].
  let record: Vec<&str> = arg.split('=').collect();
  hash_map.insert(record[0], record[1]);
 }
 match hash_map.get("--type") {
  Some(&"w") => launch_web(),
  Some(&"d") => database::create_table(),
  _ => help(),
 };
}
|
// This file is part of Substrate.
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Treasury pallet tests.
#![cfg(test)]
use super::*;
use frame_support::{
assert_noop, assert_ok, impl_outer_event, impl_outer_origin, parameter_types,
traits::{Contains, OnInitialize},
weights::Weight,
};
use sp_core::H256;
use sp_runtime::{
testing::Header,
traits::{BadOrigin, BlakeTwo256, IdentityLookup},
ModuleId, Perbill,
};
use std::cell::RefCell;
// Construct the mock `Origin` type for the test runtime.
impl_outer_origin! {
 pub enum Origin for Test where system = frame_system {}
}
mod treasury {
 // Re-export needed for `impl_outer_event!`.
 pub use super::super::*;
}
// Aggregate the events of every pallet used by the mock runtime into one enum.
impl_outer_event! {
 pub enum Event for Test {
  system<T>,
  pallet_balances<T>,
  treasury<T>,
 }
}
// Mock runtime type the pallets are instantiated on for testing.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
// Block-level limits for the mock `frame_system` configuration.
parameter_types! {
 pub const BlockHashCount: u64 = 250;
 pub const MaximumBlockWeight: Weight = 1024;
 pub const MaximumBlockLength: u32 = 2 * 1024;
 pub const AvailableBlockRatio: Perbill = Perbill::one();
}
// Minimal `frame_system` configuration: most associated types are the unit
// defaults; only the pieces the treasury/balances tests rely on are real.
impl frame_system::Trait for Test {
 type BaseCallFilter = ();
 type Origin = Origin;
 type Index = u64;
 type BlockNumber = u64;
 type Call = ();
 type Hash = H256;
 type Hashing = BlakeTwo256;
 type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account
 type Lookup = IdentityLookup<Self::AccountId>;
 type Header = Header;
 type Event = Event;
 type BlockHashCount = BlockHashCount;
 type MaximumBlockWeight = MaximumBlockWeight;
 type DbWeight = ();
 type BlockExecutionWeight = ();
 type ExtrinsicBaseWeight = ();
 type MaximumExtrinsicWeight = MaximumBlockWeight;
 type AvailableBlockRatio = AvailableBlockRatio;
 type MaximumBlockLength = MaximumBlockLength;
 type Version = ();
 type PalletInfo = ();
 type AccountData = pallet_balances::AccountData<u64>;
 type OnNewAccount = ();
 type OnKilledAccount = ();
 type SystemWeightInfo = ();
}
parameter_types! {
 pub const ExistentialDeposit: u64 = 1;
}
// Balances pallet configuration: `u64` balances, events routed into the
// aggregated `Event`, account data stored via `System`.
impl pallet_balances::Trait for Test {
 type MaxLocks = ();
 type Balance = u64;
 type Event = Event;
 type DustRemoval = ();
 type ExistentialDeposit = ExistentialDeposit;
 type AccountStore = System;
 type WeightInfo = ();
}
// Tipper membership set, backed by a thread-local so benchmark code can
// mutate it. Accounts 10..=14 are the authorized tippers in these tests.
thread_local! {
 static TEN_TO_FOURTEEN: RefCell<Vec<u128>> = RefCell::new(vec![10,11,12,13,14]);
}
// Sorted-membership provider used as the treasury's `Tippers` type.
pub struct TenToFourteen;
impl Contains<u128> for TenToFourteen {
 fn sorted_members() -> Vec<u128> {
  TEN_TO_FOURTEEN.with(|v| v.borrow().clone())
 }
 #[cfg(feature = "runtime-benchmarks")]
 fn add(new: &u128) {
  TEN_TO_FOURTEEN.with(|v| {
   let mut members = v.borrow_mut();
   members.push(*new);
   // Keep the vector sorted, as `sorted_members` promises.
   members.sort();
  })
 }
}
impl ContainsLengthBound for TenToFourteen {
 fn max_len() -> usize {
  TEN_TO_FOURTEEN.with(|v| v.borrow().len())
 }
 fn min_len() -> usize {
  0
 }
}
// Treasury/tips/bounties constants used throughout the tests below;
// the expected balances in the assertions are derived from these values.
parameter_types! {
 pub const ProposalBond: Permill = Permill::from_percent(5);
 pub const ProposalBondMinimum: u64 = 1;
 pub const SpendPeriod: u64 = 2;
 pub const Burn: Permill = Permill::from_percent(50);
 pub const TipCountdown: u64 = 1;
 pub const TipFindersFee: Percent = Percent::from_percent(20);
 pub const TipReportDepositBase: u64 = 1;
 pub const DataDepositPerByte: u64 = 1;
 pub const BountyDepositBase: u64 = 80;
 pub const BountyDepositPayoutDelay: u64 = 3;
 pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
 pub const BountyUpdatePeriod: u32 = 20;
 pub const MaximumReasonLength: u32 = 16384;
 pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
 pub const BountyValueMinimum: u64 = 1;
}
// Treasury pallet configuration for the mock runtime. Root can approve
// and reject proposals; accounts 10..=14 act as tippers.
impl Trait for Test {
 type ModuleId = TreasuryModuleId;
 type Currency = pallet_balances::Module<Test>;
 type ApproveOrigin = frame_system::EnsureRoot<u128>;
 type RejectOrigin = frame_system::EnsureRoot<u128>;
 type Tippers = TenToFourteen;
 type TipCountdown = TipCountdown;
 type TipFindersFee = TipFindersFee;
 type TipReportDepositBase = TipReportDepositBase;
 type DataDepositPerByte = DataDepositPerByte;
 type Event = Event;
 type OnSlash = ();
 type ProposalBond = ProposalBond;
 type ProposalBondMinimum = ProposalBondMinimum;
 type SpendPeriod = SpendPeriod;
 type Burn = Burn;
 type BountyDepositBase = BountyDepositBase;
 type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
 type BountyUpdatePeriod = BountyUpdatePeriod;
 type BountyCuratorDeposit = BountyCuratorDeposit;
 type BountyValueMinimum = BountyValueMinimum;
 type MaximumReasonLength = MaximumReasonLength;
 type BurnDestination = (); // Just gets burned.
 type WeightInfo = ();
}
// Convenience aliases for the pallet instances on the mock runtime.
type System = frame_system::Module<Test>;
type Balances = pallet_balances::Module<Test>;
type Treasury = Module<Test>;
// Build test externalities with three pre-funded accounts and the
// treasury's default genesis applied.
pub fn new_test_ext() -> sp_io::TestExternalities {
 let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
 pallet_balances::GenesisConfig::<Test> {
  // Total issuance will be 200 with treasury account initialized at ED.
  balances: vec![(0, 100), (1, 98), (2, 1)],
 }
 .assimilate_storage(&mut t)
 .unwrap();
 GenesisConfig::default().assimilate_storage::<Test, _>(&mut t).unwrap();
 t.into()
}
/// The most recent treasury event deposited by the mock runtime.
/// Panics if no treasury event has been emitted yet.
fn last_event() -> RawEvent<u64, u128, H256, DefaultInstance> {
 System::events()
  .into_iter()
  .rev()
  // Scanning from the back for the first treasury event is equivalent
  // to taking the last match in forward order.
  .find_map(|record| match record.event {
   Event::treasury(inner) => Some(inner),
   _ => None,
  })
  .unwrap()
}
// The treasury starts with an empty pot and no proposals.
#[test]
fn genesis_config_works() {
 new_test_ext().execute_with(|| {
  assert_eq!(Treasury::pot(), 0);
  assert_eq!(Treasury::proposal_count(), 0);
 });
}
// Hash identifying the tip for reason "awesome.dot" with beneficiary 3.
fn tip_hash() -> H256 {
 BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128))
}
// Opening a second tip with the same reason/beneficiary must fail.
#[test]
fn tip_new_cannot_be_used_twice() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10));
  assert_noop!(
   Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10),
   Error::<Test, _>::AlreadyKnown
  );
 });
}
// A reporter pays a deposit, tippers endorse, and on close the reporter
// gets the deposit back plus a finder's fee; the beneficiary gets the rest.
#[test]
fn report_awesome_and_tip_works() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3));
  // Deposit = TipReportDepositBase (1) + 11 reason bytes at 1 each = 12.
  assert_eq!(Balances::reserved_balance(0), 12);
  assert_eq!(Balances::free_balance(0), 88);
  // other reports don't count.
  assert_noop!(
   Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3),
   Error::<Test, _>::AlreadyKnown
  );
  let h = tip_hash();
  assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10));
  // Account 9 is not in the tipper set.
  assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin);
  System::set_block_number(2);
  assert_ok!(Treasury::close_tip(Origin::signed(100), h.into()));
  assert_eq!(Balances::reserved_balance(0), 0);
  assert_eq!(Balances::free_balance(0), 102);
  assert_eq!(Balances::free_balance(3), 8);
 });
}
// When the reporter is also the beneficiary, the whole payout lands there.
#[test]
fn report_awesome_from_beneficiary_and_tip_works() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0));
  assert_eq!(Balances::reserved_balance(0), 12);
  assert_eq!(Balances::free_balance(0), 88);
  // Beneficiary is account 0 here, so the hash differs from `tip_hash()`.
  let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128));
  assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10));
  System::set_block_number(2);
  assert_ok!(Treasury::close_tip(Origin::signed(100), h.into()));
  assert_eq!(Balances::reserved_balance(0), 0);
  assert_eq!(Balances::free_balance(0), 110);
 });
}
// Full close_tip life-cycle: open -> closing (enough tips) -> premature
// close rejected -> closed after the countdown -> unknown afterwards.
#[test]
fn close_tip_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10));
  let h = tip_hash();
  assert_eq!(last_event(), RawEvent::NewTip(h));
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  // Only 2 of 5 tippers so far: still open.
  assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::<Test, _>::StillOpen);
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10));
  assert_eq!(last_event(), RawEvent::TipClosing(h));
  // Countdown (1 block) has not elapsed yet.
  assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::<Test, _>::Premature);
  System::set_block_number(2);
  assert_noop!(Treasury::close_tip(Origin::none(), h.into()), BadOrigin);
  assert_ok!(Treasury::close_tip(Origin::signed(0), h.into()));
  assert_eq!(Balances::free_balance(3), 10);
  assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10));
  assert_noop!(
   Treasury::close_tip(Origin::signed(100), h.into()),
   Error::<Test, _>::UnknownTip
  );
 });
}
// Only the original finder may retract; a retracted tip can't be closed.
#[test]
fn retract_tip_works() {
 new_test_ext().execute_with(|| {
  // with report awesome
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3));
  let h = tip_hash();
  assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10));
  assert_noop!(
   Treasury::retract_tip(Origin::signed(10), h.clone()),
   Error::<Test, _>::NotFinder
  );
  assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone()));
  System::set_block_number(2);
  assert_noop!(
   Treasury::close_tip(Origin::signed(0), h.into()),
   Error::<Test, _>::UnknownTip
  );
  // with tip new
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10));
  let h = tip_hash();
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10));
  assert_noop!(
   Treasury::retract_tip(Origin::signed(0), h.clone()),
   Error::<Test, _>::NotFinder
  );
  assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone()));
  System::set_block_number(2);
  assert_noop!(
   Treasury::close_tip(Origin::signed(10), h.into()),
   Error::<Test, _>::UnknownTip
  );
 });
}
// Payout is the median of the declared tips (0, 10, 1000000 -> 10).
#[test]
fn tip_median_calculation_works() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0));
  let h = tip_hash();
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000));
  System::set_block_number(2);
  assert_ok!(Treasury::close_tip(Origin::signed(0), h.into()));
  assert_eq!(Balances::free_balance(3), 10);
 });
}
// Re-tipping replaces a tipper's previous declaration; the final set is
// (10, 100, 1000, 0, 0) whose median is 10.
#[test]
fn tip_changing_works() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000));
  let h = tip_hash();
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000));
  assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0));
  assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0));
  assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000));
  assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100));
  assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10));
  System::set_block_number(2);
  assert_ok!(Treasury::close_tip(Origin::signed(0), h.into()));
  assert_eq!(Balances::free_balance(3), 10);
 });
}
// Funding the treasury account is reflected in the pot (minus the ED).
#[test]
fn minting_works() {
 new_test_ext().execute_with(|| {
  // Check that accumulate works when we have Some value in Dummy already.
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
 });
}
// Small proposals reserve the ProposalBondMinimum (1).
#[test]
fn spend_proposal_takes_min_deposit() {
 new_test_ext().execute_with(|| {
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3));
  assert_eq!(Balances::free_balance(0), 99);
  assert_eq!(Balances::reserved_balance(0), 1);
 });
}
// Larger proposals reserve 5% of the proposed value (ProposalBond).
#[test]
fn spend_proposal_takes_proportional_deposit() {
 new_test_ext().execute_with(|| {
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_eq!(Balances::free_balance(0), 95);
  assert_eq!(Balances::reserved_balance(0), 5);
 });
}
// A proposer who cannot afford the bond is rejected.
#[test]
fn spend_proposal_fails_when_proposer_poor() {
 new_test_ext().execute_with(|| {
  assert_noop!(
   Treasury::propose_spend(Origin::signed(2), 100, 3),
   Error::<Test, _>::InsufficientProposersBalance,
  );
 });
}
// Approved proposals pay out only on spend-period boundaries (every 2 blocks).
#[test]
fn accepted_spend_proposal_ignored_outside_spend_period() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
  <Treasury as OnInitialize<u64>>::on_initialize(1);
  assert_eq!(Balances::free_balance(3), 0);
  assert_eq!(Treasury::pot(), 100);
 });
}
// With no approved proposals, 50% of the pot (Burn) is destroyed each
// spend period, reducing total issuance.
#[test]
fn unused_pot_should_diminish() {
 new_test_ext().execute_with(|| {
  let init_total_issuance = Balances::total_issuance();
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Balances::total_issuance(), init_total_issuance + 100);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Treasury::pot(), 50);
  assert_eq!(Balances::total_issuance(), init_total_issuance + 50);
 });
}
// A rejected proposal never pays out; the pot still burns as usual.
#[test]
fn rejected_spend_proposal_ignored_on_spend_period() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Balances::free_balance(3), 0);
  assert_eq!(Treasury::pot(), 50);
 });
}
// Rejection removes the proposal, so a second rejection is InvalidIndex.
#[test]
fn reject_already_rejected_spend_proposal_fails() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
  assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
 });
}
// Rejecting a proposal that was never made fails.
#[test]
fn reject_non_existent_spend_proposal_fails() {
 new_test_ext().execute_with(|| {
  assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
 });
}
// Approving a proposal that was never made fails.
#[test]
fn accept_non_existent_spend_proposal_fails() {
 new_test_ext().execute_with(|| {
  assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
 });
}
// A rejected proposal cannot subsequently be approved.
#[test]
fn accept_already_rejected_spend_proposal_fails() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
  assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
 });
}
// An approved proposal is paid in full at the next spend period.
#[test]
fn accepted_spend_proposal_enacted_on_spend_period() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Balances::free_balance(3), 100);
  assert_eq!(Treasury::pot(), 0);
 });
}
// A proposal larger than the pot waits (no burn either) until the pot can
// cover it; once funded, it pays and normal burning resumes.
#[test]
fn pot_underflow_should_not_diminish() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
  let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap();
  <Treasury as OnInitialize<u64>>::on_initialize(4);
  assert_eq!(Balances::free_balance(3), 150); // Fund has been spent
  assert_eq!(Treasury::pot(), 25); // Pot has finally changed
 });
}
// Treasury account doesn't get deleted if amount approved to spend is all its free balance.
// i.e. pot should not include existential deposit needed for account survival.
#[test]
fn treasury_account_doesnt_get_deleted() {
 new_test_ext().execute_with(|| {
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  let treasury_balance = Balances::free_balance(&Treasury::account_id());
  // Asking for the full free balance (101) exceeds the pot (100): not paid.
  assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
  assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 1));
  <Treasury as OnInitialize<u64>>::on_initialize(4);
  assert_eq!(Treasury::pot(), 0); // Pot is emptied
  assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there
 });
}
// In case treasury account is not existing then it works fine.
// This is useful for chain that will just update runtime.
#[test]
fn inexistent_account_works() {
 let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
 pallet_balances::GenesisConfig::<Test> { balances: vec![(0, 100), (1, 99), (2, 1)] }
  .assimilate_storage(&mut t)
  .unwrap();
 // Treasury genesis config is not build thus treasury account does not exist
 let mut t: sp_io::TestExternalities = t.into();
 t.execute_with(|| {
  assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist
  assert_eq!(Treasury::pot(), 0); // Pot is empty
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
  assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3));
  assert_ok!(Treasury::approve_proposal(Origin::root(), 1));
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_eq!(Treasury::pot(), 0); // Pot hasn't changed
  assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed
  Balances::make_free_balance_be(&Treasury::account_id(), 100);
  assert_eq!(Treasury::pot(), 99); // Pot now contains funds
  assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist
  <Treasury as OnInitialize<u64>>::on_initialize(4);
  assert_eq!(Treasury::pot(), 0); // Pot has changed
  assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed
 });
}
// Proposing a bounty reserves the bond (base 80 + 1 per description byte)
// and records the bounty in Proposed state.
#[test]
fn propose_bounty_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec()));
  assert_eq!(last_event(), RawEvent::BountyProposed(0));
  // BountyDepositBase (80) + 10 description bytes at DataDepositPerByte (1) = 90.
  let deposit: u64 = 85 + 5;
  assert_eq!(Balances::reserved_balance(0), deposit);
  assert_eq!(Balances::free_balance(0), 100 - deposit);
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 0,
    curator_deposit: 0,
    value: 10,
    bond: deposit,
    status: BountyStatus::Proposed,
   }
  );
  assert_eq!(Treasury::bounty_descriptions(0).unwrap(), b"1234567890".to_vec());
  assert_eq!(Treasury::bounty_count(), 1);
 });
}
// Over-long descriptions, underfunded proposers and zero values are rejected.
#[test]
fn propose_bounty_validation_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_eq!(Treasury::pot(), 100);
  assert_noop!(
   Treasury::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()),
   Error::<Test, _>::ReasonTooBig
  );
  assert_noop!(
   Treasury::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()),
   Error::<Test, _>::InsufficientProposersBalance
  );
  assert_noop!(
   Treasury::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()),
   Error::<Test, _>::InvalidValue
  );
 });
}
// Closing a proposed bounty slashes the bond and removes all storage.
#[test]
fn close_bounty_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"12345".to_vec()));
  assert_ok!(Treasury::close_bounty(Origin::root(), 0));
  // Bond = 80 base + 5 description bytes = 85, slashed on rejection.
  let deposit: u64 = 80 + 5;
  assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit));
  assert_eq!(Balances::reserved_balance(0), 0);
  assert_eq!(Balances::free_balance(0), 100 - deposit);
  assert_eq!(Treasury::bounties(0), None);
  assert!(!Bounties::<Test>::contains_key(0));
  assert_eq!(Treasury::bounty_descriptions(0), None);
 });
}
// Approval queues the bounty; at the next spend period the bond is
// returned, the bounty is funded from the pot, and the burn applies.
#[test]
fn approve_bounty_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_noop!(Treasury::approve_bounty(Origin::root(), 0), Error::<Test, _>::InvalidIndex);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  let deposit: u64 = 80 + 5;
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 0,
    value: 50,
    curator_deposit: 0,
    bond: deposit,
    status: BountyStatus::Approved,
   }
  );
  assert_eq!(Treasury::bounty_approvals(), vec![0]);
  assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::<Test, _>::UnexpectedStatus);
  // deposit not returned yet
  assert_eq!(Balances::reserved_balance(0), deposit);
  assert_eq!(Balances::free_balance(0), 100 - deposit);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  // return deposit
  assert_eq!(Balances::reserved_balance(0), 0);
  assert_eq!(Balances::free_balance(0), 100);
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 0,
    curator_deposit: 0,
    value: 50,
    bond: deposit,
    status: BountyStatus::Funded,
   }
  );
  assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25
  assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 50);
 });
}
// Curator proposal/acceptance: fee must be below the value, only the
// proposed curator may accept, and accepting reserves half the fee.
#[test]
fn assign_curator_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_noop!(
   Treasury::propose_curator(Origin::root(), 0, 4, 4),
   Error::<Test, _>::InvalidIndex
  );
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_noop!(
   Treasury::propose_curator(Origin::root(), 0, 4, 50),
   Error::<Test, _>::InvalidFee
  );
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 4,
    curator_deposit: 0,
    value: 50,
    bond: 85,
    status: BountyStatus::CuratorProposed { curator: 4 },
   }
  );
  assert_noop!(
   Treasury::accept_curator(Origin::signed(1), 0),
   Error::<Test, _>::RequireCurator
  );
  assert_noop!(
   Treasury::accept_curator(Origin::signed(4), 0),
   pallet_balances::Error::<Test, _>::InsufficientBalance
  );
  Balances::make_free_balance_be(&4, 10);
  assert_ok!(Treasury::accept_curator(Origin::signed(4), 0));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 4,
    // BountyCuratorDeposit is 50% of the fee: 2.
    curator_deposit: 2,
    value: 50,
    bond: 85,
    // update_due = block 2 + BountyUpdatePeriod (20).
    status: BountyStatus::Active { curator: 4, update_due: 22 },
   }
  );
  assert_eq!(Balances::free_balance(&4), 8);
  assert_eq!(Balances::reserved_balance(&4), 2);
 });
}
// Unassigning before acceptance returns the bounty to Funded without
// penalty; unassigning an active curator slashes their deposit.
#[test]
fn unassign_curator_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4));
  assert_noop!(Treasury::unassign_curator(Origin::signed(1), 0), BadOrigin);
  assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 4,
    curator_deposit: 0,
    value: 50,
    bond: 85,
    status: BountyStatus::Funded,
   }
  );
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4));
  Balances::make_free_balance_be(&4, 10);
  assert_ok!(Treasury::accept_curator(Origin::signed(4), 0));
  assert_ok!(Treasury::unassign_curator(Origin::root(), 0));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 4,
    curator_deposit: 0,
    value: 50,
    bond: 85,
    status: BountyStatus::Funded,
   }
  );
  assert_eq!(Balances::free_balance(&4), 8);
  assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2
 });
}
// Full happy path: award, wait out the payout delay, then claim; curator
// receives the fee plus their deposit back, beneficiary gets the rest.
#[test]
fn award_and_claim_bounty_works() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  Balances::make_free_balance_be(&4, 10);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4));
  assert_ok!(Treasury::accept_curator(Origin::signed(4), 0));
  assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit
  assert_noop!(
   Treasury::award_bounty(Origin::signed(1), 0, 3),
   Error::<Test, _>::RequireCurator
  );
  assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 4,
    curator_deposit: 2,
    value: 50,
    bond: 85,
    // unlock_at = block 2 + BountyDepositPayoutDelay (3).
    status: BountyStatus::PendingPayout { curator: 4, beneficiary: 3, unlock_at: 5 },
   }
  );
  assert_noop!(Treasury::claim_bounty(Origin::signed(1), 0), Error::<Test, _>::Premature);
  System::set_block_number(5);
  <Treasury as OnInitialize<u64>>::on_initialize(5);
  assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10));
  assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0));
  assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3));
  assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4
  assert_eq!(Balances::free_balance(3), 56);
  assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0);
  assert_eq!(Treasury::bounties(0), None);
  assert_eq!(Treasury::bounty_descriptions(0), None);
 });
}
// When the curator fee exceeds the remaining bounty balance, the curator
// takes everything and the beneficiary receives nothing.
#[test]
fn claim_handles_high_fee() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  Balances::make_free_balance_be(&4, 30);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 49));
  assert_ok!(Treasury::accept_curator(Origin::signed(4), 0));
  assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3));
  System::set_block_number(5);
  <Treasury as OnInitialize<u64>>::on_initialize(5);
  // make fee > balance
  let _ = Balances::slash(&Treasury::bounty_account_id(0), 10);
  assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0));
  assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3));
  assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10
  assert_eq!(Balances::free_balance(3), 0);
  assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0);
  assert_eq!(Treasury::bounties(0), None);
  assert_eq!(Treasury::bounty_descriptions(0), None);
 });
}
// Closing a funded bounty returns its whole balance to the pot.
#[test]
fn cancel_and_refund() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 0,
    curator_deposit: 0,
    value: 50,
    bond: 85,
    status: BountyStatus::Funded,
   }
  );
  assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 60);
  assert_noop!(Treasury::close_bounty(Origin::signed(0), 0), BadOrigin);
  assert_ok!(Treasury::close_bounty(Origin::root(), 0));
  // 100 - 50 (funded) - 25 (burn) + 50 + 10 (bounty account refunded on close) = 85.
  assert_eq!(Treasury::pot(), 85);
 });
}
// A bounty pending payout can only be closed after the curator is
// unassigned (which slashes their deposit).
#[test]
fn award_and_cancel() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 0, 10));
  assert_ok!(Treasury::accept_curator(Origin::signed(0), 0));
  assert_eq!(Balances::free_balance(0), 95);
  assert_eq!(Balances::reserved_balance(0), 5);
  assert_ok!(Treasury::award_bounty(Origin::signed(0), 0, 3));
  // Cannot close bounty directly when payout is happening...
  assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::<Test, _>::PendingPayout);
  // Instead unassign the curator to slash them and then close.
  assert_ok!(Treasury::unassign_curator(Origin::root(), 0));
  assert_ok!(Treasury::close_bounty(Origin::root(), 0));
  assert_eq!(last_event(), RawEvent::BountyCanceled(0));
  assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0);
  // Slashed.
  assert_eq!(Balances::free_balance(0), 95);
  assert_eq!(Balances::reserved_balance(0), 0);
  assert_eq!(Treasury::bounties(0), None);
  assert_eq!(Treasury::bounty_descriptions(0), None);
 });
}
// Anyone may unassign an inactive curator, but only after the update
// period has expired; the curator's deposit is slashed.
#[test]
fn expire_and_unassign() {
 new_test_ext().execute_with(|| {
  System::set_block_number(1);
  Balances::make_free_balance_be(&Treasury::account_id(), 101);
  assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
  assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
  System::set_block_number(2);
  <Treasury as OnInitialize<u64>>::on_initialize(2);
  assert_ok!(Treasury::propose_curator(Origin::root(), 0, 1, 10));
  assert_ok!(Treasury::accept_curator(Origin::signed(1), 0));
  assert_eq!(Balances::free_balance(1), 93);
  assert_eq!(Balances::reserved_balance(1), 5);
  // update_due is 22; at exactly block 22 unassignment is still premature.
  System::set_block_number(22);
  <Treasury as OnInitialize<u64>>::on_initialize(22);
  assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::<Test, _>::Premature);
  System::set_block_number(23);
  <Treasury as OnInitialize<u64>>::on_initialize(23);
  assert_ok!(Treasury::unassign_curator(Origin::signed(0), 0));
  assert_eq!(
   Treasury::bounties(0).unwrap(),
   Bounty {
    proposer: 0,
    fee: 10,
    curator_deposit: 0,
    value: 50,
    bond: 85,
    status: BountyStatus::Funded,
   }
  );
  assert_eq!(Balances::free_balance(1), 93);
  assert_eq!(Balances::reserved_balance(1), 0); // slashed
 });
}
#[test]
fn extend_expiry() {
    new_test_ext().execute_with(|| {
        System::set_block_number(1);
        Balances::make_free_balance_be(&Treasury::account_id(), 101);
        // Account 4 will be the curator; give it enough for the deposit.
        Balances::make_free_balance_be(&4, 10);
        assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
        assert_ok!(Treasury::approve_bounty(Origin::root(), 0));
        // Extension is only valid once the bounty is Active.
        assert_noop!(
            Treasury::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()),
            Error::<Test, _>::UnexpectedStatus
        );
        System::set_block_number(2);
        <Treasury as OnInitialize<u64>>::on_initialize(2);
        assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 10));
        assert_ok!(Treasury::accept_curator(Origin::signed(4), 0));
        assert_eq!(Balances::free_balance(4), 5);
        assert_eq!(Balances::reserved_balance(4), 5);
        System::set_block_number(10);
        <Treasury as OnInitialize<u64>>::on_initialize(10);
        // Only the assigned curator may extend the expiry.
        assert_noop!(
            Treasury::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()),
            Error::<Test, _>::RequireCurator
        );
        assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new()));
        // Extending pushes update_due out to block 30.
        assert_eq!(
            Treasury::bounties(0).unwrap(),
            Bounty {
                proposer: 0,
                fee: 10,
                curator_deposit: 5,
                value: 50,
                bond: 85,
                status: BountyStatus::Active { curator: 4, update_due: 30 },
            }
        );
        // A second extension from the same point is idempotent.
        assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new()));
        assert_eq!(
            Treasury::bounties(0).unwrap(),
            Bounty {
                proposer: 0,
                fee: 10,
                curator_deposit: 5,
                value: 50,
                bond: 85,
                status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same
            }
        );
        System::set_block_number(25);
        <Treasury as OnInitialize<u64>>::on_initialize(25);
        // Before update_due a third party cannot unassign, but the curator
        // may voluntarily step down without being slashed.
        assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::<Test, _>::Premature);
        assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0));
        assert_eq!(Balances::free_balance(4), 10); // not slashed
        assert_eq!(Balances::reserved_balance(4), 0);
    });
}
#[test]
fn test_last_reward_migration() {
    use sp_storage::Storage;
    let mut s = Storage::default();
    // Pre-migration layout of an open tip: the finder and its deposit were a
    // single optional tuple and there was no `finders_fee` flag.
    #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)]
    pub struct OldOpenTip<
        AccountId: Parameter,
        Balance: Parameter,
        BlockNumber: Parameter,
        Hash: Parameter,
    > {
        /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded
        /// string. A URL would be sensible.
        reason: Hash,
        /// The account to be tipped.
        who: AccountId,
        /// The account who began this tip and the amount held on deposit.
        finder: Option<(AccountId, Balance)>,
        /// The block number at which this tip will close if `Some`. If `None`, then no closing is
        /// scheduled.
        closes: Option<BlockNumber>,
        /// The members who have voted for this tip. Sorted by AccountId.
        tips: Vec<(AccountId, Balance)>,
    }
    // One old-format tip with a finder...
    let reason1 = BlakeTwo256::hash(b"reason1");
    let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64));
    let old_tip_finder = OldOpenTip::<u128, u64, u64, H256> {
        reason: reason1,
        who: 10,
        finder: Some((20, 30)),
        closes: Some(13),
        tips: vec![(40, 50), (60, 70)],
    };
    // ...and one without.
    let reason2 = BlakeTwo256::hash(b"reason2");
    let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64));
    let old_tip_no_finder = OldOpenTip::<u128, u64, u64, H256> {
        reason: reason2,
        who: 20,
        finder: None,
        closes: Some(13),
        tips: vec![(40, 50), (60, 70)],
    };
    // Write both old-format values directly into raw storage under the keys
    // the `Tips` map would use, then run the migration over them.
    let data = vec![
        (Tips::<Test>::hashed_key_for(hash1), old_tip_finder.encode().to_vec()),
        (Tips::<Test>::hashed_key_for(hash2), old_tip_no_finder.encode().to_vec()),
    ];
    s.top = data.into_iter().collect();
    sp_io::TestExternalities::new(s).execute_with(|| {
        Treasury::migrate_retract_tip_for_tip_new();
        // Test w/ finder
        assert_eq!(
            Tips::<Test>::get(hash1),
            Some(OpenTip {
                reason: reason1,
                who: 10,
                finder: 20,
                deposit: 30,
                closes: Some(13),
                tips: vec![(40, 50), (60, 70)],
                finders_fee: true,
            })
        );
        // Test w/o finder
        assert_eq!(
            Tips::<Test>::get(hash2),
            Some(OpenTip {
                reason: reason2,
                who: 20,
                finder: Default::default(),
                deposit: 0,
                closes: Some(13),
                tips: vec![(40, 50), (60, 70)],
                finders_fee: false,
            })
        );
    });
}
#[test]
fn genesis_funding_works() {
    let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
    let initial_funding = 100;
    pallet_balances::GenesisConfig::<Test> {
        // Total issuance will be 200 with treasury account initialized with 100.
        balances: vec![(0, 100), (Treasury::account_id(), initial_funding)],
    }
    .assimilate_storage(&mut t)
    .unwrap();
    GenesisConfig::default().assimilate_storage::<Test, _>(&mut t).unwrap();
    let mut t: sp_io::TestExternalities = t.into();
    t.execute_with(|| {
        assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding);
        // The pot excludes the existential deposit kept in the account.
        assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance());
    });
}
|
use crate::BoxError;
use derive_more::Display;
use thiserror::Error;
/// The Error returned by storage backends. Storage backend implementations should choose the
/// `ErrorKind` chosen for errors carefully since that will determine what is returned to the FTP
/// client.
#[derive(Debug, Error)]
#[error("storage error: {kind}")]
pub struct Error {
    // Determines the FTP reply code sent to the client; see `ErrorKind`.
    kind: ErrorKind,
    // Optional underlying cause, surfaced through `std::error::Error::source`.
    #[source]
    source: Option<BoxError>,
}
impl Error {
/// Creates a new storage error
pub fn new<E>(kind: ErrorKind, error: E) -> Error
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Error {
kind,
source: Some(error.into()),
}
}
/// Detailed information about what the FTP server should do with the failure
pub fn kind(&self) -> ErrorKind {
self.kind
}
/// Attempts to get a reference to the inner `std::io::Error` if there is one.
pub fn get_io_error(&self) -> Option<&std::io::Error> {
self.source.as_ref()?.downcast_ref::<std::io::Error>()
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error { kind, source: None }
}
}
/// The `ErrorKind` variants that can be produced by the [`StorageBackend`] implementations.
///
/// [`StorageBackend`]: trait.StorageBackend.html
#[derive(Copy, Clone, Eq, PartialEq, Debug, Display)]
pub enum ErrorKind {
    /// Error that will cause an FTP reply code of 450 to be returned to the FTP client.
    /// The storage back-end implementation should return this if a error occurred that my be
    /// retried for example in the case where a file is busy.
    #[display(fmt = "450 Transient file not available")]
    TransientFileNotAvailable,
    /// Error that will cause an FTP reply code of 550 to be returned to the FTP client.
    /// The storage back-end implementation should return this if a error occurred where it doesn't
    /// make sense for it to be retried. For example in the case where a file is busy.
    #[display(fmt = "550 Permanent file not available")]
    PermanentFileNotAvailable,
    /// Error that will cause an FTP reply code of 550 to be returned to the FTP client.
    /// The storage back-end implementation should return this if a error occurred where it doesn't
    /// make sense for it to be retried. For example in the case where the directory doesn't exist
    #[display(fmt = "550 Permanent directory not available")]
    PermanentDirectoryNotAvailable,
    /// Error that will cause an FTP reply code of 550 to be returned to the FTP client.
    /// The storage back-end implementation should return this if a error occurred where it doesn't
    /// make sense for it to be retried. For example when removing a directory that is not empty.
    #[display(fmt = "550 The directory is not empty")]
    PermanentDirectoryNotEmpty,
    /// Error that will cause an FTP reply code of 550 to be returned to the FTP client.
    /// The storage back-end implementation should return this if a error occurred where it doesn't
    /// make sense for it to be retried. For example in the case where file access is denied.
    #[display(fmt = "550 Permission denied")]
    PermissionDenied,
    /// Error that will cause an FTP reply code of 426 to be returned to the FTP client. It means the transfer was
    /// aborted, possibly by the client or because of a network issue
    #[display(fmt = "426 Connection closed transfer aborted")]
    ConnectionClosed,
    /// Error that will cause an FTP reply code of 451 to be returned to the FTP client. It means
    /// the requested action was aborted due to a local error (internal storage back-end error) in
    /// processing.
    #[display(fmt = "451 Local error")]
    LocalError,
    /// 551 Requested action aborted. Page type unknown.
    #[display(fmt = "551 Page type unknown")]
    PageTypeUnknown,
    /// 452 Requested action not taken. Insufficient storage space in system.
    #[display(fmt = "452 Insufficient storage space error")]
    InsufficientStorageSpaceError,
    /// 552 Requested file action aborted. Exceeded storage allocation (for current directory or
    /// dataset).
    #[display(fmt = "552 Exceeded storage allocation error")]
    ExceededStorageAllocationError,
    /// Error that will cause an FTP reply code of 553 to be returned to the FTP client. Its means
    /// the requested action was not taken due to an illegal file name.
    #[display(fmt = "553 File name not allowed error")]
    FileNameNotAllowedError,
    /// Error that will cause an FTP reply code of 502. The indicates to the client that the command
    /// is not implemented for the storage back-end. For instance the GCS back-end don't implement
    /// RMD (remove directory) but returns this error instead from its StorageBackend::rmd
    /// implementation.
    #[display(fmt = "502 Command not implemented")]
    CommandNotImplemented,
}
|
use anyhow::Result;
use wasm_bindgen::JsCast;
impl crate::ctx::ClientContext {
pub(crate) fn get_element_by_id(&self, id: &str) -> Result<web_sys::HtmlElement> {
self.get_element_by_id_as::<web_sys::HtmlElement>(id)
}
pub(crate) fn get_element_by_id_as<T: JsCast>(&self, id: &str) -> Result<T> {
match self.document().get_element_by_id(id) {
Some(e) => match e.dyn_into::<T>() {
Ok(el) => Ok(el),
Err(_) => Err(anyhow::anyhow!(format!("Cannot load html element with id [{}]", id)))
},
None => Err(anyhow::anyhow!(format!("Cannot load element with id [{}]", id)))
}
}
pub(crate) fn create_element<T: JsCast>(&self, tag: &str) -> Result<T> {
match self.document().create_element(tag) {
Ok(el) => el
.dyn_into::<T>()
.map_err(|_| anyhow::anyhow!(format!("Cannot cast [{}] element", tag))),
Err(_) => Err(anyhow::anyhow!(format!("Cannot create [{}] element", tag)))
}
}
pub(crate) fn append_template(&self, id: &str, tag: &str, template: maud::Markup) -> Result<web_sys::HtmlElement> {
let parent = self.get_element_by_id(id)?;
let el = self.create_element::<web_sys::HtmlElement>(tag)?;
el.set_inner_html(&template.into_string());
parent
.append_child(&el)
.map_err(|_| anyhow::anyhow!(format!("Cannot load html element with id [{}]", id)))
.map(|_| el)
}
pub(crate) fn replace_template(&self, id: &str, template: maud::Markup) -> Result<()> {
let parent = self.get_element_by_id(id)?;
parent.set_inner_html(&template.into_string());
Ok(())
}
pub(crate) fn set_visible(&self, id: &str, v: bool) -> Result<()> {
let el = self.get_element_by_id(id)?;
let style = el.style();
let _ = style.set_property("display", if v { "block" } else { "none" });
Ok(())
}
pub(crate) fn set_input_value(&self, id: &str, v: &str) -> Result<()> {
self.get_element_by_id_as::<web_sys::HtmlInputElement>(id)?.set_value(v);
Ok(())
}
pub(crate) fn get_input_value(&self, id: &str) -> Result<String> {
Ok(self.get_element_by_id_as::<web_sys::HtmlInputElement>(id)?.value())
}
}
/// Builds an inline `onclick` JavaScript snippet that forwards the event
/// `(t, k, v)` to the client-side `rustimate.on_event` handler and returns
/// `false` to suppress the default browser action. An empty `v` is emitted as
/// the JS empty-string literal `''`; anything else is spliced in verbatim.
pub(crate) fn onclick_event(t: &str, k: &str, v: &str) -> String {
    let value_expr = if v.is_empty() { "''" } else { v };
    format!("rustimate.on_event('{}', '{}', {});return false;", t, k, value_expr)
}
|
#[macro_use]
extern crate log;
use rsocket_rust::prelude::*;
use rsocket_rust::Result;
use rsocket_rust_transport_tcp::tokio_native_tls::{native_tls, TlsConnector};
use rsocket_rust_transport_tcp::TlsClientTransport;
#[tokio::main]
async fn main() -> Result<()> {
    // Millisecond timestamps make request/response latency visible in logs.
    env_logger::builder().format_timestamp_millis().init();
    // Trust the bundled certificate as a root, so a self-signed/private CA
    // cert for foobar.com is accepted.
    let pem = include_bytes!("foobar.com.pem");
    let cert = native_tls::Certificate::from_pem(pem)?;
    let cx = native_tls::TlsConnector::builder()
        .add_root_certificate(cert)
        .build()?;
    let cx = TlsConnector::from(cx);
    // Connect to a local server; the "foobar.com" domain is passed separately
    // from the socket address — presumably it must match the certificate's
    // subject for TLS verification (confirm against the transport docs).
    let cli = RSocketFactory::connect()
        .transport(TlsClientTransport::new(
            "foobar.com".into(),
            "127.0.0.1:4444".parse()?,
            cx,
        ))
        .start()
        .await?;
    // Fire a single request/response round-trip, then block until the
    // connection is closed.
    let res = cli
        .request_response(Payload::builder().set_data_utf8("hello").build())
        .await?;
    info!("response: {:?}", res);
    cli.wait_for_close().await;
    Ok(())
}
|
use serde::{Deserialize, Serialize};
/// First-round message of what the naming suggests is a proof-of-work
/// handshake — NOTE(review): confirm the protocol flow with the callers.
#[derive(Serialize, Deserialize)]
pub struct PowRequest1<Rq> {
    /// The wrapped request payload.
    pub request: Rq,
}
/// Second-round message carrying the solved puzzle — NOTE(review): naming
/// suggests this answers a challenge issued after `PowRequest1`; confirm.
#[derive(Serialize, Deserialize)]
pub struct PowRequest2<Pz> {
    /// The puzzle solution payload.
    pub puzzle_solution: Pz,
}
use bson::{doc, Document};
use serde::{Deserialize, Serialize, Serializer};
use std::time::Duration;
use typed_builder::TypedBuilder;
use crate::{
error::Result,
operation::append_options,
runtime,
selection_criteria::SelectionCriteria,
Client,
};
// If you write a tokio test that uses this, make sure to annotate it with
// tokio::test(flavor = "multi_thread").
// TODO RUST-1530 Make the error message here better.
/// A server fail point, wrapped around the raw `configureFailPoint` command
/// document that enables it.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct FailPoint {
    // Flattened so (de)serialization treats this struct as the command
    // document itself.
    #[serde(flatten)]
    command: Document,
}
impl FailPoint {
    /// The fail point's name, taken from the command document.
    // The unwrap is safe for values built by `fail_command`, which always
    // inserts the "configureFailPoint" key; other construction paths are not
    // visible here — TODO confirm none exist without the key.
    fn name(&self) -> &str {
        self.command.get_str("configureFailPoint").unwrap()
    }
    /// Create a failCommand failpoint.
    /// See <https://github.com/mongodb/mongo/wiki/The-%22failCommand%22-fail-point> for more info.
    pub fn fail_command(
        fail_commands: &[&str],
        mode: FailPointMode,
        options: impl Into<Option<FailCommandOptions>>,
    ) -> FailPoint {
        let options = options.into();
        // "data" carries the affected command names plus any extra options.
        let mut data = doc! {
            "failCommands": fail_commands.iter().map(|s| s.to_string()).collect::<Vec<String>>(),
        };
        append_options(&mut data, options.as_ref()).unwrap();
        let command = doc! {
            "configureFailPoint": "failCommand",
            "mode": bson::to_bson(&mode).unwrap(),
            "data": data,
        };
        FailPoint { command }
    }
    /// Enables the fail point on the server selected by `criteria`, returning
    /// a guard that disables it again when dropped.
    pub async fn enable(
        &self,
        client: &Client,
        criteria: impl Into<Option<SelectionCriteria>>,
    ) -> Result<FailPointGuard> {
        let criteria = criteria.into();
        client
            .database("admin")
            .run_command(self.command.clone(), criteria.clone())
            .await?;
        // The guard remembers the same criteria so the disable command is
        // routed to the same server.
        Ok(FailPointGuard {
            failpoint_name: self.name().to_string(),
            client: client.clone(),
            criteria,
        })
    }
}
/// RAII-style guard returned by [`FailPoint::enable`]; disables the named
/// fail point when dropped.
#[derive(Debug)]
pub struct FailPointGuard {
    client: Client,
    failpoint_name: String,
    // Selection criteria used when enabling, reused to target the same server
    // for the disable command.
    criteria: Option<SelectionCriteria>,
}
impl Drop for FailPointGuard {
    fn drop(&mut self) {
        let client = self.client.clone();
        let name = self.failpoint_name.clone();
        // Drop is synchronous, so the async disable command is driven to
        // completion with a blocking runtime call. (This is why tests using
        // FailPoint must run on a multi-threaded runtime — see the note at
        // the top of this file.)
        let result = runtime::block_on(async move {
            client
                .database("admin")
                .run_command(
                    doc! { "configureFailPoint": name, "mode": "off" },
                    self.criteria.clone(),
                )
                .await
        });
        // Only log: panicking inside Drop could abort the process.
        if let Err(e) = result {
            println!("failed disabling failpoint: {:?}", e);
        }
    }
}
/// When/how often the fail point fires; serialized camelCase into the
/// command's "mode" field.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(unused)]
pub enum FailPointMode {
    AlwaysOn,
    Times(i32),
    Skip(i32),
    Off,
    ActivationProbability(f32),
}
/// Options appended to the "data" document of a failCommand fail point.
#[serde_with::skip_serializing_none]
#[derive(Debug, Default, TypedBuilder, Serialize)]
#[builder(field_defaults(default, setter(into)))]
#[serde(rename_all = "camelCase")]
pub struct FailCommandOptions {
    /// The appName that a client must use in order to hit this fail point.
    app_name: Option<String>,
    /// If non-null, how long the server should block the affected commands.
    /// Only available in 4.2.9+.
    // Serialized via `serialize_block_connection` below into a pair of
    // fields (blockConnection/blockTimeMS) that `flatten` merges into this
    // document rather than nesting them.
    #[serde(serialize_with = "serialize_block_connection")]
    #[serde(flatten)]
    block_connection: Option<Duration>,
    /// Whether the server should hang up when the client sends an affected command
    close_connection: Option<bool>,
    /// The error code to include in the server's reply to an affected command.
    error_code: Option<i64>,
    /// Array of error labels to be included in the server's reply to an affected command. Passing
    /// in an empty array suppresses all error labels that would otherwise be returned by the
    /// server. The existence of the "errorLabels" field in the failCommand failpoint completely
    /// overrides the server's normal error labels adding behaviors for the affected commands.
    /// Only available in 4.4+.
    error_labels: Option<Vec<String>>,
    /// Document to be returned as a write concern error.
    write_concern_error: Option<Document>,
}
/// Serializes `FailCommandOptions::block_connection`: a present duration
/// becomes `{ blockConnection: true, blockTimeMS: <millis> }`, an absent one
/// serializes as none.
fn serialize_block_connection<S: Serializer>(
    val: &Option<Duration>,
    serializer: S,
) -> std::result::Result<S::Ok, S::Error> {
    if let Some(duration) = val {
        let millis = duration.as_millis() as i64;
        doc! { "blockConnection": true, "blockTimeMS": millis }.serialize(serializer)
    } else {
        serializer.serialize_none()
    }
}
|
// Generated-style (svd2rust conventions) register API: each `*_W` alias is a
// single-bit write proxy into the ICR (Interrupt Clear Register).
#[doc = "Register `ICR` writer"]
pub type W = crate::W<ICR_SPEC>;
#[doc = "Field `FLT1C` writer - Fault 1 Interrupt Flag Clear"]
pub type FLT1C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FLT2C` writer - Fault 2 Interrupt Flag Clear"]
pub type FLT2C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FLT3C` writer - Fault 3 Interrupt Flag Clear"]
pub type FLT3C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FLT4C` writer - Fault 4 Interrupt Flag Clear"]
pub type FLT4C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FLT5C` writer - Fault 5 Interrupt Flag Clear"]
pub type FLT5C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SYSFLTC` writer - System Fault Interrupt Flag Clear"]
pub type SYSFLTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FLT6C` writer - Fault 6 Interrupt Flag Clear"]
pub type FLT6C_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DLLRDYC` writer - DLL Ready Interrupt flag Clear"]
pub type DLLRDYC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BMPERC` writer - Burst mode period flag Clear"]
pub type BMPERC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field accessors; the const generic on each `*_W` return type is the bit
// position written within ICR.
impl W {
    #[doc = "Bit 0 - Fault 1 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt1c(&mut self) -> FLT1C_W<ICR_SPEC, 0> {
        FLT1C_W::new(self)
    }
    #[doc = "Bit 1 - Fault 2 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt2c(&mut self) -> FLT2C_W<ICR_SPEC, 1> {
        FLT2C_W::new(self)
    }
    #[doc = "Bit 2 - Fault 3 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt3c(&mut self) -> FLT3C_W<ICR_SPEC, 2> {
        FLT3C_W::new(self)
    }
    #[doc = "Bit 3 - Fault 4 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt4c(&mut self) -> FLT4C_W<ICR_SPEC, 3> {
        FLT4C_W::new(self)
    }
    #[doc = "Bit 4 - Fault 5 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt5c(&mut self) -> FLT5C_W<ICR_SPEC, 4> {
        FLT5C_W::new(self)
    }
    #[doc = "Bit 5 - System Fault Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn sysfltc(&mut self) -> SYSFLTC_W<ICR_SPEC, 5> {
        SYSFLTC_W::new(self)
    }
    #[doc = "Bit 6 - Fault 6 Interrupt Flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn flt6c(&mut self) -> FLT6C_W<ICR_SPEC, 6> {
        FLT6C_W::new(self)
    }
    #[doc = "Bit 16 - DLL Ready Interrupt flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn dllrdyc(&mut self) -> DLLRDYC_W<ICR_SPEC, 16> {
        DLLRDYC_W::new(self)
    }
    #[doc = "Bit 17 - Burst mode period flag Clear"]
    #[inline(always)]
    #[must_use]
    pub fn bmperc(&mut self) -> BMPERC_W<ICR_SPEC, 17> {
        BMPERC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    // Unsafe escape hatch: bypasses the per-field proxies, so the caller is
    // responsible for writing only valid bit patterns.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Interrupt Clear Register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`icr::W`](W). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ICR_SPEC;
impl crate::RegisterSpec for ICR_SPEC {
    type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [`icr::W`](W) writer structure"]
// Write-only spec: both modify bitmaps are zero, i.e. no bits are forced on
// plain writes.
impl crate::Writable for ICR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ICR to value 0"]
impl crate::Resettable for ICR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
extern crate diesel;
extern crate UserManagerCrud;
use self::diesel::prelude::*;
use self::UserManagerCrud::models::*;
use self::UserManagerCrud::*;
use std::io::stdin;
mod lambda_crypt;
/// Prompts for an e-mail address and password on stdin, hashes the password,
/// and reports how many `users` rows match both values.
fn main() {
    use UserManagerCrud::schema::users::dsl::*;
    println!("Put you email here:");
    let mut emailin = String::new();
    stdin().read_line(&mut emailin).unwrap();
    // BUG FIX: the previous `&emailin[..emailin.len() - 1]` assumed exactly one
    // trailing newline byte — it panicked on EOF (empty read) and left a stray
    // '\r' on Windows. Strip any trailing CR/LF instead.
    let emailin = emailin.trim_end_matches(|c| c == '\r' || c == '\n');
    println!("Put you password here:");
    let mut passwordin = String::new();
    stdin().read_line(&mut passwordin).unwrap();
    // Only line endings are stripped so passwords may still contain spaces.
    let passwordin = passwordin.trim_end_matches(|c| c == '\r' || c == '\n');
    let cryptpass = lambda_crypt::get_hash(passwordin);
    let connection = establish_connection();
    // Count rows matching both the e-mail and the password hash.
    let result = users
        .filter(email.eq(&emailin))
        .filter(password.eq(&cryptpass))
        .load::<User>(&connection)
        .expect("Check user query failed to run");
    println!("Result : {:?}", result.len());
}
|
/// Advent of Code 2020 day 12, part 1: follow navigation instructions and
/// return the Manhattan distance of the final position from the origin.
///
/// Each line is a letter plus an integer: N/S/E/W translate the ship,
/// L/R rotate the facing direction in 90° steps, F moves along the facing
/// direction. Panics on malformed input lines.
fn solve(input: &str) -> i32 {
    // Facing direction as a unit vector; starts pointing east.
    let mut dir = [1, 0];
    let mut coords = [0, 0];
    input
        .split_terminator('\n')
        .map(|n| (n.chars().next().unwrap(), n[1..].parse::<i32>().unwrap()))
        .for_each(|(key, value)| match key {
            'N' => coords[1] += value,
            'S' => coords[1] -= value,
            'E' => coords[0] += value,
            'W' => coords[0] -= value,
            'F' => coords = [coords[0] + value * dir[0], coords[1] + value * dir[1]],
            key => {
                // Normalize to counter-clockwise quarter turns: a right turn
                // of d degrees equals a left turn of 360 - d degrees.
                let quarter_turns = if key == 'L' { 1 } else { 3 } * (value / 90) % 4;
                for _ in 0..quarter_turns {
                    // 90° CCW rotation: (x, y) -> (-y, x).
                    dir = [-dir[1], dir[0]];
                }
            }
        });
    coords[0].abs() + coords[1].abs()
}

fn main() {
    // fs::read_to_string replaces the previous read + from_utf8 round-trip.
    let input = std::fs::read_to_string("input/day12").unwrap();
    println!("{}", solve(&input));
}
|
use gtk::{ButtonsType, DialogFlags, Inhibit, MessageDialog, MessageType, Window};
use gtk::Orientation::{Horizontal, Vertical};
use gtk::prelude::*;
use rand::Rng;
use relm_derive::{Msg, widget};
use relm::{Channel, Component, Relm, Sender, Widget, init};
use self::WinMsg::*;
/// Model for the header bar widget: just the channel sender used to forward
/// header-button events to the main window.
pub struct HeaderModel {
    tx: Sender<WinMsg>,
}
// Header bar with the Spin / New Game buttons; it owns no state of its own
// and simply relays every message to the parent window over the channel.
#[widget]
impl Widget for Header {
    fn model(_: &Relm<Self>, tx: Sender<WinMsg>) -> HeaderModel {
        HeaderModel {
            tx,
        }
    }
    // Forward every widget event unchanged to the main window.
    fn update(&mut self, event: WinMsg) {
        self.model.tx.send(event).expect("Event to be handled by parent.")
    }
    view! {
        #[name="titlebar"]
        gtk::HeaderBar {
            title: Some("Slot Machine"),
            show_close_button: true,
            gtk::Button {
                clicked => Spin,
                label: "Spin",
            },
            gtk::Button {
                clicked => NewGame,
                label: "New Game",
            },
        }
    }
}
/// Main-window state for the slot machine game.
pub struct Model {
    // Keeps the header component alive for the window's lifetime.
    header: Component<Header>,
    // True once the player has run out of tokens.
    is_game_over: bool,
    // True until the first spin; controls the "Press Spin!" placeholder.
    is_new: bool,
    tokens: u32,
    wheel1: u32,
    wheel2: u32,
    wheel3: u32,
    // Tokens won on the most recent spin (0 if it wasn't a triple).
    payout: u32,
}
impl Model {
    /// Message shown in the winnings dialog after a payout.
    fn payout_alert(&self) -> String {
        format!("You got {} tokens!", self.payout)
    }
    /// Text for the payout label.
    fn payout_text(&self) -> String {
        format!("Payout: {}", self.payout)
    }
    /// Text for the token-count label.
    fn token_text(&self) -> String {
        format!("Tokens: {}", self.tokens)
    }
}
/// Messages handled by the main window (also emitted by the header buttons).
#[derive(Msg)]
pub enum WinMsg {
    NewGame,
    Quit,
    Spin,
}
#[widget]
impl Widget for Win {
    /// Builds the initial model and wires a channel so that messages emitted
    /// by the header component are re-emitted on this window's stream.
    fn model(relm: &Relm<Self>, _:()) -> Model {
        let stream = relm.stream().clone();
        let (_, tx) = Channel::new(move |num| {
            stream.emit(num);
        });
        let header = init::<Header>(tx).expect("Header");
        Model {
            header,
            is_new: true,
            is_game_over: false,
            tokens: 100,
            wheel1: 0,
            wheel2: 0,
            wheel3: 0,
            payout: 0,
        }
    }
    fn init_view(&mut self) {
        self.window.resize(640, 480);
    }
    fn update(&mut self, event: WinMsg) {
        match event {
            // Reset all game state and refresh the labels that are not
            // bound to the view! bindings.
            NewGame => {
                self.model.is_new = true;
                self.model.is_game_over = false;
                self.model.tokens = 100;
                self.model.wheel1 = 0;
                self.model.wheel2 = 0;
                self.model.wheel3 = 0;
                self.model.payout = 0;
                self.payout.set_text(&self.model.payout_text());
                self.tokens.set_text(&self.model.token_text());
            },
            Quit => gtk::main_quit(),
            Spin => {
                if self.model.is_new {
                    self.model.is_new = false;
                }
                if self.model.tokens > 0 {
                    let mut rng = rand::thread_rng();
                    // Each spin costs one token.
                    self.model.tokens -= 1;
                    // BUG FIX: gen_range's upper bound is exclusive, so the
                    // previous gen_range(1, 3) could only ever roll 1 or 2
                    // even though the payout table below pays out for a
                    // triple of 3s. Use 4 so the wheels roll 1..=3.
                    self.model.wheel1 = rng.gen_range(1, 4);
                    self.model.wheel2 = rng.gen_range(1, 4);
                    self.model.wheel3 = rng.gen_range(1, 4);
                    // 1 - 1 - 1 (3) += 4
                    // 2 - 2 - 2 (6) += 8
                    // 3 - 3 - 3 (9) += 12
                    if self.model.wheel1 == self.model.wheel2 && self.model.wheel2 == self.model.wheel3 {
                        let value = self.model.wheel1 + self.model.wheel2 + self.model.wheel3;
                        self.model.payout = (value / 3) * 4;
                        self.model.tokens += self.model.payout;
                    } else {
                        self.model.payout = 0;
                    }
                    self.payout.set_text(&self.model.payout_text());
                    self.tokens.set_text(&self.model.token_text());
                    // Celebrate a win with a modal dialog.
                    if self.model.payout > 0 {
                        let dialog = MessageDialog::new(None::<&Window>,
                                         DialogFlags::empty(),
                                         MessageType::Info,
                                         ButtonsType::Ok,
                                         &self.model.payout_alert());
                        dialog.run();
                        dialog.destroy();
                    }
                    // tokens is unsigned, so "broke" simply means zero.
                    if self.model.tokens == 0 {
                        self.model.is_game_over = true;
                    }
                } else {
                    self.model.is_game_over = true;
                }
            },
        }
    }
    view! {
        #[name="window"]
        gtk::Window {
            titlebar: Some(self.model.header.widget()),
            #[name="app"]
            gtk::Box {
                orientation: Vertical,
                homogeneous: true,
                #[name="state"]
                gtk::Label {
                    text: "Game Over",
                    visible: self.model.is_game_over,
                },
                #[name="placeholder"]
                gtk::Label {
                    text: "Press Spin!",
                    visible: self.model.is_new,
                },
                #[name="wheels"]
                gtk::Box {
                    orientation: Horizontal,
                    margin_top: 64,
                    margin_bottom: 64,
                    homogeneous: true,
                    visible: !self.model.is_new,
                    gtk::Label {
                        text: &self.model.wheel1.to_string(),
                    },
                    gtk::Label {
                        text: &self.model.wheel2.to_string(),
                    },
                    gtk::Label {
                        text: &self.model.wheel3.to_string(),
                    },
                },
                #[name="tokens"]
                gtk::Label {
                    text: &self.model.token_text(),
                },
                #[name="payout"]
                gtk::Label {
                    text: &self.model.payout_text(),
                    visible: !self.model.is_new,
                },
            },
            delete_event(_, _) => (Quit, Inhibit(false)),
        }
    }
}
/// Starts the GTK main loop with the slot-machine window.
fn main() {
    Win::run(()).expect("Window::run");
}
|
pub mod hackernews;
pub mod reddit;
pub mod twitter;
|
use crate::error::NiaServerError;
use crate::error::NiaServerResult;
use crate::protocol::Serializable;
use nia_protocol_rust::RemoveDeviceByNameRequest;
/// Request asking the server to remove (stop handling) a device, identified
/// by name.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NiaRemoveDeviceByNameRequest {
    device_name: String,
}
impl NiaRemoveDeviceByNameRequest {
    /// Constructs a request targeting the device with the given name.
    pub fn new<S>(device_name: S) -> NiaRemoveDeviceByNameRequest
    where
        S: Into<String>,
    {
        let device_name = device_name.into();
        NiaRemoveDeviceByNameRequest { device_name }
    }
    /// Consumes the request, yielding the device name it targets.
    pub fn get_device_name(self) -> String {
        self.device_name
    }
}
// Protobuf (de)serialization between the domain type and the generated
// `RemoveDeviceByNameRequest` message.
impl
    Serializable<
        NiaRemoveDeviceByNameRequest,
        nia_protocol_rust::RemoveDeviceByNameRequest,
    > for NiaRemoveDeviceByNameRequest
{
    /// Converts this request into its protobuf representation.
    fn to_pb(&self) -> RemoveDeviceByNameRequest {
        let mut remove_device_by_name_request_pb =
            nia_protocol_rust::RemoveDeviceByNameRequest::new();
        remove_device_by_name_request_pb
            .set_device_name(protobuf::Chars::from(self.device_name.clone()));
        remove_device_by_name_request_pb
    }
    /// Reconstructs a request from its protobuf representation.
    // Infallible in practice, but the Serializable trait's contract is
    // fallible, hence the Ok wrapper.
    fn from_pb(
        object_pb: RemoveDeviceByNameRequest,
    ) -> NiaServerResult<NiaRemoveDeviceByNameRequest> {
        let remove_device_by_name_request =
            NiaRemoveDeviceByNameRequest::new(object_pb.get_device_name());
        Ok(remove_device_by_name_request)
    }
}
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;
    // Round-trip through the byte-level Serializable helpers must preserve
    // the request exactly.
    #[test]
    fn serializes_and_deserializes() {
        let expected = NiaRemoveDeviceByNameRequest::new("Corsair kyboard");
        let bytes = expected.to_bytes().unwrap();
        let result = NiaRemoveDeviceByNameRequest::from_bytes(bytes).unwrap();
        assert_eq!(expected, result)
    }
}
|
// Generated-style (svd2rust v1 conventions) access API for DDRPHYC_PIR.
#[doc = "Reader of register DDRPHYC_PIR"]
pub type R = crate::R<u32, super::DDRPHYC_PIR>;
#[doc = "Writer for register DDRPHYC_PIR"]
pub type W = crate::W<u32, super::DDRPHYC_PIR>;
#[doc = "Register DDRPHYC_PIR `reset()`'s with value 0"]
impl crate::ResetValue for super::DDRPHYC_PIR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
/// Write proxy for field `INIT`
pub struct INIT_W<'a> {
    w: &'a mut W,
}
impl<'a> INIT_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | (u32::from(value) & 0x01);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `DLLSRST`
pub struct DLLSRST_W<'a> {
    w: &'a mut W,
}
impl<'a> DLLSRST_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | ((u32::from(value) & 0x01) << 1);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `DLLLOCK`
pub struct DLLLOCK_W<'a> {
    w: &'a mut W,
}
impl<'a> DLLLOCK_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | ((u32::from(value) & 0x01) << 2);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `ZCAL`
pub struct ZCAL_W<'a> {
    w: &'a mut W,
}
impl<'a> ZCAL_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | ((u32::from(value) & 0x01) << 3);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `ITMSRST`
pub struct ITMSRST_W<'a> {
    w: &'a mut W,
}
impl<'a> ITMSRST_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | ((u32::from(value) & 0x01) << 4);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `DRAMRST`
pub struct DRAMRST_W<'a> {
    w: &'a mut W,
}
impl<'a> DRAMRST_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | ((u32::from(value) & 0x01) << 5);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `DRAMINIT`
pub struct DRAMINIT_W<'a> {
    w: &'a mut W,
}
impl<'a> DRAMINIT_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | ((u32::from(value) & 0x01) << 6);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `QSTRN`
pub struct QSTRN_W<'a> {
    w: &'a mut W,
}
impl<'a> QSTRN_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | ((u32::from(value) & 0x01) << 7);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `RVTRN`
pub struct RVTRN_W<'a> {
    w: &'a mut W,
}
impl<'a> RVTRN_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | ((u32::from(value) & 0x01) << 8);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `ICPC`
pub struct ICPC_W<'a> {
    w: &'a mut W,
}
impl<'a> ICPC_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | ((u32::from(value) & 0x01) << 16);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `DLLBYP`
pub struct DLLBYP_W<'a> {
    w: &'a mut W,
}
impl<'a> DLLBYP_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | ((u32::from(value) & 0x01) << 17);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `CTLDINIT`
pub struct CTLDINIT_W<'a> {
    w: &'a mut W,
}
impl<'a> CTLDINIT_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 18)) | ((u32::from(value) & 0x01) << 18);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `CLRSR`
pub struct CLRSR_W<'a> {
    w: &'a mut W,
}
impl<'a> CLRSR_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 28)) | ((u32::from(value) & 0x01) << 28);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `LOCKBYP`
pub struct LOCKBYP_W<'a> {
    w: &'a mut W,
}
impl<'a> LOCKBYP_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 29)) | ((u32::from(value) & 0x01) << 29);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `ZCALBYP`
pub struct ZCALBYP_W<'a> {
    w: &'a mut W,
}
impl<'a> ZCALBYP_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 30)) | ((u32::from(value) & 0x01) << 30);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Write proxy for field `INITBYP`
pub struct INITBYP_W<'a> {
    w: &'a mut W,
}
impl<'a> INITBYP_W<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 31)) | ((u32::from(value) & 0x01) << 31);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// No field readers are defined for this register here; only the
// write-side proxies above are generated.
impl R {}
impl W {
    /// Bit 0 - INIT
    #[inline(always)]
    pub fn init(&mut self) -> INIT_W { INIT_W { w: self } }
    /// Bit 1 - DLLSRST
    #[inline(always)]
    pub fn dllsrst(&mut self) -> DLLSRST_W { DLLSRST_W { w: self } }
    /// Bit 2 - DLLLOCK
    #[inline(always)]
    pub fn dlllock(&mut self) -> DLLLOCK_W { DLLLOCK_W { w: self } }
    /// Bit 3 - ZCAL
    #[inline(always)]
    pub fn zcal(&mut self) -> ZCAL_W { ZCAL_W { w: self } }
    /// Bit 4 - ITMSRST
    #[inline(always)]
    pub fn itmsrst(&mut self) -> ITMSRST_W { ITMSRST_W { w: self } }
    /// Bit 5 - DRAMRST
    #[inline(always)]
    pub fn dramrst(&mut self) -> DRAMRST_W { DRAMRST_W { w: self } }
    /// Bit 6 - DRAMINIT
    #[inline(always)]
    pub fn draminit(&mut self) -> DRAMINIT_W { DRAMINIT_W { w: self } }
    /// Bit 7 - QSTRN
    #[inline(always)]
    pub fn qstrn(&mut self) -> QSTRN_W { QSTRN_W { w: self } }
    /// Bit 8 - RVTRN
    #[inline(always)]
    pub fn rvtrn(&mut self) -> RVTRN_W { RVTRN_W { w: self } }
    /// Bit 16 - ICPC
    #[inline(always)]
    pub fn icpc(&mut self) -> ICPC_W { ICPC_W { w: self } }
    /// Bit 17 - DLLBYP
    #[inline(always)]
    pub fn dllbyp(&mut self) -> DLLBYP_W { DLLBYP_W { w: self } }
    /// Bit 18 - CTLDINIT
    #[inline(always)]
    pub fn ctldinit(&mut self) -> CTLDINIT_W { CTLDINIT_W { w: self } }
    /// Bit 28 - CLRSR
    #[inline(always)]
    pub fn clrsr(&mut self) -> CLRSR_W { CLRSR_W { w: self } }
    /// Bit 29 - LOCKBYP
    #[inline(always)]
    pub fn lockbyp(&mut self) -> LOCKBYP_W { LOCKBYP_W { w: self } }
    /// Bit 30 - ZCALBYP
    #[inline(always)]
    pub fn zcalbyp(&mut self) -> ZCALBYP_W { ZCALBYP_W { w: self } }
    /// Bit 31 - INITBYP
    #[inline(always)]
    pub fn initbyp(&mut self) -> INITBYP_W { INITBYP_W { w: self } }
}
|
// Program entry point: prints a greeting to stdout.
fn main() {
    let greeting = "Hello World!";
    println!("{}", greeting);
}
|
/// Demonstrates implicit (for-loop) vs explicit (`next`) iterator use.
fn main() {
    // A for loop takes ownership of the iterator and advances it for us;
    // the iterator's internal cursor is made mutable behind the scenes.
    let v1 = vec![1, 2, 3];
    for val in v1.iter() {
        println!("Got: {}", val);
    }
    // Driving the iterator by hand with `next` mutates its internal
    // state, so the binding must be declared `mut` explicitly.
    let v2 = vec![1, 2, 3];
    let mut v2_iter = v2.iter();
    assert_eq!(v2_iter.next(), Some(&1));
    assert_eq!(v2_iter.next(), Some(&2));
    assert_eq!(v2_iter.next(), Some(&3));
    assert_eq!(v2_iter.next(), None);
    // iter vs into_iter: https://stackoverflow.com/a/34745885/447661
    // (TODO: grok)
}
|
use crate::plan::global::NoCopy;
use crate::plan::global::Plan;
use crate::policy::space::Space;
use crate::scheduler::gc_work::*;
use crate::util::Address;
use crate::util::ObjectReference;
use crate::vm::VMBinding;
use crate::MMTK;
use std::ops::{Deref, DerefMut};
use super::MarkSweep;
/// Edge-processing work packet for the MarkSweep plan: pairs the plan
/// reference with the shared `ProcessEdgesBase` state.
pub struct MSProcessEdges<VM: VMBinding> {
    plan: &'static MarkSweep<VM>,
    base: ProcessEdgesBase<MSProcessEdges<VM>>,
}
impl<VM: VMBinding> ProcessEdgesWork for MSProcessEdges<VM> {
    type VM = VM;
    // Tracing uses the non-moving `NoCopy` path below, so edges never
    // need to be rewritten after tracing.
    const OVERWRITE_REFERENCE: bool = false;
    fn new(edges: Vec<Address>, _roots: bool, mmtk: &'static MMTK<VM>) -> Self {
        let base = ProcessEdgesBase::new(edges, mmtk);
        // Downcast presumes this packet is only created for a MarkSweep
        // plan — TODO confirm at the call sites.
        let plan = base.plan().downcast_ref::<MarkSweep<VM>>().unwrap();
        Self { plan, base }
    }
    #[inline]
    fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
        if object.is_null() {
            return object;
        }
        trace!("Tracing object {}", object);
        // Dispatch by owning space: the mark-sweep space traces its own
        // objects; everything else goes through the common plan.
        if self.plan.ms_space().in_space(object) {
            self.plan.ms_space().trace_object::<Self>(self, object)
        } else {
            self.plan
                .common()
                .trace_object::<Self, NoCopy<VM>>(self, object)
        }
    }
}
// Expose the shared base state transparently through the packet.
impl<VM: VMBinding> Deref for MSProcessEdges<VM> {
    type Target = ProcessEdgesBase<Self>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
// Mutable counterpart of the Deref impl above.
impl<VM: VMBinding> DerefMut for MSProcessEdges<VM> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
|
use serde::Deserialize;
use serde_state::DeserializeState;
use necsim_core_bond::{Partition, PositiveF64};
/// Fully-resolved arguments: `parallelism_mode` is always present after
/// deserialization (a missing value is defaulted from the partition count).
#[derive(Debug)]
#[allow(clippy::module_name_repetitions)]
pub struct MonolithicArguments {
    pub parallelism_mode: ParallelismMode,
}
impl<'de> DeserializeState<'de, Partition> for MonolithicArguments {
    /// Deserializes the raw arguments, then fills a missing
    /// `parallelism_mode` with a partition-count-dependent default.
    fn deserialize_state<D>(partition: &mut Partition, deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let raw = MonolithicArgumentsRaw::deserialize_state(partition, deserializer)?;
        let parallelism_mode = raw.parallelism_mode.unwrap_or_else(|| {
            if partition.partitions().get() > 1 {
                ParallelismMode::OptimisticLockstep
            } else {
                ParallelismMode::Monolithic
            }
        });
        Ok(MonolithicArguments { parallelism_mode })
    }
}
// First-stage deserialization target: mirrors `MonolithicArguments` with
// the mode optional so a default can be chosen afterwards.
#[derive(Default, Debug, DeserializeState)]
#[serde(default, deny_unknown_fields)]
#[serde(deserialize_state = "Partition")]
struct MonolithicArgumentsRaw {
    #[serde(deserialize_state)]
    parallelism_mode: Option<ParallelismMode>,
}
/// Settings for `ParallelismMode::Optimistic`.
#[derive(Debug, Deserialize)]
pub struct OptimisticParallelismMode {
    // Presumably the interval between synchronization points — TODO confirm.
    pub delta_sync: PositiveF64,
}
/// Settings for `ParallelismMode::Averaging`.
#[derive(Debug, Deserialize)]
pub struct AveragingParallelismMode {
    // Presumably the interval between averaging points — TODO confirm.
    pub delta_sync: PositiveF64,
}
/// How the simulation is parallelized. `Monolithic` requires exactly one
/// partition; every other variant requires more than one (enforced in
/// `deserialize_state` below).
#[derive(Debug, Deserialize)]
pub enum ParallelismMode {
    Monolithic,
    Optimistic(OptimisticParallelismMode),
    Lockstep,
    OptimisticLockstep,
    Averaging(AveragingParallelismMode),
}
impl<'de> DeserializeState<'de, Partition> for ParallelismMode {
    /// Deserializes a `ParallelismMode`, rejecting combinations that do
    /// not match the current partition count.
    fn deserialize_state<D>(partition: &mut Partition, deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        use serde::de::Error;
        let parallelism_mode = ParallelismMode::deserialize(deserializer)?;
        let is_monolithic = matches!(parallelism_mode, ParallelismMode::Monolithic);
        let partition_count = partition.partitions().get();
        if is_monolithic && partition_count > 1 {
            Err(D::Error::custom(format!(
                "parallelism_mode {:?} is incompatible with non-monolithic partitioning.",
                parallelism_mode
            )))
        } else if !is_monolithic && partition_count == 1 {
            Err(D::Error::custom(format!(
                "parallelism_mode {:?} is incompatible with monolithic partitioning.",
                parallelism_mode
            )))
        } else {
            Ok(parallelism_mode)
        }
    }
}
|
use std::fs;
use std::env;
use std::collections::HashMap;
/// Part 1: counts joltage gaps of 1 and 3 across the sorted adapter chain.
struct JoltageDifferenceCalculator {
    adapters: Vec<i32>,
}
impl JoltageDifferenceCalculator {
    /// Sorts the adapters, walks the chain starting from the 0-jolt
    /// outlet, and returns (count of 1-jolt gaps) * (count of 3-jolt gaps).
    ///
    /// `three_diff` starts at 1 to account for the device's built-in
    /// adapter, which is always 3 jolts above the highest adapter.
    fn calculate(&mut self) -> i32 {
        // Equal i32 elements are interchangeable, so the faster
        // non-allocating unstable sort yields an identical result.
        self.adapters.sort_unstable();
        println!("{:?}", self.adapters);
        let mut current_joltage = 0;
        let mut one_diff = 0;
        let mut three_diff = 1;
        for adapter in self.adapters.iter() {
            let diff = adapter - current_joltage;
            if diff % 3 == 0 {
                three_diff += 1;
            } else if diff % 2 != 0 {
                // Odd, non-multiple-of-3 gap: a 1-jolt step in valid input.
                // (Gaps of 2 are intentionally counted in neither bucket.)
                one_diff += 1;
            }
            current_joltage = *adapter;
        }
        println!("{}", self.adapters.len());
        println!("one diff: {}, three diff: {}", one_diff, three_diff);
        println!("{}", self.adapters.iter().sum::<i32>());
        one_diff * three_diff
    }
}
/// Part 2: counts distinct valid adapter orderings via memoized DFS.
struct DistinctAdapterCombinationCountCalculator {
    adapters: Vec<i32>,
}
impl DistinctAdapterCombinationCountCalculator {
    /// Adds the 0-jolt outlet and the device (max + 3), sorts, and counts
    /// every chain from the outlet to the device with steps of at most 3.
    fn calculate(&mut self) -> u128 {
        let mut history: HashMap<i32, u128> = HashMap::new();
        self.adapters.push(0);
        let device_joltage = self.adapters.iter().max().unwrap() + 3;
        self.adapters.push(device_joltage);
        // Equal i32 keys are interchangeable, so unstable sort is safe.
        self.adapters.sort_unstable();
        self.count_next_options(0, &mut history)
    }
    /// Number of valid chains from `adapters[index]` to the device,
    /// memoized per index in `history`.
    fn count_next_options(&self, index: i32, history: &mut HashMap<i32, u128>) -> u128 {
        // The device (last element) is the unique end of every chain.
        if index == (self.adapters.len() as i32 - 1) {
            return 1;
        }
        // One lookup instead of the original is_some()/unwrap() pair.
        if let Some(&cached) = history.get(&index) {
            return cached;
        }
        let mut total: u128 = 0;
        let start_index = (index + 1) as usize;
        let adapter = self.adapters[index as usize];
        for (mid_index, mid_adapter) in self.adapters[start_index ..].iter().enumerate() {
            // Successors must be 1..=3 jolts above; a duplicate joltage
            // (diff 0) also terminates the scan, as in the original.
            if *mid_adapter - adapter > 3 || *mid_adapter == adapter {
                break;
            }
            let next_index = start_index as i32 + mid_index as i32;
            total += self.count_next_options(next_index, history);
        }
        history.insert(index, total);
        total
    }
}
/// Wraps the puzzle input file and builds the two calculators from it.
struct Input {
    filename: String,
}
impl Input {
    fn new(filename: String) -> Input {
        Input { filename }
    }
    /// Parses one adapter joltage per input line (shared by both getters,
    /// which previously duplicated this logic inline).
    fn parse_adapters(&self) -> Vec<i32> {
        self.file_contents()
            .lines()
            .map(|line| line.parse::<i32>().unwrap())
            .collect()
    }
    fn get_jolt_difference_calculator(&self) -> JoltageDifferenceCalculator {
        JoltageDifferenceCalculator { adapters: self.parse_adapters() }
    }
    fn get_distinct_combi_calculator(&self) -> DistinctAdapterCombinationCountCalculator {
        DistinctAdapterCombinationCountCalculator { adapters: self.parse_adapters() }
    }
    /// Reads the whole input file; panics with a diagnostic on I/O error.
    fn file_contents(&self) -> String {
        println!("Loading contents from file: {}", self.filename);
        fs::read_to_string(&self.filename).expect("Something went wrong loading contents from file")
    }
}
/// Reads the input file (first CLI argument, default "input.txt") and
/// prints both puzzle answers.
fn main() {
    // `unwrap_or` would allocate the fallback String even when an
    // argument is supplied; `unwrap_or_else` allocates only when needed.
    let input_filename = env::args().nth(1).unwrap_or_else(|| "input.txt".to_string());
    let input = Input::new(input_filename);
    let mut joltage_difference_calculator = input.get_jolt_difference_calculator();
    let mut distinct_combi_calculator = input.get_distinct_combi_calculator();
    println!("Answer one: {}", joltage_difference_calculator.calculate());
    println!("Answer two: {}", distinct_combi_calculator.calculate());
}
|
use std::error::Error;
use std::fs::{copy, metadata, read_dir, remove_file, DirBuilder, File};
use std::io::prelude::*;
use std::path::Path;
use std::thread::{self, sleep};
use std::time::{Duration, SystemTime};
use iron::prelude::*;
use uuid::Uuid;
use serde_json::Value;
use multipart::server::{Multipart, Entries, SaveResult, SavedFile};
use schedule::{Agenda, Job};
use regex::{Regex, Captures};
use common::http::*;
use common::utils::get_file_ext;
use common::lazy_static::{CONFIG_TABLE, UPLOAD_PATH, UPLOAD_TEMP_PATH, UPLOAD_ASSETS_PATH};
/// Ensures both upload directories (temp and assets) exist, creating any
/// missing parent directories.
pub fn create_upload_folder() {
    for dir in vec![&*UPLOAD_TEMP_PATH, &*UPLOAD_ASSETS_PATH] {
        DirBuilder::new()
            .recursive(true)
            .create(dir)
            .unwrap();
    }
}
pub fn upload_file(req: &mut Request) -> IronResult<Response> {
match Multipart::from_request(req) {
Ok(mut multipart) => {
match multipart.save().temp() {
SaveResult::Full(entries) => process_entries(entries),
SaveResult::Partial(_entries, _reason) => {
respond_text("部分保存成功")
}
SaveResult::Error(_err) => {
respond_text("保存失败")
}
}
}
_ => {
respond_text("上传出错")
}
}
}
/// Turns every saved upload part into a temp file and responds with the
/// JSON list of the resulting file records.
fn process_entries(entries: Entries) -> IronResult<Response> {
    let mut temp_file_list = vec![];
    for file in entries.files.into_iter().flat_map(|(_name, files)| files) {
        create_temp_file(&file, &mut temp_file_list);
    }
    let mut data = JsonData::new();
    data.data = json!(&temp_file_list);
    respond_json(&data)
}
fn create_temp_file(saved_file: &SavedFile, temp_file_list: &mut Vec<Value> ) {
let original_filename = &*saved_file.filename.clone().unwrap();
let ext = get_file_ext(original_filename).unwrap_or("");
let uuid_filename = Uuid::new_v4().to_string() + "." + ext;
let dest_path = UPLOAD_TEMP_PATH.to_owned() + "/" + &*uuid_filename;
let path = Path::new(&dest_path);
let dest_name = path.display();
let mut data = Vec::new();
let mut temp_file = match File::open(&saved_file.path) {
Ok(file) => file,
Err(err) => panic!("can't open file: {}", err.description())
};
temp_file_list.push(json!({
"filename": saved_file.filename.clone().unwrap(),
"path": &path.to_owned()
}));
temp_file.read_to_end(&mut data).expect("unable to read data");
let mut new_file = match File::create(&path) {
Ok(file) => file,
Err(err) => panic!("can't create file {}: {}", dest_name, err.description())
};
match new_file.write_all(&data) {
Ok(_) => (),
Err(err) => panic!("can't wrote to file {}: {}", dest_path, err.description())
}
}
/// Spawns a background thread that periodically deletes files from the
/// upload temp directory once they are older than one day.
pub fn run_clean_temp_task() {
    let upload_config = CONFIG_TABLE.get("upload").unwrap().as_table().unwrap();
    // Milliseconds to sleep between scheduler polls.
    let ttl = upload_config.get("clean_temp_dir_ttl").unwrap().as_integer().unwrap() as u64;
    let upload_temp_path = upload_config.get("temp_path").unwrap().as_str().unwrap();
    thread::Builder::new()
        .name("run_clean_temp_task".to_string())
        .stack_size(4 * 1024 * 1024)
        .spawn(move || {
            let mut agenda = Agenda::new();
            let temp_dir_path = Path::new(&*upload_temp_path);
            // NOTE(review): "* * * * * *" schedules the job every second;
            // the effective pace is throttled by the `ttl` sleep below.
            agenda.add(Job::new(move || {
                let now = SystemTime::now();
                let one_day = Duration::from_millis(1000 * 60 * 60 * 24);
                for file_wrapper in read_dir(&temp_dir_path).unwrap() {
                    let file = file_wrapper.unwrap();
                    let file_path = file.path();
                    let create_time = metadata(&file_path).unwrap().created().unwrap();
                    if now.duration_since(create_time).unwrap() > one_day { // created but never synced for over a day
                        remove_file(&file_path).unwrap();
                    }
                }
            }, "* * * * * *".parse().unwrap()));
            loop {
                agenda.run_pending();
                sleep(Duration::from_millis(ttl));
            }
        }).unwrap();
}
/// Moves the files referenced in `content` out of the temp upload folder
/// into UPLOAD_ASSETS_PATH, and returns `content` with each matched link
/// rewritten to the assets path.
pub fn sync_upload_file(content: &str) -> String {
    let upload_temp_path = UPLOAD_PATH.to_owned() + "/" + &*UPLOAD_TEMP_PATH.to_owned() + "/";
    let upload_assets_path = UPLOAD_PATH.to_owned() + "/" + &*UPLOAD_ASSETS_PATH.to_owned() + "/";
    // Matches parenthesized link targets "(<temp path><filename>)";
    // capture group 1 is the bare filename.
    let reg_str = format!("\\({0}([-._0-9a-zA-Z]+).?\\)", upload_temp_path);
    let reg = Regex::new(&*reg_str).unwrap();
    let mut files: Vec<String> = Vec::new();
    // Rewrite each link to point at the assets folder and remember the
    // filename so the file itself can be moved below.
    let new_content = reg.replace_all(&content, |caps: &Captures| {
        let filename = caps.get(1).unwrap().as_str();
        files.push(filename.to_owned());
        format!("({0}{1})", upload_assets_path, filename)
    });
    for filename in files {
        let source_str = UPLOAD_TEMP_PATH.to_owned() + "/" + &*filename;
        let dest_str = UPLOAD_ASSETS_PATH.to_owned() + "/" + &*filename;
        {
            let source_path = Path::new(&*source_str);
            let dest_path = Path::new(&*dest_str);
            copy_and_delete_file(&*source_path, &*dest_path);
        }
    }
    new_content.to_string()
}
/// Copies `source_path` to `dest_path`, then deletes the source — i.e. a
/// move that also works across filesystems.
///
/// Panics on I/O failure, matching this module's existing error style.
fn copy_and_delete_file(source_path: &Path, dest_path: &Path) {
    // fs::copy replaces the manual open/read_to_end/create/write_all
    // sequence: no whole-file buffering and no deprecated description().
    if let Err(err) = copy(source_path, dest_path) {
        panic!("can't copy file to {:?}: {}", dest_path, err);
    }
    // Only remove the source once the copy has fully succeeded.
    remove_file(source_path).unwrap();
}
|
mod mesh;
mod mesh_grouper;
mod sculpt;
pub use self::mesh::{Mesh, Vertex, Instance};
pub use self::mesh_grouper::{MeshGrouper, GroupChange};
pub use self::sculpt::{SculptLine, Surface, SpannedSurface, FlatSurface, Sculpture, SkeletonSpine, RoofSurface, GableSurface}; |
#![feature(used)]
#![no_std]
extern crate cortex_m_semihosting;
#[cfg(not(feature = "use_semihosting"))]
extern crate panic_abort;
#[cfg(feature = "use_semihosting")]
extern crate panic_semihosting;
extern crate cortex_m;
extern crate cortex_m_rt;
extern crate atsamd21_hal;
extern crate metro_m0;
use metro_m0::clock::GenericClockController;
use metro_m0::delay::Delay;
use metro_m0::{CorePeripherals, Peripherals};
extern crate hd44780_driver;
use hd44780_driver::HD44780;
/// Board bring-up demo for a Metro M0: configures the clocks, drives an
/// HD44780 character LCD in 8-bit mode, prints a greeting, then spins.
fn main() {
    let mut peripherals = Peripherals::take().unwrap();
    let core = CorePeripherals::take().unwrap();
    // Clock setup must happen before the Delay timer is constructed.
    let mut clocks = GenericClockController::new(
        peripherals.GCLK,
        &mut peripherals.PM,
        &mut peripherals.SYSCTRL,
        &mut peripherals.NVMCTRL,
    );
    let mut pins = metro_m0::pins(peripherals.PORT);
    let delay = Delay::new(core.SYST, &mut clocks);
    // 8-bit bus wiring: register-select, enable, then data lines d0..d7.
    let mut lcd = HD44780::new_8bit(
        pins.d4.into_open_drain_output(&mut pins.port), // Register Select pin
        pins.d3.into_open_drain_output(&mut pins.port), // Enable pin
        pins.d5.into_open_drain_output(&mut pins.port), // d0
        pins.d6.into_open_drain_output(&mut pins.port), // d1
        pins.d7.into_open_drain_output(&mut pins.port), // d2
        pins.d8.into_open_drain_output(&mut pins.port), // d3
        pins.d9.into_open_drain_output(&mut pins.port), // d4
        pins.d10.into_open_drain_output(&mut pins.port), // d5
        pins.d11.into_open_drain_output(&mut pins.port), // d6
        pins.d12.into_open_drain_output(&mut pins.port), // d7
        delay,
    );
    // Unshift display and set cursor to 0
    lcd.reset();
    // Clear existing characters
    lcd.clear();
    // Display the following string
    lcd.write_str("Hello, world!");
    loop { }
}
|
use wasm_bindgen::prelude::*;
/// One calendar event extracted from an iCalendar VEVENT block.
#[wasm_bindgen]
#[derive(Debug, Clone)]
pub struct Event{
    // Raw field values as sliced out of the source text (not normalized).
    created: String,
    dtend: String,
    dtstart: String,
    summary: String,
    uid: String,
}
/// Parses iCalendar text, collecting each VEVENT's CREATED/DTEND/DTSTART/
/// SUMMARY/UID fields into an `Event`.
///
/// NOTE(review): the collected `Vec<Event>` (`x`) is built but dropped —
/// the function returns `()`. Presumably it should return the events to
/// the JS caller; confirm the intended wasm-bindgen return type.
#[wasm_bindgen]
pub fn parse_string(data:String) {
    let mut created ="";
    let mut dtend ="";
    let mut dtstart = "";
    let mut summary = "";
    let mut uid = "";
    let mut x :Vec<Event> = Vec::new();
    let _v: Vec<String> = data.split("\n").map(|s| s.to_string()).collect();
    let index_outer=0;
    // NOTE(review): the loop variable shadows the binding above; the
    // outer scan also revisits event-body lines (they just fail the
    // BEGIN:VEVENT comparison).
    for index_outer in index_outer.._v.len(){
        if _v[index_outer]==("BEGIN:VEVENT") {
            let index= index_outer;
            // Scan forward from the BEGIN line until END:VEVENT.
            for index in index.._v.len(){
                if _v[index].contains("CREATED"){
                    // Value follows the first ':' (e.g. "CREATED:2020...").
                    let created_vec: Vec<&str> = _v[index].split(':').collect();
                    created=created_vec[1];
                }
                else if _v[index].contains("DTEND"){
                    // NOTE(review): splits on ';', which assumes a
                    // parameterized form like "DTEND;TZID=..."; a plain
                    // "DTEND:..." line would make index [1] panic — confirm.
                    let dtend_vec: Vec<&str> = _v[index].split(';').collect();
                    dtend=dtend_vec[1];
                }
                else if _v[index].contains("DTSTART"){
                    // NOTE(review): same ';' assumption as DTEND above.
                    let dstart_vec: Vec<&str> = _v[index].split(';').collect();
                    dtstart=dstart_vec[1];
                }
                else if _v[index].contains("SUMMARY"){
                    let summary_vec: Vec<&str> = _v[index].split(':').collect();
                    summary=summary_vec[1];
                }
                else if _v[index].contains("UID"){
                    let uid_vec: Vec<&str> = _v[index].split(':').collect();
                    uid=uid_vec[1];
                }
                else if _v[index].contains("END:VEVENT"){
                    // Event complete: snapshot the accumulated fields.
                    let eventfound = Event {
                        created: created.to_string(),
                        dtend: dtend.to_string(),
                        dtstart: dtstart.to_string(),
                        summary: summary.to_string(),
                        uid: uid.to_string(),
                    };
                    x.push(eventfound);
                    break;
                }
            }
        }
    }
}
|
use std::convert::From;
use std::ops::Deref;
/// A 5-bit unsigned value (0..=31); `From<u8>` masks with 0x1f.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct U5(u8);
/// A 7-bit unsigned value (0..=127); `From<u8>` masks with 0x7f.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct U14(u16);
/// A 14-bit unsigned value, convertible to/from two 7-bit halves.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct U7(u8);
/// A channel number (0..=15); `From<u8>` masks with 0x0f.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct Channel(u8);
impl From<u8> for U7 {
    /// Masks to 7 bits; debug builds assert the high bit was clear.
    fn from(raw: u8) -> U7 {
        debug_assert!(raw & 0x80 == 0);
        U7(raw & 0x7f)
    }
}
impl Deref for U7 {
    type Target = u8;
    #[inline]
    fn deref(&self) -> &u8 {
        &self.0
    }
}
impl From<u8> for U5 {
    /// Masks to 5 bits; debug builds assert the top three bits were clear.
    fn from(raw: u8) -> U5 {
        debug_assert!(raw & 0xe0 == 0);
        U5(raw & 0x1f)
    }
}
impl Deref for U5 {
    type Target = u8;
    #[inline]
    fn deref(&self) -> &u8 {
        &self.0
    }
}
impl From<u16> for U14 {
    /// Masks to 14 bits; debug builds assert the top two bits were clear.
    fn from(raw: u16) -> U14 {
        debug_assert!(raw & 0xc000 == 0);
        U14(raw & 0x3fff)
    }
}
impl From<(U7, U7)> for U14 {
    /// Combines an `(lsb, msb)` pair of 7-bit halves into one value.
    fn from(halves: (U7, U7)) -> U14 {
        let (lsb, msb) = halves;
        U14(*lsb as u16 + ((*msb as u16) << 7))
    }
}
impl From<U14> for (U7, U7) {
    /// Splits into the `(lsb, msb)` pair of 7-bit halves.
    fn from(value: U14) -> (U7, U7) {
        (value.lsb(), value.msb())
    }
}
impl U14 {
    /// Low 7 bits.
    #[inline]
    pub fn lsb(&self) -> U7 {
        U7((self.0 & 0x7f) as u8)
    }
    /// High 7 bits.
    #[inline]
    pub fn msb(&self) -> U7 {
        U7((self.0 >> 7) as u8)
    }
}
impl Deref for U14 {
    type Target = u16;
    #[inline]
    fn deref(&self) -> &u16 {
        &self.0
    }
}
impl From<u8> for Channel {
    /// Masks to 4 bits; debug builds assert the high nibble was clear.
    fn from(raw: u8) -> Channel {
        debug_assert!(raw & 0xf0 == 0);
        Channel(raw & 0x0f)
    }
}
impl Deref for Channel {
    type Target = u8;
    #[inline]
    fn deref(&self) -> &u8 {
        &self.0
    }
}
// `U7::from` debug_asserts that bit 7 is clear; 129 has it set, so this
// panics in debug builds (which the test harness uses by default).
#[test]
#[should_panic]
fn out_of_range() {
    let _: U7 = 129.into();
}
// Round-trips an (lsb, msb) pair through U14: 0x30 + (0x40 << 7) == 0x2030.
#[test]
fn test_u14() {
    let a = U7(0x40);
    let b = U7(0x30);
    let c: U14 = (b, a).into();
    assert_eq!(c.msb(), a);
    assert_eq!(c.lsb(), b);
    assert_eq!(*c, 0x2030);
}
|
/// Demo type: a single owned string mutated through `&mut self` methods.
struct MyStruct {
    name : String
}
impl MyStruct {
    /// Sets `name` to "hello", then immediately overwrites it via `f2`.
    fn f1(&mut self) {
        self.name = String::from("hello");
        self.f2("world");
    }
    /// Overwrites `name` with `last_name` and prints the new value.
    fn f2(&mut self, last_name : &str) {
        self.name = last_name.to_owned();
        println!("{}", self.name);
    }
}
/// Exercises mutable borrows, method calls, and block-expression init.
fn main() {
    let mut x = 1i32;
    {
        // While `y` mutably borrows `x`, direct writes to `x` are frozen.
        let y = &mut x;
        *y = 3i32;
        println!("y={}", *y);
    }
    println!("x={}", x);
    let mut s = MyStruct { name : String::new() };
    s.f1();
    // Build the array inside a block, then bind the finished value once.
    let const_vec = {
        let mut values : [i32; 10] = [0; 10];
        values[1] = 2;
        values[5] = 3;
        values
    };
    println!("vec init : {}", const_vec[5]);
}
//! Serde helpers for lenient (de)serialization: accept bools encoded as
//! integers, and numbers encoded as either JSON numbers or strings.
use serde::{Deserializer, Serializer};
/// Deserializes a `bool` from either a JSON bool or a non-negative
/// integer (1 maps to `true`, any other integer to `false`; see
/// `BoolOrIntegerVisitor`).
#[inline]
pub fn bool_or_integer<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(BoolOrIntegerVisitor)
}
/// Visitor backing `bool_or_integer`: accepts a bool directly, or maps an
/// unsigned integer to `true` exactly when it equals 1.
pub struct BoolOrIntegerVisitor;
impl<'de> serde::de::Visitor<'de> for BoolOrIntegerVisitor {
    type Value = bool;
    fn expecting(
        &self,
        formatter: &mut std::fmt::Formatter,
    ) -> std::fmt::Result {
        write!(formatter, "a bool or integer")
    }
    fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // NOTE(review): values other than 1 (2, 3, …) map to false —
        // confirm 0/1 are the only inputs seen in practice.
        Ok(value == 1)
    }
    fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(value)
    }
}
/// Serializes a bool as the integer 1 (`true`) or 0 (`false`).
#[inline]
pub fn bool_to_u8<S>(x: &bool, s: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    s.serialize_u8(u8::from(*x))
}
/// Deserializes an `f64` from either a JSON number or a string that
/// parses as one (see `F64OrStringVisitor`).
#[inline]
pub fn f64_or_string<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(F64OrStringVisitor)
}
/// Visitor backing `f64_or_string`: passes floats through and parses
/// strings via `str::parse::<f64>`, surfacing parse errors as serde errors.
pub struct F64OrStringVisitor;
impl<'de> serde::de::Visitor<'de> for F64OrStringVisitor {
    type Value = f64;
    fn expecting(
        &self,
        formatter: &mut std::fmt::Formatter,
    ) -> std::fmt::Result {
        write!(formatter, "a number or string")
    }
    fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(value)
    }
    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        value.parse().map_err(serde::de::Error::custom)
    }
}
/// Deserializes a `u64` from either a JSON number or a string that
/// parses as one (see `U64OrStringVisitor`).
#[inline]
pub fn u64_or_string<'de, D>(deserializer: D) -> Result<u64, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(U64OrStringVisitor)
}
/// Visitor backing `u64_or_string`: passes unsigned integers through and
/// parses strings via `str::parse::<u64>`.
pub struct U64OrStringVisitor;
impl<'de> serde::de::Visitor<'de> for U64OrStringVisitor {
    type Value = u64;
    fn expecting(
        &self,
        formatter: &mut std::fmt::Formatter,
    ) -> std::fmt::Result {
        write!(formatter, "a number or string")
    }
    fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        Ok(value)
    }
    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        value.parse().map_err(serde::de::Error::custom)
    }
}
|
mod gltf;
mod object_trs;
mod curve;
mod primitive;
use nitro::Model;
use db::{Database, ModelId};
use connection::Connection;
use primitives::{Primitives, PolyType, DynamicState};
use skeleton::{Skeleton, Transform, SMatrix};
use super::image_namer::ImageNamer;
use cgmath::Matrix4;
use json::JsonValue;
use self::gltf::{GlTF, Buffer, ByteVec, VecExt};
use self::object_trs::ObjectTRSes;
use util::{BiVec, BiMap};
use self::curve::{GlTFObjectCurves, CurveDomain};
use nitro::animation::Curve;
use std::collections::HashMap;
use self::primitive::encode_ngons;
use nds::Alpha;
static FRAME_LENGTH: f32 = 1.0 / 60.0; // 60 fps
/// Bundles everything the per-model glTF export stages need to share.
struct Ctx<'a> {
    model_id: ModelId,
    model: &'a Model,
    db: &'a Database,
    conn: &'a Connection,
    image_namer: &'a ImageNamer,
    /// Object transforms for the model's rest pose.
    rest_trses: ObjectTRSes,
    prims: &'a Primitives,
    skel: &'a Skeleton,
}
/// Converts one model from the database into a glTF document.
///
/// Builds rest-pose object matrices and material UV matrices, runs the
/// primitive builder (tris+quads, with n-gons re-encoded), builds the
/// skeleton, then emits the mesh/node/animation/material sections and
/// prunes unused entries via `cleanup()`.
pub fn to_gltf(
    db: &Database,
    conn: &Connection,
    image_namer: &ImageNamer,
    model_id: ModelId,
) -> GlTF {
    let model = &db.models[model_id];
    let rest_trses = ObjectTRSes::for_model_at_rest(model);
    let objects = rest_trses.objects.iter()
        .map(Matrix4::from)
        .collect::<Vec<_>>();
    let uv_mats = model.materials.iter()
        .map(|mat| mat.texture_mat)
        .collect::<Vec<Matrix4<f64>>>();
    let state = DynamicState { objects: &objects, uv_mats: &uv_mats };
    let prims = Primitives::build(model, PolyType::TrisAndQuads, state);
    let prims = &encode_ngons(prims);
    let skel = &Skeleton::build(model, &objects);
    let ctx = Ctx { model_id, model, db, conn, image_namer, rest_trses, prims, skel };
    let mut gltf = GlTF::new();
    // Geometry-dependent sections are only emitted when there is geometry.
    if !ctx.prims.vertices.is_empty() {
        mesh(&ctx, &mut gltf);
        nodes(&ctx, &mut gltf);
        animations(&ctx, &mut gltf);
    }
    materials(&ctx, &mut gltf);
    gltf.cleanup();
    gltf
}
// glTF constants
// Accessor componentType codes from the glTF 2.0 spec (these match the
// corresponding OpenGL enum values); NEAREST is a sampler filter mode.
static UNSIGNED_BYTE: u32 = 5121;
static UNSIGNED_SHORT: u32 = 5123;
static FLOAT: u32 = 5126;
static NEAREST: u32 = 9728;
/// Emits the glTF mesh for the model: one accessor per used vertex attribute
/// (position, texcoord, color, normal, joints, weights), an index buffer, and
/// one glTF primitive per draw call.
fn mesh(ctx: &Ctx, gltf: &mut GlTF) {
    let verts = &ctx.prims.vertices;
    // Positions
    // glTF wants the min/max, so compute that first
    let mut min = verts[0].position.clone();
    let mut max = verts[0].position.clone();
    for v in verts {
        for i in 0..3 {
            min[i] = min[i].min(v.position[i]);
            max[i] = max[i].max(v.position[i]);
        }
    }
    let pos_accessor = {
        // Tightly-packed f32 xyz triples, one per vertex.
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(3 * verts.len() * 4),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        for v in verts {
            dat.push_f32(v.position[0]);
            dat.push_f32(v.position[1]);
            dat.push_f32(v.position[2]);
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
        ));
        gltf.json["accessors"].add(object!(
            "bufferView" => buf_view,
            "type" => "VEC3",
            "componentType" => FLOAT,
            "count" => verts.len(),
            "min" => min.to_vec(),
            "max" => max.to_vec(),
        ))
    };
    // Texcoord
    // Only emitted if at least one draw call actually samples a texture.
    let has_texcoords = ctx.prims.draw_calls.iter().any(|call| call.used_texcoords);
    let tex_accessor = if has_texcoords {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(2 * verts.len() * 4),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        for v in verts {
            dat.push_f32(v.texcoord[0]);
            // Flip V: glTF's UV origin is the top-left corner.
            dat.push_f32(1.0 - v.texcoord[1]);
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
        ));
        Some(gltf.json["accessors"].add(object!(
            "bufferView" => buf_view,
            "type" => "VEC2",
            "componentType" => FLOAT,
            "count" => verts.len(),
        )))
    } else {
        None
    };
    // Color
    let has_colors = ctx.prims.draw_calls.iter().any(|call| call.used_vertex_color);
    let color_accessor = if has_colors {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(4 * verts.len() * 1),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        // Is the DS in sRGB??
        fn srgb_to_linear(s: f32) -> f32 {
            if s < 0.04045 {
                s / 12.92
            } else {
                ((s + 0.055) / 1.055).powf(2.4)
            }
        }
        // Since each channel is originally only 5 bits, 8 bits should be enough
        // to store it in linear space, so use normalized u8s.
        for v in verts {
            dat.push_normalized_u8(srgb_to_linear(v.color[0]));
            dat.push_normalized_u8(srgb_to_linear(v.color[1]));
            dat.push_normalized_u8(srgb_to_linear(v.color[2]));
            dat.push(255); // padding (keeps every vertex's color 4-byte aligned)
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
            // Stride 4 = rgb + one padding byte.
            "byteStride" => 4,
        ));
        Some(gltf.json["accessors"].add(object!(
            "bufferView" => buf_view,
            "type" => "VEC3",
            "componentType" => UNSIGNED_BYTE,
            "normalized" => true,
            "count" => verts.len(),
        )))
    } else {
        None
    };
    // Normals
    let has_normals = ctx.prims.draw_calls.iter().any(|call| call.used_normals);
    let normal_accessor = if has_normals {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(3 * verts.len() * 4),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        for v in verts {
            dat.push_f32(v.normal[0]);
            dat.push_f32(v.normal[1]);
            dat.push_f32(v.normal[2]);
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
        ));
        Some(gltf.json["accessors"].add(object!(
            "bufferView" => buf_view,
            "type" => "VEC3",
            "componentType" => FLOAT,
            "count" => verts.len(),
        )))
    } else {
        None
    };
    // Now joints/weights
    // glTF gives joint/weight influences in sets of 4 (JOINT_0 is a VEC4
    // accessor with the first four joints, JOINTS_1 has the next four, etc).
    // Find out how many sets we need.
    let num_sets = ((ctx.skel.max_num_weights + 3) / 4) as usize;
    // Make sure joints fit in a byte
    assert!(ctx.skel.tree.node_count() <= 255);
    // Joints
    // One interleaved buffer holds all sets: 4*num_sets joint bytes per
    // vertex, zero-padded past the vertex's real influence count. Each set's
    // accessor then selects its 4 bytes via byteOffset + byteStride.
    let joints_accessors = {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(4 * num_sets * verts.len() * 1),
        });
        let dat_len = {
            let dat = &mut gltf.buffers[buf].bytes;
            for vi in 0 .. verts.len() {
                let ws = ctx.skel.vert_weights(vi);
                for i in 0 .. 4 * num_sets {
                    if i < ws.len() {
                        dat.push(ws[i].joint as u8);
                    } else {
                        dat.push(0);
                    }
                }
            }
            dat.len()
        };
        (0..num_sets).map(|set_num| {
            let buf_view = gltf.json["bufferViews"].add(object!(
                "buffer" => buf,
                "byteOffset" => 4 * set_num,
                "byteStride" => 4 * num_sets,
                "byteLength" => dat_len - 4 * set_num,
            ));
            gltf.json["accessors"].add(object!(
                "bufferView" => buf_view,
                "type" => "VEC4",
                "componentType" => UNSIGNED_BYTE,
                "count" => verts.len(),
            ))
        }).collect::<Vec<_>>()
    };
    // Weights
    // Same interleaved layout as the joints buffer, but storing normalized-u8
    // weights instead of joint indices.
    let weights_accessors = {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(4 * num_sets * verts.len() * 1),
        });
        let dat_len = {
            let dat = &mut gltf.buffers[buf].bytes;
            for vi in 0 .. verts.len() {
                let ws = ctx.skel.vert_weights(vi);
                for i in 0 .. 4 * num_sets {
                    if i < ws.len() {
                        dat.push_normalized_u8(ws[i].weight);
                    } else {
                        dat.push(0);
                    }
                }
            }
            dat.len()
        };
        (0..num_sets).map(|set_num| {
            let buf_view = gltf.json["bufferViews"].add(object!(
                "buffer" => buf,
                "byteOffset" => 4 * set_num,
                "byteStride" => 4 * num_sets,
                "byteLength" => dat_len - 4 * set_num,
            ));
            gltf.json["accessors"].add(object!(
                "bufferView" => buf_view,
                "type" => "VEC4",
                "componentType" => UNSIGNED_BYTE,
                "normalized" => true,
                "count" => verts.len(),
            ))
        }).collect::<Vec<_>>()
    };
    // Put the indices into a buffer view
    // (u16 indices; joint count was already asserted to fit, and each draw
    // call carves out its own accessor range below.)
    let index_buf_view = {
        let buf = gltf.buffers.add(Buffer {
            alignment: 2,
            bytes: Vec::with_capacity(verts.len() * 2),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        for &ind in &ctx.prims.indices {
            dat.push_u16(ind);
        }
        gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
        ))
    };
    // One glTF primitive per draw call
    let primitives = ctx.prims.draw_calls.iter().map(|call| {
        // Sub-range of the shared index buffer for this draw call.
        let indices_accessor = gltf.json["accessors"].add(object!(
            "bufferView" => index_buf_view,
            "type" => "SCALAR",
            "byteOffset" => 2 * call.index_range.start,
            "componentType" => UNSIGNED_SHORT,
            "count" => call.index_range.len(),
        ));
        let mut primitive = object!(
            "attributes" => object!(
                "POSITION" => pos_accessor,
            ),
            "material" => call.mat_id,
            "indices" => indices_accessor,
            // FB_ngon_encoding marks the triangulation as encode_ngons output
            // so importers can recover the original polygons.
            "extensions" => object!(
                "FB_ngon_encoding" => object!(),
            ),
        );
        // Optional attributes are only attached when they were emitted above.
        if let Some(tex_accessor) = tex_accessor {
            primitive["attributes"]["TEXCOORD_0"] = tex_accessor.into();
        }
        if let Some(color_accessor) = color_accessor {
            primitive["attributes"]["COLOR_0"] = color_accessor.into();
        }
        if let Some(normal_accessor) = normal_accessor {
            primitive["attributes"]["NORMAL"] = normal_accessor.into();
        }
        for (set_num, &joints_accessor) in joints_accessors.iter().enumerate() {
            primitive["attributes"][format!("JOINTS_{}", set_num)] = joints_accessor.into();
        }
        for (set_num, &weights_accessor) in weights_accessors.iter().enumerate() {
            primitive["attributes"][format!("WEIGHTS_{}", set_num)] = weights_accessor.into();
        }
        primitive
    }).collect::<Vec<JsonValue>>();
    let mesh = object!(
        "primitives" => primitives,
        "name" => ctx.model.name.to_string(),
    );
    gltf.json["meshes"] = array!(mesh);
    gltf.json["extensionsUsed"].push("FB_ngon_encoding").unwrap();
}
/// Emits the glTF node hierarchy (one node per skeleton joint), the skin that
/// binds the mesh to it, and the scene.
fn nodes(ctx: &Ctx, gltf: &mut GlTF) {
    if ctx.prims.draw_calls.is_empty() {
        return;
    }
    // Make a node tree from the skeleton tree. The NodeIndices for skel.tree
    // are the same as the indices into the glTF nodes array.
    gltf.json["nodes"] = ctx.skel.tree.node_idxs().map(|idx| {
        let mut node = object!();
        let children = ctx.skel.tree
            .children(idx)
            .collect::<Vec<_>>();
        if !children.is_empty() {
            node["children"] = children.into();
        }
        match ctx.skel.tree[idx].local_to_parent {
            Transform::Root => {
                node["name"] = "<ROOT>".into();
            }
            // Object-matrix joints get the rest-pose TRS so they can be
            // targeted by animations later.
            Transform::SMatrix(SMatrix::Object { object_idx }) => {
                node["name"] = ctx.model
                    .objects[object_idx as usize]
                    .name
                    .to_string()
                    .into();
                let trs = &ctx.rest_trses.objects[object_idx as usize];
                if let Some(t) = trs.translation {
                    node["translation"] = array!(t.x, t.y, t.z);
                }
                if let Some(r) = trs.rotation_quaternion {
                    // glTF stores quaternions as (x, y, z, w).
                    node["rotation"] = array!(r.v.x, r.v.y, r.v.z, r.s);
                }
                if let Some(s) = trs.scale {
                    node["scale"] = array!(s.x, s.y, s.z);
                }
            }
            Transform::SMatrix(SMatrix::InvBind { inv_bind_idx }) => {
                node["name"] = format!("<INV BIND #{}>", inv_bind_idx).into();
                // TODO
            }
            Transform::SMatrix(SMatrix::Uninitialized { stack_pos }) => {
                node["name"] = format!("<UNINITIALIZED #{}>", stack_pos).into();
            }
        }
        node
    }).collect::<Vec<_>>().into();
    // Add another node above the skeleton root to instantiate the mesh at
    // (glTF-Blender-IO doesn't like it when we instantiate a mesh on a node
    // that's also used as a joint).
    gltf.json["nodes"].push(object!(
        "mesh" => 0,
        "skin" => 0,
        "name" => ctx.model.name.to_string(),
        "children" => array!(ctx.skel.root),
    )).unwrap();
    // Make the skin
    let skel = &ctx.skel;
    let inv_bind_accessor = {
        let buf = gltf.buffers.add(Buffer {
            alignment: 4,
            bytes: Vec::with_capacity(16 * skel.tree.node_count() * 4),
        });
        let dat = &mut gltf.buffers[buf].bytes;
        // One inverse-bind MAT4 per joint, in node order, flattened to f32s.
        for joint_idx in skel.tree.node_idxs() {
            let joint = &skel.tree[joint_idx];
            let matrix: &[f64; 16] = joint.rest_world_to_local.as_ref();
            for &entry in matrix {
                dat.push_f32(entry as f32);
            }
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => buf,
            "byteLength" => dat.len(),
        ));
        gltf.json["accessors"].add(object!(
            "bufferView" => buf_view,
            "type" => "MAT4",
            "componentType" => FLOAT,
            "count" => skel.tree.node_count(),
        ))
    };
    // Every node in the tree is a joint.
    gltf.json["skins"] = array!(
        object!(
            "skeleton" => skel.root,
            "joints" => (0..skel.tree.node_count()).collect::<Vec<_>>(),
            "inverseBindMatrices" => inv_bind_accessor,
        )
    );
    // The scene's only root is the mesh-instance node pushed above; it sits
    // at index node_count(), right after the joint nodes.
    gltf.json["scenes"] = array!(
        object!(
            "nodes" => array!(skel.tree.node_count()),
            "name" => ctx.model.name.to_string(),
        )
    );
    gltf.json["scene"] = 0.into();
}
/// Emits glTF animations: joint (object-matrix) animations as standard glTF
/// channels/samplers, and material UV-offset animations through the
/// EXT_property_animation extension. Keyframe timelines are deduplicated and
/// written out at the end.
fn animations(ctx: &Ctx, gltf: &mut GlTF) {
    let models_animations = &ctx.conn.models[ctx.model_id].animations;
    let mat_animations = &ctx.conn.models[ctx.model_id].mat_anims;
    if models_animations.is_empty() && mat_animations.is_empty() {
        return;
    }
    // A keyframe timeline: frames start_frame, start_frame + sampling_rate,
    // ..., strictly below end_frame.
    #[derive(Hash, Copy, Clone, PartialEq, Eq)]
    struct TimelineDescriptor {
        start_frame: u16,
        end_frame: u16,
        sampling_rate: u16,
    }
    // Maps an accessor index to the description of the keyframes it should
    // contain. Let's us reuse keyframes between curves. We wait until we're
    // done writing the animations to actually fill in the accessor fields
    // though.
    let mut timeline_descs = BiMap::<usize, TimelineDescriptor>::new();
    // Used to hold the sample data.
    let data_buffer = gltf.buffers.add(Buffer {
        bytes: Vec::with_capacity(1024 * 512),
        alignment: 4,
    });
    let data_buf_view = gltf.json["bufferViews"].add(object!(
        "buffer" => data_buffer,
        // NOTE: must fill out byte length when we finish writing to data_buffer
    ));
    let mut animations =
        models_animations.iter()
        .map(|&animation_id| {
            let anim = &ctx.db.animations[animation_id];
            // Resample each object's TRS curves into glTF-friendly curves.
            let object_curves =
                anim.objects_curves.iter()
                .map(|c| GlTFObjectCurves::for_trs_curves(c))
                .collect::<Vec<GlTFObjectCurves>>();
            #[derive(Hash, Clone, Copy, PartialEq, Eq)]
            enum SamplerPath {
                Translation,
                Rotation,
                Scale,
            }
            #[derive(Hash, Clone, Copy, PartialEq, Eq)]
            struct SamplerDescriptor {
                object_idx: u8,
                path: SamplerPath,
            }
            // Each glTF sampler will contain the curve for one TRS property of
            // one object matrix. This maps a sampler index to the description
            // of what it will contain.
            let mut sampler_descs = BiVec::<SamplerDescriptor>::new();
            // The channels array wires nodes/paths up to the samplers they use.
            let mut channels = Vec::<JsonValue>::new();
            for node_idx in ctx.skel.tree.node_idxs() {
                // Only objects are animated
                let object_idx = match ctx.skel.tree[node_idx].local_to_parent {
                    Transform::SMatrix(SMatrix::Object { object_idx }) => object_idx,
                    _ => continue,
                };
                let curves = &object_curves[object_idx as usize];
                // Add channels for any of the TRSs that are animated for this
                // object
                if let Curve::Samples { .. } = curves.translation {
                    let sampler_descriptor = SamplerDescriptor {
                        object_idx,
                        path: SamplerPath::Translation,
                    };
                    sampler_descs.push(sampler_descriptor);
                    channels.push(object!(
                        "target" => object!(
                            "node" => node_idx,
                            "path" => "translation",
                        ),
                        "sampler" => sampler_descs.idx(&sampler_descriptor),
                    ));
                }
                if let Curve::Samples { .. } = curves.rotation {
                    let sampler_descriptor = SamplerDescriptor {
                        object_idx,
                        path: SamplerPath::Rotation,
                    };
                    sampler_descs.push(sampler_descriptor);
                    channels.push(object!(
                        "target" => object!(
                            "node" => node_idx,
                            "path" => "rotation",
                        ),
                        "sampler" => sampler_descs.idx(&sampler_descriptor),
                    ));
                }
                if let Curve::Samples { .. } = curves.scale {
                    let sampler_descriptor = SamplerDescriptor {
                        object_idx,
                        path: SamplerPath::Scale,
                    };
                    sampler_descs.push(sampler_descriptor);
                    channels.push(object!(
                        "target" => object!(
                            "node" => node_idx,
                            "path" => "scale",
                        ),
                        "sampler" => sampler_descs.idx(&sampler_descriptor),
                    ));
                }
            }
            // Now use the sampler descriptions to write the actual samplers
            let samplers = sampler_descs.iter().map(|desc| {
                let &SamplerDescriptor { object_idx, path } = desc;
                let curves = &object_curves[object_idx as usize];
                let domain = match path {
                    SamplerPath::Translation => curves.translation.domain(),
                    SamplerPath::Rotation => curves.rotation.domain(),
                    SamplerPath::Scale => curves.scale.domain(),
                };
                let (start_frame, end_frame, sampling_rate) = match domain {
                    // Only sampled curves got channels above.
                    CurveDomain::None => unreachable!(),
                    CurveDomain::Sampled { start_frame, end_frame, sampling_rate } =>
                        (start_frame, end_frame, sampling_rate),
                };
                let timeline_descriptor = TimelineDescriptor {
                    start_frame, end_frame, sampling_rate,
                };
                // Reserve the input accessor (filled in at the end of this
                // function; identical timelines share one accessor).
                if !timeline_descs.right_contains(&timeline_descriptor) {
                    let accessor = gltf.json["accessors"].add(
                        JsonValue::new_object()
                    );
                    timeline_descs.insert((accessor, timeline_descriptor));
                };
                let &input = timeline_descs.backward(&timeline_descriptor);
                // Make the output accessor
                let data = &mut gltf.buffers[data_buffer].bytes;
                let output = match path {
                    SamplerPath::Translation | SamplerPath::Scale => {
                        let values = match path {
                            SamplerPath::Translation => match curves.translation {
                                Curve::Samples { ref values, .. } => values,
                                _ => unreachable!(),
                            },
                            SamplerPath::Scale => match curves.scale {
                                Curve::Samples { ref values, .. } => values,
                                _ => unreachable!(),
                            },
                            _ => unreachable!(),
                        };
                        let byte_offset = data.len();
                        data.reserve(3 * values.len() * 4);
                        for v in values {
                            data.push_f32(v.x as f32);
                            data.push_f32(v.y as f32);
                            data.push_f32(v.z as f32);
                        }
                        gltf.json["accessors"].add(object!(
                            "bufferView" => data_buf_view,
                            "type" => "VEC3",
                            "componentType" => FLOAT,
                            "byteOffset" => byte_offset,
                            "count" => values.len(),
                        ))
                    }
                    SamplerPath::Rotation => {
                        let values = match curves.rotation {
                            Curve::Samples { ref values, .. } => values,
                            _ => unreachable!(),
                        };
                        let byte_offset = data.len();
                        data.reserve(4 * values.len() * 4);
                        // glTF quaternion order is (x, y, z, w).
                        for quat in values {
                            data.push_f32(quat.v.x as f32);
                            data.push_f32(quat.v.y as f32);
                            data.push_f32(quat.v.z as f32);
                            data.push_f32(quat.s as f32);
                        }
                        gltf.json["accessors"].add(object!(
                            "bufferView" => data_buf_view,
                            "type" => "VEC4",
                            "componentType" => FLOAT,
                            "byteOffset" => byte_offset,
                            "count" => values.len(),
                        ))
                        // IDEA: would probably be okay to emit normalized i16s
                        // instead of floats...
                    }
                };
                object!(
                    "input" => input,
                    "output" => output,
                )
            }).collect::<Vec<JsonValue>>();
            object!(
                "name" => anim.name.to_string(),
                "samplers" => samplers,
                "channels" => channels,
            )
        })
        .collect::<Vec<JsonValue>>();
    // Now material animations
    let model = &ctx.db.models[ctx.model_id];
    let mut had_mat_anims = false;
    for mat_anim_conn in mat_animations {
        let mat_anim = &ctx.db.mat_anims[mat_anim_conn.mat_anim_id];
        let mut channels: Vec<JsonValue> = vec![];
        let mut samplers: Vec<JsonValue> = vec![];
        for track in &mat_anim.tracks {
            // Channels 3/4 hold the U/V translation curves; the other
            // channels of the track are not exported here.
            let u_off_curve = &track.channels[3].curve;
            let v_off_curve = &track.channels[4].curve;
            // Get common domain
            let domain = u_off_curve.domain().union(v_off_curve.domain());
            let (start_frame, end_frame, sampling_rate) = match domain {
                CurveDomain::None => continue,
                CurveDomain::Sampled { start_frame, end_frame, sampling_rate } =>
                    (start_frame, end_frame, sampling_rate),
            };
            let timeline_descriptor = TimelineDescriptor {
                start_frame, end_frame, sampling_rate,
            };
            // Reserve the input accessor
            if !timeline_descs.right_contains(&timeline_descriptor) {
                let accessor = gltf.json["accessors"].add(
                    JsonValue::new_object()
                );
                timeline_descs.insert((accessor, timeline_descriptor));
            };
            let &input = timeline_descs.backward(&timeline_descriptor);
            // Find the target
            let material_idx = model.materials.iter().position(|mat| mat.name == track.name).unwrap();
            // EXT_property_animation JSON-pointer-style target path.
            let target = format!(
                "/materials/{}/pbrMetallicRoughness/baseColorTexture/extensions/KHR_texture_transform/offset",
                material_idx,
            );
            // Find texture dimensions
            let (w, h) = (model.materials[material_idx].width as f64, model.materials[material_idx].height as f64);
            let data = &mut gltf.buffers[data_buffer].bytes;
            let byte_offset = data.len();
            // NOTE(review): integer division rounds down, but the
            // `while frame < end_frame` loop below writes
            // ceil((end - start) / rate) samples, so "count" undercounts by
            // one whenever the frame range isn't a multiple of the sampling
            // rate — verify against the input accessor's count.
            let num_samples = (end_frame - start_frame) / sampling_rate ;
            data.reserve(4 * 2 * num_samples as usize);
            let mut frame = start_frame;
            while frame < end_frame {
                let u_off = u_off_curve.sample_at(0.0, frame);
                let v_off = v_off_curve.sample_at(0.0, frame);
                // Convert to glTF texture space
                let u_off = u_off / w;
                let v_off = v_off / h;
                data.push_f32(u_off as f32);
                data.push_f32(v_off as f32);
                frame += sampling_rate;
            }
            let output = gltf.json["accessors"].add(object!(
                "bufferView" => data_buf_view,
                "type" => "VEC2",
                "componentType" => FLOAT,
                "byteOffset" => byte_offset,
                "count" => num_samples,
            ));
            let sampler = samplers.add(object!(
                "input" => input,
                "output" => output,
            ));
            channels.push(object!(
                "target" => target,
                "sampler" => sampler,
            ));
        }
        if samplers.is_empty() { continue }
        animations.push(object!(
            "name" => mat_anim.name.to_string(),
            "samplers" => samplers,
            // glTF requires this be non-empty, so we add a channel that does
            // nothing.
            "channels" => vec![object!(
                "target" => object!("path" => "scale"),
                "sampler" => 0,
            )],
            "extensions" => object!(
                "EXT_property_animation" => object!(
                    "channels" => channels,
                )
            )
        ));
        had_mat_anims = true;
    }
    if had_mat_anims {
        gltf.json["extensionsUsed"].push("KHR_texture_transform").unwrap();
        gltf.json["extensionsUsed"].push("EXT_property_animation").unwrap();
    }
    // All sample data has been written; backfill the byte length promised
    // when data_buf_view was created.
    gltf.json["bufferViews"][data_buf_view]["byteLength"] =
        gltf.buffers[data_buffer].bytes.len().into();
    // Now we need to write out the keyframe descriptors to real accessors.
    // The reason we deferred it is because we can share most of this data.
    //
    // For each rate, find the range of values used by timelines with that
    // rate. Write that range of values sampled at that rate into a buffer.
    // Eg.
    //
    // rate 1: 1 2 3 4 5
    // rate 2: 2 4
    // rate 4: 4 8 12
    //
    // Make a buffer view for each of these rates. Then for each accessor,
    // reference the buffer view for that rate and use the byteOffset and
    // count properties to select the appropriate subrange.
    let mut rate_to_range = HashMap::<u16, std::ops::Range<u16>>::new();
    let mut rate_to_buf_view = HashMap::<u16, usize>::new();
    for (_, &timeline_desc) in timeline_descs.iter() {
        let TimelineDescriptor { start_frame, end_frame, sampling_rate } =
            timeline_desc;
        // Grow the per-rate range to cover this timeline.
        let range =
            rate_to_range.entry(sampling_rate)
            .or_insert(start_frame..end_frame);
        range.start = range.start.min(start_frame);
        range.end = range.end.max(end_frame);
    }
    let time_buf = gltf.buffers.add(Buffer {
        alignment: 4,
        bytes: vec![],
    });
    let dat = &mut gltf.buffers[time_buf].bytes;
    for (&rate, range) in rate_to_range.iter() {
        let byte_offset = dat.len();
        let mut frame = range.start;
        while frame < range.end {
            // Convert frame numbers to seconds.
            dat.push_f32(frame as f32 * FRAME_LENGTH);
            frame += rate;
        }
        let buf_view = gltf.json["bufferViews"].add(object!(
            "buffer" => time_buf,
            "byteOffset" => byte_offset,
            "byteLength" => dat.len() - byte_offset,
        ));
        rate_to_buf_view.insert(rate, buf_view);
    }
    // Fill in the accessors reserved earlier, each selecting its subrange of
    // the shared per-rate timeline.
    for (&accessor_idx, &timeline_desc) in timeline_descs.iter() {
        let TimelineDescriptor { start_frame, end_frame, sampling_rate } =
            timeline_desc;
        let range = rate_to_range[&sampling_rate].clone();
        let buf_view = rate_to_buf_view[&sampling_rate];
        // The offset inside the buffer view of our starting frame
        let offset = (start_frame - range.start) / sampling_rate;
        let byte_offset = 4 * offset;
        // min/max (in seconds) are required for animation input accessors.
        let min = start_frame as f32 * FRAME_LENGTH;
        let max = (end_frame - sampling_rate) as f32 * FRAME_LENGTH;
        gltf.json["accessors"][accessor_idx] = object!(
            "bufferView" => buf_view,
            "type" => "SCALAR",
            "componentType" => FLOAT,
            "byteOffset" => byte_offset,
            "count" => (end_frame - start_frame) / sampling_rate,
            "min" => array!(min),
            "max" => array!(max),
        );
    }
    gltf.json["animations"] = animations.into();
}
/// Emits glTF materials for the model, plus the deduplicated samplers,
/// images, and textures they reference. Materials are marked unlit
/// (KHR_materials_unlit) to match the DS's fixed-function look.
fn materials(ctx: &Ctx, gltf: &mut GlTF) {
    #[derive(Copy, Clone, Hash, PartialEq, Eq)]
    enum WrapMode {
        Clamp,
        MirroredRepeat,
        Repeat,
    }
    #[derive(Copy, Clone, Hash, PartialEq, Eq)]
    struct SamplerDescriptor {
        wrap_s: WrapMode,
        wrap_t: WrapMode,
    }
    // Maps a sampler index to the wrapping mode it should use.
    let mut sampler_descs = BiVec::<SamplerDescriptor>::new();
    // Maps an image index to the image name it should use.
    let mut image_descs = BiVec::<String>::new();
    #[derive(Copy, Clone, Hash, PartialEq, Eq)]
    struct TextureDescriptor {
        sampler: usize,
        image: usize,
    }
    // Maps a texture index to the sampler and image it will use.
    let mut texture_descs = BiVec::<TextureDescriptor>::new();
    let materials = ctx.model.materials.iter().enumerate()
        .map(|(material_idx, material)| {
            let mut mat = object!(
                "name" => material.name.to_string(),
                "pbrMetallicRoughness" => JsonValue::new_object(),
                "extensions" => object!(
                    "KHR_materials_unlit" => JsonValue::new_object(),
                )
            );
            let image_id =
                ctx.conn.models[ctx.model_id]
                .materials[material_idx].image_id();
            match image_id {
                Ok(Some(image_id)) => {
                    // Pick an alpha mode from the texture format.
                    let params = ctx.db.textures[image_id.0].params;
                    match params.format().alpha_type(params) {
                        Alpha::Opaque => (),
                        Alpha::Transparent =>
                            mat["alphaMode"] = "MASK".into(),
                        Alpha::Translucent =>
                            mat["alphaMode"] = "BLEND".into(),
                    }
                    // Mirroring only applies when repeating is on.
                    let wrap = |repeat, mirror| {
                        match (repeat, mirror) {
                            (false, _) => WrapMode::Clamp,
                            (true, false) => WrapMode::Repeat,
                            (true, true) => WrapMode::MirroredRepeat,
                        }
                    };
                    let params = material.params;
                    let sampler_desc = SamplerDescriptor {
                        wrap_s: wrap(params.repeat_s(), params.mirror_s()),
                        wrap_t: wrap(params.repeat_t(), params.mirror_t()),
                    };
                    let sampler = sampler_descs.push(sampler_desc);
                    let image_name = &ctx.image_namer.names[&image_id];
                    let image = image_descs.push(image_name.clone());
                    let texture_desc = TextureDescriptor { sampler, image };
                    let texture = texture_descs.push(texture_desc);
                    mat["pbrMetallicRoughness"]["baseColorTexture"] =
                        object!("index" => texture);
                    // Kill the metallic look (the default factor is 1).
                    mat["pbrMetallicRoughness"]["metallicFactor"] = 0.into();
                }
                // No image (or an error resolving it): leave untextured.
                _ => (),
            }
            // Only emit a baseColorFactor when it differs from the default.
            let has_diffuse =
                !material.diffuse_is_default_vertex_color &&
                material.diffuse != [1.0, 1.0, 1.0];
            if has_diffuse || material.alpha != 1.0 {
                let [r, g, b] = if has_diffuse {
                    material.diffuse
                } else {
                    [1.0, 1.0, 1.0]
                };
                mat["pbrMetallicRoughness"]["baseColorFactor"] = array!(r, g, b, material.alpha);
            }
            if material.alpha == 0.0 {
                mat["alphaMode"] = "MASK".into();
            } else if material.alpha != 1.0 {
                mat["alphaMode"] = "BLEND".into();
            }
            if material.emission != [0.0, 0.0, 0.0] {
                // Does nothing since we use KHR_materials_unlit
                mat["emissiveFactor"] = material.emission.to_vec().into();
            }
            if !material.cull_backface {
                mat["doubleSided"] = true.into();
            }
            // TODO: handle cull frontfacing
            if mat["pbrMetallicRoughness"].is_empty() {
                mat.remove("pbrMetallicRoughness");
            }
            mat
        }).collect::<Vec<JsonValue>>();
    // Map WrapMode to the GL wrap enum values glTF uses.
    let wrap = |wrap_mode| {
        match wrap_mode {
            WrapMode::Clamp => 33071,
            WrapMode::MirroredRepeat => 33648,
            WrapMode::Repeat => 10497,
        }
    };
    gltf.json["samplers"] = sampler_descs.iter().map(|desc| {
        object!(
            "wrapS" => wrap(desc.wrap_s),
            "wrapT" => wrap(desc.wrap_t),
            "magFilter" => NEAREST,
            "minFilter" => NEAREST,
        )
    }).collect::<Vec<JsonValue>>().into();
    gltf.json["images"] = image_descs.iter().map(|name| {
        object!(
            "uri" => format!("{}.png", name),
        )
    }).collect::<Vec<JsonValue>>().into();
    gltf.json["textures"] = texture_descs.iter().map(|desc| {
        object!(
            "source" => desc.image,
            "sampler" => desc.sampler,
        )
    }).collect::<Vec<JsonValue>>().into();
    gltf.json["materials"] = materials.into();
    // Empty arrays are invalid glTF; drop them.
    if gltf.json["samplers"].is_empty() { gltf.json.remove("samplers"); }
    if gltf.json["images"].is_empty() { gltf.json.remove("images"); }
    if gltf.json["textures"].is_empty() { gltf.json.remove("textures"); }
    if gltf.json["materials"].is_empty() { gltf.json.remove("materials"); }
    if gltf.json.has_key("materials") {
        gltf.json["extensionsUsed"].push("KHR_materials_unlit").unwrap();
    }
}
|
extern crate image;
use crate::utils;
use image::{DynamicImage, ImageFormat};
use std::fs::{File, OpenOptions};
use std::io::Error;
/// Something that can rotate itself in place.
///
/// NOTE(review): declared but never implemented in this file —
/// `JPEGRotator::rotate` below is an inherent async method taking `&self`,
/// which doesn't match this signature. Confirm whether the trait is still
/// needed.
trait Rotator {
    fn rotate(&mut self);
}
//use exif
// Rotates a JPEG file on disk so it displays upright, based on the EXIF
// orientation tag read from the file itself.
pub struct JPEGRotator {
    // Path the image was opened from; also the save destination.
    path: String,
    // Read/write handle to the image file.
    file: File,
}
impl JPEGRotator {
    /// Returns a new JPEG rotator if the image file is successfully opened.
    /// Otherwise, an Error
    pub fn new(path: String) -> Result<Self, Error> {
        let file = OpenOptions::new().read(true).write(true).open(&path)?;
        Ok(JPEGRotator { path, file })
    }

    /// Performs rotation on the internal image handle depending on
    /// the orientation value, and saves the result back to the same path.
    pub async fn rotate(&self) -> Result<(), String> {
        println!("processing file: {}", self.path);
        let exif = utils::get_exif(&self.file)?;
        let orientation = utils::get_orientation_value(&exif);
        println!("{}: orientation is {}", self.path, orientation);
        if let Some(image) = self.perform_rotation(orientation) {
            let _exif = utils::get_orientation_fixed_exif(&exif);
            //TODO abhi: find some way to insert EXIF data after the JFIF data
            self.save(image);
        }
        Ok(())
    }

    /// Maps the EXIF orientation value (1-8) to the transform that makes the
    /// image upright. Returns `None` when the image is already upright (1) or
    /// the value is outside the valid EXIF range.
    ///
    /// NOTE(review): `utils::get_exif` above reads from the same `File`
    /// handle; if it advances the cursor, `image::load` here starts mid-file.
    /// Confirm the helper rewinds or uses its own reader.
    fn perform_rotation(&self, orientation: u16) -> Option<DynamicImage> {
        let bufreader = std::io::BufReader::new(&self.file);
        match image::load(bufreader, ImageFormat::Jpeg) {
            Ok(image) => match orientation {
                1 => {
                    println!("{}: orientation looks cool already!", self.path);
                    None
                }
                // Orientations 2, 4, 5 and 7 are the mirrored variants; they
                // need a flip (possibly combined with a rotation) to come out
                // upright.
                2 => {
                    println!("{}: flipping horizontally ...", self.path);
                    Some(image.fliph())
                }
                3 => {
                    println!("{}: rotating 180 degrees ...", self.path);
                    Some(image.rotate180())
                }
                4 => {
                    println!("{}: flipping vertically ...", self.path);
                    Some(image.flipv())
                }
                5 => {
                    println!("{}: rotating 90 degrees and flipping ...", self.path);
                    Some(image.rotate90().fliph())
                }
                6 => {
                    println!("{}: rotating 90 degrees ...", self.path);
                    Some(image.rotate90())
                }
                7 => {
                    println!("{}: rotating 270 degrees and flipping ...", self.path);
                    Some(image.rotate270().fliph())
                }
                8 => {
                    println!("{}: rotating 270 degrees ...", self.path);
                    Some(image.rotate270())
                }
                _ => {
                    println!("{}: invalid orientation found :(", self.path);
                    None
                }
            },
            _ => {
                println!(
                    "{}: error loading image. this tool works with valid jpeg images.",
                    self.path
                );
                None
            }
        }
    }

    /// Saves the rotated image to the same file
    fn save(&self, image: DynamicImage) {
        match image.save(&self.path) {
            Ok(_) => {
                println!("{}: saved file!", self.path);
            }
            Err(e) => println!("{}: error saving file {}", e, self.path),
        }
    }
}
|
/// All products `a * b` over index pairs `i < j` where `a + b == target`.
/// Elements of the outer scan greater than `target` are skipped, matching the
/// original early-`continue` (an unsigned addend above the target can never
/// be part of the sum).
fn pair_products(numbers: &[u32], target: u32) -> Vec<u32> {
    let mut products = Vec::new();
    for (i, &a) in numbers.iter().enumerate() {
        if a > target {
            continue;
        }
        for &b in &numbers[i + 1..] {
            if a + b == target {
                products.push(a * b);
            }
        }
    }
    products
}

/// All products `a * b * c` over index triples `i < j < k` where
/// `a + b + c == target`, with the same outer-element skip as above.
fn triple_products(numbers: &[u32], target: u32) -> Vec<u32> {
    let mut products = Vec::new();
    for (i, &a) in numbers.iter().enumerate() {
        if a > target {
            continue;
        }
        for (j, &b) in numbers.iter().enumerate().skip(i + 1) {
            for &c in &numbers[j + 1..] {
                if a + b + c == target {
                    products.push(a * b * c);
                }
            }
        }
    }
    products
}

/// Advent of Code day 1: find the pair and triple of expense-report entries
/// summing to 2020 and print the product of each.
fn main() {
    let numbers: Vec<u32> = include_str!("inputs/day01.txt")
        .lines()
        .map(|x| x.parse::<u32>().unwrap())
        .collect();
    for product in pair_products(&numbers, 2020) {
        println!("Part1:Answer: {}", product)
    }
    for product in triple_products(&numbers, 2020) {
        println!("Part2:Answer: {}", product)
    }
}
|
use std::collections::HashMap;
use std::time::Duration;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use crate::model::{AlbumSimplified, ArtistSimplified, Context, Restrictions, TypeTrack};
// Generates a struct (via the project's `to_struct!` macro) containing the
// fields shared by every "track" object — i.e. the fields of a simplified
// track — plus whatever extra fields the caller supplies. This lets `Track`
// effectively "inherit" from `TrackSimplified` without duplicating the field
// list.
macro_rules! inherit_track_simplified {
    ($(#[$attr:meta])* $name:ident { $($(#[$f_attr:meta])* $f_name:ident : $f_ty:ty,)* }) => {
        to_struct!($(#[$attr])* $name {
            // Caller-supplied extra fields come first.
            $(
                $(#[$f_attr])*
                $f_name: $f_ty,
            )*
            /// The artists who performed the track.
            artists: Vec<ArtistSimplified>,
            /// The markets in which this track is available. Only [`Some`] if the market parameter
            /// is not supplied in the request. This is an ISO-3166 2-letter country code.
            available_markets: Option<Vec<String>>,
            /// The disc number (1 unless the album contains more than one disc).
            disc_number: usize,
            /// The track length.
            #[serde(rename = "duration_ms", with = "serde_millis")]
            duration: Duration,
            /// Whether the track has explicit lyrics, false if unknown.
            explicit: bool,
            /// Known external URLs for this track.
            external_urls: HashMap<String, String>,
            /// The [Spotify ID](https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids)
            /// for this track. Only not present for a local track, which can only ever be obtained
            /// from a playlist.
            id: Option<String>,
            /// When [track
            /// relinking](https://developer.spotify.com/documentation/general/guides/track-relinking-guide/)
            /// is applied, if the track is playable in the given market.
            is_playable: Option<bool>,
            /// When [track
            /// relinking](https://developer.spotify.com/documentation/general/guides/track-relinking-guide/)
            /// is applied and the requested track has been replaced by a different one.
            linked_from: Option<TrackLink>,
            /// When [track
            /// relinking](https://developer.spotify.com/documentation/general/guides/track-relinking-guide/)
            /// is applied, the original track isn't available in the given market and Spotify didn't have
            /// any tracks to relink it with, then this is Some.
            restrictions: Option<Restrictions>,
            /// The name of the track.
            name: String,
            /// Link to a 30 second MP3 preview of the track, doesn't have to be there.
            preview_url: Option<String>,
            /// The 1-indexed number of the track in its album; if the track has several discs,
            /// then it the number on the specified disc.
            track_number: usize,
            /// The item type; `track`.
            #[serde(rename = "type")]
            item_type: TypeTrack,
            /// Whether the track is a local track.
            is_local: bool,
        });
    }
}
// The base struct: only the shared fields.
inherit_track_simplified!(
    /// A simplified track object.
    TrackSimplified {}
);
// The full struct: shared fields plus album/external-ID/popularity data.
inherit_track_simplified!(
    /// A track object.
    Track {
        /// The album on which this track appears.
        album: AlbumSimplified,
        /// Known external IDs for this track.
        external_ids: HashMap<String, String>,
        /// The popularity of the track. The value will be between 0 and 100, with 100 being the most
        /// popular. The popularity is calculated from the total number of plays and how recent they
        /// are.
        popularity: u32,
    }
);
impl Track {
/// Convert to a `TrackSimplified`.
#[must_use]
pub fn simplify(self) -> TrackSimplified {
TrackSimplified {
artists: self.artists,
available_markets: self.available_markets,
disc_number: self.disc_number,
duration: self.duration,
explicit: self.explicit,
external_urls: self.external_urls,
id: self.id,
is_playable: self.is_playable,
linked_from: self.linked_from,
restrictions: self.restrictions,
name: self.name,
preview_url: self.preview_url,
track_number: self.track_number,
item_type: TypeTrack,
is_local: self.is_local,
}
}
}
impl From<Track> for TrackSimplified {
fn from(track: Track) -> Self {
track.simplify()
}
}
/// A link to a track.
///
/// Returned as the `linked_from` field when track relinking replaces a
/// requested track with another one.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TrackLink {
    /// Known external URLs for this track.
    pub external_urls: HashMap<String, String>,
    /// The [Spotify ID](https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids)
    /// for this track.
    pub id: String,
    /// The item type; `track`.
    #[serde(rename = "type")]
    pub item_type: TypeTrack,
}
/// When and how a track was played.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct PlayHistory {
    /// The track the user listened to.
    pub track: TrackSimplified,
    /// When the track was played.
    pub played_at: DateTime<Utc>,
    /// The context from which the track was played (e.g. a playlist or
    /// album), when Spotify reports one.
    pub context: Option<Context>,
}
/// Information about a track that has been saved to the user's library.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SavedTrack {
    /// When the track was saved.
    pub added_at: DateTime<Utc>,
    /// Information about the track.
    pub track: Track,
}
/// The number of tracks an object contains.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Tracks {
    /// The number of tracks.
    pub total: usize,
}
|
/// A point in the integer plane.
///
/// `Copy`/`PartialEq`/`Eq` are derived so points can be passed by value
/// without explicit clones and compared directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32
}

/// Where a point lies relative to the coordinate axes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Quadrant {
    Origin,
    // On the x-axis (y == 0, x != 0).
    XAxis,
    // On the y-axis (x == 0, y != 0).
    YAxis,
    TopLeft,
    TopRight,
    BottomLeft,
    BottomRight
}

/// Classifies `p` as the origin, a point on an axis, or a point inside one
/// of the four quadrants. Axis arms are checked first, so points on an axis
/// are never classified as quadrant points; the catch-all arm can only be
/// reached with x < 0 and y > 0, i.e. the top-left quadrant.
fn quadrant(p: Point) -> Quadrant {
    match p {
        Point { x: 0, y: 0 } => Quadrant::Origin,
        Point { y: 0, .. } => Quadrant::XAxis,
        Point { x: 0, .. } => Quadrant::YAxis,
        Point { x, y } if x > 0 && y > 0 => Quadrant::TopRight,
        Point { x, y } if x > 0 && y < 0 => Quadrant::BottomRight,
        Point { x, y } if x < 0 && y < 0 => Quadrant::BottomLeft,
        _ => Quadrant::TopLeft
    }
}
/// Classifies a handful of sample points and prints each verdict.
fn main() {
    let samples = vec![
        Point { x: 0, y: 0 },
        Point { x: 0, y: 1 },
        Point { x: 1, y: 0 },
        Point { x: 1, y: -1 },
        Point { x: -1, y: 1 },
        Point { x: -1, y: -1 },
        Point { x: 1, y: 1 },
    ];
    for point in samples {
        // `quadrant` consumes its argument, so keep a clone for printing.
        let shown = point.clone();
        println!("{:?} site in {:?}", &shown, quadrant(point));
    }
}
|
use std::net::SocketAddr;
use clap::{Parser, ValueEnum};
use client::run_client;
use hydroflow::tokio;
use hydroflow::util::{bind_udp_lines, ipv4_resolve};
use server::run_server;
mod client;
mod helpers;
mod protocol;
mod server;
// Which side of the echo example this process runs as.
// (Plain `//` comments on purpose: doc comments would become clap help text.)
#[derive(Clone, ValueEnum, Debug)]
enum Role {
    Client,
    Server,
}
// Command-line options for the echo example.
// (Plain `//` comments on purpose: doc comments would become clap help text.)
#[derive(Parser, Debug)]
struct Opts {
    // run as client or server
    #[clap(value_enum, long)]
    role: Role,
    // address the client binds to; defaults to an ephemeral local port
    #[clap(long, value_parser = ipv4_resolve)]
    client_addr: Option<SocketAddr>,
    // address the server listens on / the client connects to; required
    #[clap(long, value_parser = ipv4_resolve)]
    server_addr: Option<SocketAddr>,
}
/// Entry point: parses CLI options and runs either the UDP echo server or
/// the echo client, depending on `--role`.
#[hydroflow::main]
async fn main() {
    // parse command line arguments
    let opts = Opts::parse();
    // `--server-addr` is required for both roles; fail with a clear message
    // instead of a bare `unwrap` panic.
    let server_addr = opts
        .server_addr
        .expect("--server-addr is required");
    // depending on the role, pass in arguments to the right function
    match opts.role {
        Role::Server => {
            // allocate `outbound` and `inbound` sockets
            let (outbound, inbound, _) = bind_udp_lines(server_addr).await;
            println!("Listening on {:?}", server_addr);
            run_server(outbound, inbound).await;
        }
        Role::Client => {
            // allocate `outbound` sink and `inbound` stream; default to an
            // ephemeral local port when `--client-addr` is not given
            let client_addr = opts
                .client_addr
                .unwrap_or_else(|| ipv4_resolve("localhost:0").unwrap());
            let (outbound, inbound, client_addr) = bind_udp_lines(client_addr).await;
            println!(
                "Client is bound to {:?}, connecting to Server at {:?}",
                client_addr, server_addr
            );
            // run the client
            run_client(outbound, inbound, server_addr).await;
        }
    }
}
/// End-to-end smoke test: spawns the server and client binaries of this
/// example against a fixed port and drives one round trip via the client's
/// stdin.
#[test]
fn test() {
    use std::io::Write;
    use hydroflow::util::{run_cargo_example, wait_for_process_output};
    let (_server, _, mut server_output) = run_cargo_example(
        "echo_serde_json",
        "--role server --server-addr 127.0.0.1:2049",
    );
    let (_client, mut client_input, mut client_output) = run_cargo_example(
        "echo_serde_json",
        "--role client --server-addr 127.0.0.1:2049",
    );
    // Output accumulators (presumably appended to by wait_for_process_output
    // as it scans — TODO confirm against the hydroflow util docs).
    let mut server_output_so_far = String::new();
    let mut client_output_so_far = String::new();
    // Block until both processes report they are up before sending input.
    wait_for_process_output(
        &mut server_output_so_far,
        &mut server_output,
        "Server live!\n",
    );
    wait_for_process_output(
        &mut client_output_so_far,
        &mut client_output,
        "Client live!\n",
    );
    client_input.write_all(b"Hello\n").unwrap();
    // The echo comes back with a server-side timestamp, so match via regex.
    wait_for_process_output(
        &mut client_output_so_far,
        &mut client_output,
        "EchoMsg \\{ payload: \"Hello\", ts: .* \\}",
    );
}
|
#![cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
use log::{Level, Log, Metadata, Record};
use wasm_bindgen::prelude::*;
// Use web-sys once it's published (rustwasm/wasm-bindgen#613)
// Raw bindings to the browser's `console.*` logging functions.
#[wasm_bindgen]
extern "C" {
    #[wasm_bindgen(js_namespace = console)]
    fn error(s: &str);
    #[wasm_bindgen(js_namespace = console)]
    fn warn(s: &str);
    #[wasm_bindgen(js_namespace = console)]
    fn info(s: &str);
    #[wasm_bindgen(js_namespace = console)]
    fn debug(s: &str);
}
impl Log for WasmConsoleLogger {
    // Trace is the most verbose level, so every record is enabled.
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Trace
    }
    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            // Pick the console function matching the record's severity; no
            // `trace` binding is declared above, so Trace falls back to `debug`.
            let log_fn = match record.level() {
                Level::Error => error,
                Level::Warn => warn,
                Level::Info => info,
                Level::Debug => debug,
                Level::Trace => debug,
            };
            log_fn(&format!(
                "{}: {}: {}",
                record.level(),
                record.module_path().unwrap_or("<no module>"),
                record.args()
            ));
        }
    }
    // Console output needs no flushing.
    fn flush(&self) {}
}
/// Zero-sized logger that forwards `log` records to the browser console.
pub struct WasmConsoleLogger;
/// Global logger instance, suitable for registering with the `log` crate.
pub static WASM_LOGGER: WasmConsoleLogger = WasmConsoleLogger;
|
pub use highpass::HighPass;
pub use lowpass::LowPass;
mod highpass;
mod lowpass;
/// Audio filter
pub trait Filter {
    /// Filters a single audio sample, returning the filtered sample.
    fn filter(&mut self, input: f32) -> f32;
    /// Resets the filter's internal state (e.g. before a new signal).
    fn reset(&mut self);
}
|
use gpio::{Gpio, PinId, PinMode, PinState};
/// A no-op `Gpio` implementation for tests and hardware-free development.
pub struct GpioMock;
impl Gpio for GpioMock {
    /// Does nothing; a mock has no hardware to configure.
    /// (Parameters are underscored to avoid unused-variable warnings.)
    fn pin_mode(&mut self, _pin: PinId, _mode: PinMode) {
    }
    /// Does nothing; written states are discarded.
    fn digital_write(&mut self, _pin: PinId, _state: PinState) {
    }
    /// Always reports `PinState::Low`; the mock keeps no pin state.
    fn digital_read(&self, _pin: PinId) -> PinState {
        PinState::Low
    }
}
|
use std::cell::{ RefCell, Ref, RefMut };
use crate::types::*;
use crate::spawns::*;
/// Pointer is a reference to objects in the scene, which is used to find and update these objects.
/// A Pointer can hold a reference to an object that doesn't exist anymore,
/// the exists(pointer) methode can be used to check a pointer before using it.
///
pub type Pointer = usize;
/// Errors returned by `Scene` operations.
#[derive(Debug, PartialEq)]
pub enum SceneError {
    Overflow, // spawned more items than the pool can hold
    OutOfBounds, // pointer outside the pool boundaries set during new()
    GroupNotFound, // group outside the boundaries set during new()
    FactoryNotFound, // no factory available for this group
}
/// Scene is basically a manager for all entities and where to find them.
/// It uses object pooling by instantiating a fixed number of entities at startup,
/// in order to maintain decent render speeds when creating and destroying entities
/// during the game.
/// Scene also provides tools for searching and borrowing spawned entities.
///
pub struct Scene<T: Entity> {
    factories: Vec<Box::<dyn Factory<T>>>, // one object factory per group
    pool: Vec<RefCell<T>>, // fixed-size storage for every pooled object
    spawns: Vec<Spawn>, // bookkeeping record per pool slot
    free: Vec<Pointer>, // slots currently available for spawning
    in_use: Vec<Spawn>, // spawns currently active
    groups: Vec<Vec<Pointer>>, // active pointers, bucketed per factory group
    itter_count: usize, // iteration cursor (only initialized in the visible code)
}
impl<T: Entity> Scene<T> {
/// Create a new Scene entity manager instance.
///
/// By setting the 'size' parameter, you preset the maximum amount of
/// Object the new pool can hold and therefore spawn.
///
/// Factories are custom object factories that implement the Factory trait
/// which can be called upon, by the Scene, when spawning a new object.
/// Factories give the user the freedom to create different types of
/// objects by customizing the output of these factories.
///
pub fn new(size: usize, factories: Vec<Box::<dyn Factory<T>>>) -> Self {
let mut pool: Vec<RefCell<T>> = Vec::new();
pool.resize_with(size, || { RefCell::new(T::default()) });
let mut spawns: Vec<Spawn> = Vec::new();
spawns.resize_with(size, Spawn::default);
let mut free: Vec<Pointer> = Vec::with_capacity(size);
let in_use: Vec<Spawn> = Vec::with_capacity(size);
let mut groups: Vec<Vec<Pointer>> = Vec::new();
groups.resize_with(factories.len(), Vec::new);
for i in 0..size {
spawns[i].pointer = i;
free.push(i);
}
for group in &mut groups {
group.resize_with(size, Pointer::default);
}
Scene { factories, pool, spawns, free, in_use, groups, itter_count: 0, }
}
pub fn get_factory(&self, group: &Group) -> &Box::<dyn Factory<T>> {
&self.factories[*group]
}
pub fn mut_factory(&mut self, group: &Group) -> &mut Box::<dyn Factory<T>> {
&mut self.factories[*group]
}
/// Returns a cloned list of spawn currently in use.
///
pub fn list_spawned(&self) -> Vec<Spawn> {
self.in_use.clone()
}
/// Returns a reference to a RefCell box containing the requested object.
/// If the spawned object has been destroyed the inactive object will still be returned.
/// You can use the methodes exists and exists_in_group to find out if objects are currently active.
///
pub fn get_ref(&self, spawn: &Spawn) -> Ref<T> {
self.pool[spawn.pointer].borrow()
}
/// Same as the get_ref methode but returns a mutable reference.
///
pub fn get_mut(&self, spawn: &Spawn) -> RefMut<T> {
self.pool[spawn.pointer].borrow_mut()
}
/// Run a custom test that tells if all active (spawned) objects comply to the predicate specified.
///
pub fn test_all<P> (&self, predicate: &mut P) -> bool
where P: FnMut(&T) -> bool
{
for spawn in &self.in_use {
if !predicate(&self.pool[spawn.pointer].borrow()) {
return false;
}
}
true
}
/// Find an active (spawned) object by its spawn name.
///
pub fn find_spawn(&self, name: &str) -> Option<Spawn> {
for spawn in &self.in_use {
if self.spawns[spawn.pointer].name() == name {
return Some(self.spawns[spawn.pointer].clone());
}
}
None
}
/// Find an active (spawned) object by its spawn name and factory group.
/// This methode can be faster as find_spawn, when there are multiple groups, sinds it does not need to itterate over all objects.
/// Find_in_group can also come in handy when using the same name in different groups (sinds spawn names do not need to be unique).
///
pub fn find_spawn_in_group(&self, name: &str, group: Group) -> Option<Spawn> {
if group >= self.groups.len() { return None; }
for pointer in &self.groups[group] {
if self.spawns[*pointer].name() == name {
return Some(self.spawns[*pointer].clone());
}
}
None
}
/// As find_spawn, but lets you write a custom predicate using object values.
///
pub fn search_components<P> (&self, mut predicate: P) -> Option<Spawn>
where P: FnMut(&T) -> bool {
for spawn in &self.in_use {
if predicate(&self.pool[spawn.pointer].borrow()) {
return Some(self.spawns[spawn.pointer].clone());
}
}
None
}
/// As find_spawn_in_group, but lets you write a custom predicate using object values.
///
pub fn search_components_in_group<P> (&self, group: Group, mut predicate: P) -> Option<Spawn>
where P: FnMut(&T) -> bool {
if group >= self.groups.len() { return None; }
for pointer in &self.groups[group] {
if predicate(&self.pool[*pointer].borrow()) {
return Some(self.spawns[*pointer].clone());
}
}
None
}
/// Compare an objects values, with the values of all other objects.
/// Returns an Option of the Spawn on which the predicate succeeded first, or None is all comparisons failed.
///
pub fn compare_against<F> (&self, against: Spawn, mut on_compare: F) -> Option<Spawn>
where F: FnMut(&T, &T) -> bool
{
for spawn in &self.in_use {
if on_compare(
&self.pool[against.pointer].borrow(),
&self.pool[spawn.pointer].borrow()
){
return Some(spawn.clone());
}
}
None
}
/// Compares all values of all objects to eachother.
/// Returns an Option of the two Spawn on which the predicate succeeded first, or None is all comparisons failed.
///
pub fn compare_all<F> (&self, mut on_compare: F) -> Option<(Spawn, Spawn)>
where F: FnMut(&T, &T) -> bool
{
for spawn_a in &self.in_use {
for spawn_b in &self.in_use {
if spawn_a == spawn_b { continue; }
else if on_compare(
&self.pool[spawn_a.pointer].borrow(),
&self.pool[spawn_b.pointer].borrow()
){
return Some( (spawn_a.clone(), spawn_b.clone()) );
}
}
}
None
}
/// Spawn a new object. Spawned objects are updated every frame by the core ECS system.
/// The spawn methode activates a new object that will inherit all the settings of the factory of the corresponding group.
/// A name must be added to the spawn, this can be used to find the spawn if necessary.
///
pub fn spawn(&mut self, name: &str, group: &Group) -> Result<Spawn, SceneError> {
if *group >= self.groups.len() {
return Err(SceneError::GroupNotFound);
}
match self.free.pop() {
Some(pointer) => {
self.spawns[pointer].pointer = pointer;
self.spawns[pointer].group = group.clone();
self.spawns[pointer].new_name(name);
self.in_use.push(self.spawns[pointer].clone());
self.groups[*group].push(pointer);
self.pool[pointer].replace(self.factories[*group].build(&self.spawns[pointer]));
Ok(self.spawns[pointer].clone())
},
None => Err(SceneError::Overflow)
}
}
/// Destroy an object. Destroy deactivates an object and therefore stops it from being updated by the core ECS system.
///
/// NOTE: Destroy is slow
pub fn destroy(&mut self, spawn: &Spawn) {
if let Some(u_index) = self.in_use.iter().position(
|x| x.pointer == spawn.pointer
) {
if let Some(g_index) = self.groups[spawn.group].iter().position(
|x| *x == spawn.pointer
) {
self.groups[spawn.group].remove(g_index);
}
self.in_use.remove(u_index);
self.free.push(spawn.pointer)
}
}
pub fn wipe(&mut self, pointer: &Pointer) {
self.pool[*pointer].replace(T::default());
}
/// Checks if the object at the Pointer position has been spawned (is active).
///
pub fn exists(&self, spawn: &Spawn) -> bool {
self.in_use.contains(spawn)
}
/// Checks if the object with a specific group tag, and Pointer position
/// has been spawned (is active).
///
/// Only check one group and, in case of many groups containing many objects,
/// will therefore be faster than looping through all spawned objects.
///
pub fn exists_in_group(&self, spawn: &Spawn, group: Group) -> bool {
self.groups[group].contains(spawn.pointer())
}
/// Returns the maximum capacity of the pool.
///
pub fn size(&self) -> usize {
self.pool.len()
}
} |
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use crate::solution::ProblemSolution;
/// Solver for the day-5 seat-locator puzzle (`./data/05.txt`).
pub struct Solution {}
impl ProblemSolution for Solution {
    fn name(&self) -> &'static str {
        return "problem_05";
    }
    /// Part 1: the highest seat ID on any boarding pass in the input.
    fn part1(&self) -> io::Result<i64> {
        BufReader::new(File::open("./data/05.txt")?)
            .lines()
            // invalid locators yield None and are dropped by flat_map
            .flat_map(|locator| find_seat(128, 8, locator.unwrap().as_str()))
            .map(|(row, col)| seat_id(row, col))
            .max()
            .map(|x| x as i64) // shameful hack
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "invalid locator"))
    }
    /// Part 2: the one missing seat ID whose two neighbours both exist.
    fn part2(&self) -> io::Result<i64> {
        let mut sorted_seat_ids = BufReader::new(File::open("./data/05.txt")?)
            .lines()
            .flat_map(|locator| find_seat(128, 8, locator.unwrap().as_str()))
            .map(|(row, col)| seat_id(row, col))
            .collect::<Vec<i32>>();
        sorted_seat_ids.sort_unstable();
        // Pair each ID with its successor; a gap of exactly 2 brackets the
        // missing seat.
        let mut it = sorted_seat_ids.iter();
        it.next();
        for (seat_id, next_seat_id) in sorted_seat_ids.iter().zip(it) {
            if next_seat_id - seat_id == 2 {
                return Ok((seat_id + 1) as i64);
            }
        }
        Ok(0)
    }
}
/// Computes a boarding-pass seat ID from its row and column
/// (eight seats per row, so `row * 8 + col`).
fn seat_id(row: i32, col: i32) -> i32 {
    col + 8 * row
}
/// Binary-partitions a `rows` x `cols` seat grid according to `locator`.
///
/// `F`/`B` keep the lower/upper half of the row range; `L`/`R` do the same
/// for columns. Returns `Some((row, col))` only when the locator narrows
/// both ranges down to exactly one seat; any unknown character or an
/// incomplete locator yields `None`.
fn find_seat(rows: i32, cols: i32, locator: &str) -> Option<(i32, i32)> {
    let (mut row_lo, mut row_hi) = (0, rows);
    let (mut col_lo, mut col_hi) = (0, cols);
    for step in locator.chars() {
        let row_mid = (row_lo + row_hi) / 2;
        let col_mid = (col_lo + col_hi) / 2;
        match step {
            'F' => row_hi = row_mid,
            'B' => row_lo = row_mid,
            'L' => col_hi = col_mid,
            'R' => col_lo = col_mid,
            _ => return None,
        }
    }
    // Both half-open ranges must have collapsed to a single seat.
    (row_lo == row_hi - 1 && col_lo == col_hi - 1).then(|| (row_lo, col_lo))
}
#[cfg(test)]
mod test {
    use super::*;
    // Known-good locator and its expected (row, col).
    #[test]
    fn test_find_seat() {
        let locator = "FBFBBFFRLR";
        let location = find_seat(128, 8, locator).unwrap();
        assert_eq!(location, (44, 5));
    }
    // Malformed locators must all be rejected with None.
    #[test]
    fn test_find_seat_failure() {
        let locators = vec![
            "fake locator", // poorly structured
            "FBBFRLR", // incomplete rows
            "FBFBBFFRL", // incomplete columns
        ];
        for locator in locators.into_iter() {
            assert_eq!(find_seat(128, 8, locator), None);
        }
    }
}
// NOTE(review): this is a second test module (`tests` vs `test` above);
// consider merging the two for consistency.
#[cfg(test)]
mod tests {
    use super::*;
    // Locator -> seat ID round trip.
    #[test]
    fn test_seat_id() {
        let (row, col) = find_seat(128, 8, "BFFFBBFRRR").unwrap();
        let id = seat_id(row, col);
        assert_eq!(id, 567);
    }
}
|
//! The `ArrayType` struct.
use SafeWrapper;
use ir::{SequentialType, CompositeType, Type};
use sys;
/// An array type: a sequential type with a fixed number of elements.
pub struct ArrayType<'ctx>(SequentialType<'ctx>);
impl<'ctx> ArrayType<'ctx>
{
    /// Creates a new array type with `num_elements` elements of `element_type`.
    pub fn new(element_type: &Type, num_elements: u64) -> Self {
        // SAFETY(review): relies on LLVMRustArrayTypeGet returning a valid,
        // non-null type reference for any element type — confirm upstream.
        unsafe {
            let inner = sys::LLVMRustArrayTypeGet(element_type.inner(), num_elements);
            wrap_type!(inner => CompositeType => SequentialType => ArrayType)
        }
    }
}
impl_subtype!(ArrayType => SequentialType);
|
#[doc = "Register `RESP1` reader"]
pub type R = crate::R<RESP1_SPEC>;
#[doc = "Field `CARDSTATUS1` reader - see Table 132."]
pub type CARDSTATUS1_R = crate::FieldReader<u32>;
impl R {
    #[doc = "Bits 0:31 - see Table 132."]
    #[inline(always)]
    // svd2rust-generated accessor: the field spans the whole 32-bit register,
    // so the raw bits are passed through unmasked.
    pub fn cardstatus1(&self) -> CARDSTATUS1_R {
        CARDSTATUS1_R::new(self.bits)
    }
}
// svd2rust-generated register plumbing; regenerate from the SVD rather than
// editing by hand.
#[doc = "response 1..4 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`resp1::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct RESP1_SPEC;
impl crate::RegisterSpec for RESP1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`resp1::R`](R) reader structure"]
impl crate::Readable for RESP1_SPEC {}
#[doc = "`reset()` method sets RESP1 to value 0"]
impl crate::Resettable for RESP1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
/// Runs both parts of the day-2 puzzle against `data/day2.txt`.
fn main() {
    part1();
    part2();
}
/// Part 1: sums, over every tab-separated row of numbers, the difference
/// between that row's largest and smallest value.
fn part1() {
    let f = File::open("data/day2.txt").unwrap();
    let file = BufReader::new(&f);
    let mut sum = 0;
    for line in file.lines() {
        let l = line.unwrap();
        let nums: Vec<i32> = l.split('\t')
            .map(|num_str| num_str.parse::<i32>().unwrap())
            .collect();
        // One pass for min and max each — no need to sort the whole row.
        let min = nums.iter().min().unwrap();
        let max = nums.iter().max().unwrap();
        sum += max - min;
    }
    println!("Sum of differences: {}", sum);
}
/// Part 2: for each row, finds the pair of values where one evenly divides
/// the other and sums the quotients.
fn part2() {
    let f = File::open("data/day2.txt").unwrap();
    let file = BufReader::new(&f);
    let mut sum = 0;
    for line in file.lines() {
        let l = line.unwrap();
        let mut nums: Vec<i32> = l.split("\t")
            .map(|num_str| num_str.parse::<i32>().unwrap())
            .collect::<Vec<i32>>();
        nums.sort();
        // Check every ordered pair; only the pair where x is an exact
        // multiple of y contributes x / y.
        for x in nums.iter().rev() {
            for y in nums.iter().rev() {
                if x == y {
                    // NOTE(review): this compares *values*, not positions —
                    // a row containing duplicate values would skip that pair.
                    continue;
                }
                if x % y == 0 {
                    sum += x / y;
                }
            }
        }
    }
    println!("Sum of evenly divided numbers: {}", sum);
}
mod schema;
mod seed;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
use schema::seeds::dsl::*;
use diesel::{RunQueryDsl, PgConnection, Connection};
use dotenv;
use std::env;
use crate::seed::Seed;
embed_migrations!("migrations");
/// Inserts a new seed row with the given prefix and an index of 0.
fn add(conn: &PgConnection, other_prefix: String) -> anyhow::Result<()> {
    diesel::insert_into(seeds)
        .values(&Seed {
            prefix: other_prefix,
            index: 0
        }).execute(conn)?;
    Ok(())
}
/// Runs the embedded diesel migrations to initialize the database schema.
fn setup(conn: &PgConnection) -> anyhow::Result<()> {
    embedded_migrations::run(conn)?;
    Ok(())
}
/// Prints the command-line usage summary to stdout.
fn print_usage() {
    println!("Usage: \n\
    jirachi [add] key1 key2 ...\n\
    jirachi setup\n\
    jirachi help\n\
    \n\
    add: Adds one or more keys to the jirachi database\n\
    setup: Initializes the database\n\
    help: Displays this help message")
}
/// Entry point: connects to the database named by `JIRACHI_DB_URL` and
/// dispatches on the first CLI argument (`add`, `setup`, `help`).
fn main() {
    dotenv::dotenv().ok();
    // The database URL must come from the environment (or a .env file).
    let db_url = env::var("JIRACHI_DB_URL");
    if db_url.is_err() {
        println!("error: JIRACHI_DB_URL was not found in the environment.");
        return;
    }
    let conn = PgConnection::establish(db_url.unwrap().as_str());
    if let Err(e) = conn {
        println!("error: Connection to database failed .. \n{}", e);
        return;
    }
    let conn = conn.unwrap();
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        print_usage();
        return;
    }
    match args[1].as_str() {
        "add" => {
            if args.len() < 3 {
                print_usage();
                return;
            }
            // Iterate the key arguments directly instead of indexing.
            for key in &args[2..] {
                if let Err(e) = add(&conn, key.clone()) {
                    println!("error: Could not add key \n{}", e);
                    return;
                }
                println!("success: Added key {} to jirachi database.", key);
            }
        },
        "setup" => {
            if let Err(e) = setup(&conn) {
                println!("error: Could not setup database \n{}", e);
                return;
            }
            println!("success: Initialized jirachi database");
        },
        "help" => {
            print_usage()
        },
        _ => print_usage()
    }
}
|
use std::net::SocketAddr;
use chrono::prelude::*;
use hydroflow::hydroflow_syntax;
use hydroflow::util::{UdpSink, UdpStream};
use crate::protocol::EchoMsg;
use crate::Opts;
/// Runs the echo client: forwards stdin lines to the server as `EchoMsg`s
/// and prints every message received back.
pub(crate) async fn run_client(outbound: UdpSink, inbound: UdpStream, opts: Opts) {
    // server_addr is required for client
    let server_addr = opts.server_addr.expect("Client requires a server address");
    println!("Client live!");
    let mut flow = hydroflow_syntax! {
        // Define shared inbound and outbound channels
        inbound_chan = source_stream_serde(inbound)
            // -> tee() // commented out since we only use this once in the client template
            ;
        outbound_chan = // union() -> // commented out since we only use this once in the client template
            dest_sink_serde(outbound);
        // Print all messages for debugging purposes
        inbound_chan
            -> map(Result::unwrap)
            -> for_each(|(m, a): (EchoMsg, SocketAddr)| println!("{}: Got {:?} from {:?}", Utc::now(), m, a));
        // take stdin and send to server as an Message::Echo
        source_stdin() -> map(|l| (EchoMsg{ payload: l.unwrap(), ts: Utc::now(), }, server_addr) )
            -> outbound_chan;
    };
    // Drive the dataflow graph forever (until the process is killed).
    flow.run_async().await.unwrap();
}
|
#[doc = "Reader of register CH_STATUS"]
pub type R = crate::R<u32, super::CH_STATUS>;
#[doc = "Reader of field `INTR_CAUSE`"]
pub type INTR_CAUSE_R = crate::R<u8, u8>;
impl R {
    #[doc = "Bits 0:3 - Specifies the source of the interrupt cause: '0': NO_INTR '1': COMPLETION '2': SRC_BUS_ERROR '3': DST_BUS_ERROR '4': SRC_MISAL '5': DST_MISAL '6': CURR_PTR_NULL '7': ACTIVE_CH_DISABLED '8': DESCR_BUS_ERROR '9'-'15': Not used. For error related interrupt causes (INTR_CAUSE is '1', '2', '3', ..., '8'), the channel is disabled (HW sets CH_CTL.ENABLED to '0')."]
    #[inline(always)]
    // svd2rust-generated accessor: masks the low 4 bits of the register.
    pub fn intr_cause(&self) -> INTR_CAUSE_R {
        INTR_CAUSE_R::new((self.bits & 0x0f) as u8)
    }
}
|
// Problem 4 - Largest palindrome product
//
// A palindromic number reads the same both ways. The largest palindrome made
// from the product of two 2-digit numbers is 9009 = 91 × 99.
//
// Find the largest palindrome made from the product of two 3-digit numbers.
/// Prints the largest palindromic product of two 3-digit numbers.
fn main() {
    println!("{}", solution());
}
/// Finds the largest palindrome that is a product of two 3-digit numbers.
///
/// Restricting the inner range to `100..=a` skips mirrored (b, a) pairs
/// without losing any products.
fn solution() -> i32 {
    (100..1000)
        .flat_map(|a| (100..=a).map(move |b| a * b))
        .filter(|&n| palindrome(n))
        .max()
        .unwrap_or(0)
}
/// Reports whether `n` reads the same forwards and backwards in base 10.
fn palindrome(n: i32) -> bool {
    let forward = digits(n);
    let mut backward = forward.clone();
    backward.reverse();
    equal(forward, backward)
}
/// Reports whether the two vectors hold exactly the same elements in the
/// same order.
///
/// The previous implementation zipped the vectors, which silently truncates
/// to the shorter one — vectors of different lengths with a matching prefix
/// compared as "equal". Vec's own `PartialEq` checks length too.
fn equal(a: Vec<i32>, b: Vec<i32>) -> bool {
    a == b
}
/// Decomposes a positive integer into its base-10 digits, most significant
/// first. Returns an empty vector for `n <= 0`.
fn digits(n: i32) -> Vec<i32> {
    let mut rest = n;
    let mut reversed = Vec::new();
    // Peel digits off the low end, then flip into most-significant-first order.
    while rest > 0 {
        reversed.push(rest % 10);
        rest /= 10;
    }
    reversed.into_iter().rev().collect()
}
|
// This file is part of Substrate.
// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;
/// Weight functions for `pallet_identity`, generated by the Substrate
/// benchmark CLI (see the header above). Do not hand-tune these numbers;
/// re-run the benchmarks instead.
pub struct WeightInfo<T>(PhantomData<T>);
// Parameter legend: r = registrars, s = sub-accounts, x = additional identity
// fields, p = prior sub-accounts. Each weight is a constant base cost plus
// per-parameter slopes plus the runtime's database read/write weights.
impl<T: frame_system::Trait> pallet_identity::WeightInfo for WeightInfo<T> {
    fn add_registrar(r: u32) -> Weight {
        (39_603_000 as Weight)
            .saturating_add((418_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn set_identity(r: u32, x: u32) -> Weight {
        (110_679_000 as Weight)
            .saturating_add((389_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((2_985_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn set_subs_new(s: u32) -> Weight {
        (78_697_000 as Weight)
            .saturating_add((15_225_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight)))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
    }
    fn set_subs_old(p: u32) -> Weight {
        (71_308_000 as Weight)
            .saturating_add((5_772_000 as Weight).saturating_mul(p as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
    }
    fn clear_identity(r: u32, s: u32, x: u32) -> Weight {
        (91_553_000 as Weight)
            .saturating_add((284_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((5_749_000 as Weight).saturating_mul(s as Weight))
            .saturating_add((1_621_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
    }
    fn request_judgement(r: u32, x: u32) -> Weight {
        (110_856_000 as Weight)
            .saturating_add((496_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((3_221_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn cancel_request(r: u32, x: u32) -> Weight {
        (96_857_000 as Weight)
            .saturating_add((311_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((3_204_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn set_fee(r: u32) -> Weight {
        (16_276_000 as Weight)
            .saturating_add((381_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn set_account_id(r: u32) -> Weight {
        (18_530_000 as Weight)
            .saturating_add((391_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn set_fields(r: u32) -> Weight {
        (16_359_000 as Weight)
            .saturating_add((379_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn provide_judgement(r: u32, x: u32) -> Weight {
        (72_869_000 as Weight)
            .saturating_add((423_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((3_187_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn kill_identity(r: u32, s: u32, x: u32) -> Weight {
        (123_199_000 as Weight)
            .saturating_add((71_000 as Weight).saturating_mul(r as Weight))
            .saturating_add((5_730_000 as Weight).saturating_mul(s as Weight))
            .saturating_add((2_000 as Weight).saturating_mul(x as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(3 as Weight))
            .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
    }
    fn add_sub(s: u32) -> Weight {
        (110_070_000 as Weight)
            .saturating_add((262_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn rename_sub(s: u32) -> Weight {
        (37_130_000 as Weight)
            .saturating_add((79_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
    fn remove_sub(s: u32) -> Weight {
        (103_295_000 as Weight)
            .saturating_add((235_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
    fn quit_sub(s: u32) -> Weight {
        (65_716_000 as Weight)
            .saturating_add((227_000 as Weight).saturating_mul(s as Weight))
            .saturating_add(T::DbWeight::get().reads(2 as Weight))
            .saturating_add(T::DbWeight::get().writes(2 as Weight))
    }
}
|
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
/// Converts `value` into a NUL-terminated UTF-16 buffer suitable for
/// passing to Win32 wide-string (`*W`) APIs.
pub fn win32_string(value: &str) -> Vec<u16> {
    let mut wide: Vec<u16> = OsStr::new(value).encode_wide().collect();
    // Win32 wide-string APIs expect a trailing NUL terminator.
    wide.extend(once(0));
    wide
}
|
use serenity::framework::standard::{macros::command, Args, CommandError, CommandResult};
use serenity::model::channel::Message;
use serenity::prelude::*;
use uwuifier::uwuify_str_sse;
#[command]
#[usage = "[input text]"]
#[aliases("owo")]
#[description = "OwOfy your text, cause why not."]
async fn owoify(context: &Context, message: &Message, args: Args) -> CommandResult {
    // Refuse to run with no input rather than uwuifying an empty string.
    if args.is_empty() {
        return Err(CommandError::from("You need to input text to convert."));
    }
    let input = args.message();
    let output = uwuify_str_sse(input);
    // Best-effort: failures to delete or resend are deliberately ignored
    // (e.g. insufficient permissions).
    let _ = message.delete(&context).await;
    let _ = message
        .channel_id
        .send_message(&context, |m| {
            m.content(format!("<@{}> said: {}", message.author.id, output))
        })
        .await;
    Ok(())
}
|
#[macro_use]
extern crate lazy_static;
use tokio::net::TcpListener;
use tokio::prelude::*;
use crate::http::request::Request;
use crate::http::response::{Response, Status};
use std::collections::HashMap;
use std::sync::{Mutex, Arc};
mod parser;
pub mod http;
pub type Handler = dyn Fn(Request, &mut Response) + Send + Sync + 'static;
/// A minimal HTTP server mapping fixed URI paths to request handlers.
pub struct Server {
    handlers: HashMap<&'static str, Arc<Handler>> // path -> shared handler
}
impl Server {
    /// Creates a server with no registered handlers.
    pub fn new() -> Self {
        Server {
            handlers: HashMap::new()
        }
    }
    /// Registers `handler` for requests whose URI path equals `uri`.
    pub fn add_handler(&mut self, uri: &'static str, handler: Arc<Handler>) {
        // `handler` is already owned here; the previous extra clone was redundant.
        self.handlers.insert(uri, handler);
    }
    /// Accepts connections on `address` forever, dispatching each request to
    /// the matching handler (or *400 Bad Request* when none matches or the
    /// request cannot be parsed).
    pub async fn start_at(self, address: &str) -> Result<(), Box<dyn std::error::Error>> {
        let mut listener = TcpListener::bind(address).await?;
        loop {
            let (mut socket, _) = listener.accept().await?;
            let handlers = self.handlers.clone();
            tokio::spawn(async move {
                let mut buffer = [0; 4096];
                // Only parse the bytes actually read; ignoring the count would
                // feed trailing NULs (or a failed read) to the parser.
                let n = match socket.read(&mut buffer).await {
                    Ok(n) if n > 0 => n,
                    _ => return,
                };
                let text = match std::str::from_utf8(&buffer[..n]) {
                    Ok(text) => text,
                    Err(_) => return, // not valid UTF-8; drop the connection
                };
                let mut response = Response::new();
                match parser::parse(text) {
                    Ok((_, request)) => {
                        let uri = request.uri.path.clone();
                        if let Some(handler) = handlers.get(uri.as_str()) {
                            handler(request, &mut response);
                        } else {
                            response.status = Status::BadRequest;
                        }
                    }
                    // An unparsable request gets a 400 instead of panicking
                    // the task via unwrap.
                    Err(_) => response.status = Status::BadRequest,
                }
                // write_all retries partial writes; a failed send just ends the task.
                let _ = socket.write_all(response.marshal().as_bytes()).await;
            });
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::Server;
    // NOTE(review): `start_at` loops accepting connections, so on success this
    // test never returns, and the `Result` of `future.await` is discarded — it
    // only exercises that the server binds and starts.
    #[tokio::test]
    async fn it_works() {
        let future = Server::new().start_at("127.0.0.1:8000");
        future.await;
    }
}
|
extern crate arith;
use std::io::{self, Read};
use arith::{eval, parse};
/// Reads a source program from stdin, parses it, and prints both the
/// original term and its evaluated form (or the relevant error).
fn main() {
    let mut buffer = String::new();
    if io::stdin().read_to_string(&mut buffer).is_ok() {
        match parse(&buffer) {
            Ok(term) => {
                println!("Source program: {}", term);
                println!("Evaluated program: {}", eval(&term));
            }
            Err(e) => println!("Parse error: {:?}", e),
        }
    } else {
        println!("Could not read from STDIN!");
    }
}
|
use std::cmp::{max};
use std::io::{BufWriter, stdin, stdout, Write};
/// Tokenizing reader over stdin: buffers one line at a time and hands out
/// whitespace-separated tokens in order.
#[derive(Default)]
struct Scanner {
    buffer: Vec<String> // tokens of the current line, reversed so pop() yields them in order
}
impl Scanner {
    /// Returns the next whitespace-separated token from stdin parsed as `T`,
    /// refilling the internal buffer one line at a time.
    ///
    /// Panics if stdin cannot be read or the token fails to parse.
    fn next<T: std::str::FromStr>(&mut self) -> T {
        loop {
            if let Some(token) = self.buffer.pop() {
                // `.ok().expect(..)` rather than `.expect(..)` on the Result:
                // `T::Err` is not required to implement `Debug`, so expecting
                // on the `Result` itself would not compile for arbitrary `T`.
                return token.parse().ok().expect("Failed parse");
            }
            let mut input = String::new();
            // Fixed typo in the panic message ("Faild" -> "Failed").
            stdin().read_line(&mut input).expect("Failed read");
            self.buffer = input.split_whitespace().rev().map(String::from).collect();
        }
    }
}
/// Reads n, k, d and n values; prints the maximum sum obtainable by choosing
/// exactly k of the values such that the sum is divisible by d (or -1 when
/// no such choice exists).
fn main() {
    let mut scan = Scanner::default();
    let out = &mut BufWriter::new(stdout());
    let n = scan.next::<usize>();
    let k = scan.next::<usize>();
    let d = scan.next::<i64>();
    let a: Vec<i64> = (0..n).map(|_| scan.next::<i64>()).collect::<Vec<_>>();
    // dp[i][j][r] = best sum over the first i+1 items with exactly j chosen
    // and sum % d == r; -1 marks "unreachable".
    // NOTE(review): the -1 sentinel assumes all a[i] are non-negative — confirm
    // against the problem constraints.
    let mut dp = vec![vec![vec![-1; d as usize]; k + 1]; n];
    dp[0][0][0] = 0;
    dp[0][1][(a[0] % d) as usize] = a[0];
    for i in 1..n {
        for j in 0..(k + 1) {
            for h in 0..d {
                let e = h as usize;
                // Option 1: skip item i.
                dp[i][j][e] = dp[i - 1][j][e];
                if j > 0 {
                    // Option 2: take item i; the previous residue must be
                    // (h - a[i]) mod d, normalized to be non-negative.
                    let c = (((h - a[i]) % d + d) % d) as usize;
                    if dp[i - 1][j - 1][c] != -1 {
                        dp[i][j][e] = max(dp[i][j][e], dp[i - 1][j - 1][c] + a[i]);
                    }
                }
                // writeln!(out, "{}, {}, {}, {}", i, j, h, dp[i][j][e]).ok();
            }
        }
    }
    writeln!(out, "{}", dp[n - 1][k][0]).ok();
}
// This file contains logic to define how templates are rendered
use crate::default_headers::default_headers;
use crate::errors::*;
use crate::Request;
use crate::SsrNode;
use crate::Translator;
use futures::Future;
use http::header::HeaderMap;
use std::collections::HashMap;
use std::pin::Pin;
use std::rc::Rc;
use sycamore::context::{ContextProvider, ContextProviderProps};
use sycamore::prelude::{template, GenericNode, Template as SycamoreTemplate};
/// This encapsulates all elements of context currently provided to Perseus templates. While this can be used manually, there are macros
/// to make this easier for each thing in here.
///
/// Cloning is cheap: the translator sits behind an `Rc`, so a clone only
/// bumps a reference count.
#[derive(Clone)]
pub struct RenderCtx {
    /// Whether or not we're being executed on the server-side. This can be used to gate `web_sys` functions and the like that expect
    /// to be run in the browser.
    pub is_server: bool,
    /// A translator for templates to use. This will still be present in non-i18n apps, but it will have no message IDs and support for
    /// the non-existent locale `xx-XX`.
    pub translator: Rc<Translator>,
}
/// Represents all the different states that can be generated for a single template, allowing amalgamation logic to be run with the knowledge
/// of what did what (rather than blindly working on a vector).
///
/// The `Default` derive gives both fields as `None`.
#[derive(Default)]
pub struct States {
    /// Any state generated by the *build state* strategy.
    pub build_state: Option<String>,
    /// Any state generated by the *request state* strategy.
    pub request_state: Option<String>,
}
impl States {
    /// Constructs a `States` with neither strategy having produced anything.
    pub fn new() -> Self {
        Self::default()
    }
    /// `true` when *both* the build-state and request-state strategies
    /// produced state.
    pub fn both_defined(&self) -> bool {
        matches!(
            (&self.build_state, &self.request_state),
            (Some(_), Some(_))
        )
    }
    /// Returns the single defined state if exactly one is defined, `Ok(None)`
    /// when neither is, and an error when both are.
    pub fn get_defined(&self) -> Result<Option<String>, ServeError> {
        match (&self.build_state, &self.request_state) {
            (Some(_), Some(_)) => Err(ServeError::BothStatesDefined),
            (Some(state), None) | (None, Some(state)) => Ok(Some(state.clone())),
            (None, None) => Ok(None),
        }
    }
}
/// A generic error type that can be adapted for any errors the user may want to return from a render function. `.into()` can be used
/// to convert most error types into this without further hassle. Otherwise, use `Box::new()` on the type.
pub type RenderFnResult<T> = std::result::Result<T, Box<dyn std::error::Error>>;
/// A generic error type that can be adapted for any errors the user may want to return from a render function, as with `RenderFnResult<T>`.
/// However, this also includes a mandatory statement of causation for any errors, which assigns blame for them to either the client
/// or the server. In cases where this is ambiguous, this allows returning accurate HTTP status codes.
///
/// Note that you can automatically convert from your error type into this with `.into()` or `?`, which will blame the server for the
/// error by default and return a *500 Internal Server Error* HTTP status code. Otherwise, you'll need to manually instantiate `ErrorWithCause`
/// and return that as the error type.
pub type RenderFnResultWithCause<T> = std::result::Result<T, GenericErrorWithCause>;
/// A generic return type for asynchronous functions that we need to store in a struct.
// Boxing and pinning lets us name (and store) the otherwise-anonymous future type returned by async closures.
type AsyncFnReturn<T> = Pin<Box<dyn Future<Output = T>>>;
/// Creates traits that prevent users from having to pin their functions' return types. We can't make a generic one until desugared function
/// types are stabilized (https://github.com/rust-lang/rust/issues/29625).
// Given a trait name, a return type, and zero or more `name: Type` arguments, this generates:
//   1. a trait with a single `call(&self, ...)` method returning a boxed, pinned future; and
//   2. a blanket implementation of that trait for any `Fn` taking those arguments and returning
//      a `'static` future that resolves to the given return type.
macro_rules! make_async_trait {
    ($name:ident, $return_ty:ty$(, $arg_name:ident: $arg:ty)*) => {
        // These traits should be purely internal, the user is likely to shoot themselves in the foot
        #[doc(hidden)]
        pub trait $name {
            fn call(
                &self,
                // Each given argument is repeated
                $(
                    $arg_name: $arg,
                )*
            ) -> AsyncFnReturn<$return_ty>;
        }
        impl<T, F> $name for T
        where
            T: Fn(
                $(
                    $arg,
                )*
            ) -> F,
            F: Future<Output = $return_ty> + 'static,
        {
            fn call(
                &self,
                $(
                    $arg_name: $arg,
                )*
            ) -> AsyncFnReturn<$return_ty> {
                // The pinning happens here, so the user's function doesn't have to do it
                Box::pin(self(
                    $(
                        $arg_name,
                    )*
                ))
            }
        }
    };
}
// A series of asynchronous closure traits that prevent the user from having to pin their functions
make_async_trait!(GetBuildPathsFnType, RenderFnResult<Vec<String>>);
// The build state strategy needs an error cause if it's invoked from incremental
make_async_trait!(
    GetBuildStateFnType,
    RenderFnResultWithCause<String>,
    path: String,
    locale: String
);
// Request state additionally receives the HTTP request that triggered the render
make_async_trait!(
    GetRequestStateFnType,
    RenderFnResultWithCause<String>,
    path: String,
    locale: String,
    req: Request
);
// Revalidation takes no arguments; it simply decides whether a re-render is needed
make_async_trait!(ShouldRevalidateFnType, RenderFnResultWithCause<bool>);
// A series of closure types that should not be typed out more than once
/// The type of functions that are given a state and render a page. If you've defined state for your page, it's safe to `.unwrap()` the
/// given `Option`. If you're using i18n, an `Rc<Translator>` will also be made available through Sycamore's [context system](https://sycamore-rs.netlify.app/docs/advanced/advanced_reactivity).
pub type TemplateFn<G> = Rc<dyn Fn(Option<String>) -> SycamoreTemplate<G>>;
/// A type alias for the function that modifies the document head. This is just a template function that will always be server-side
/// rendered in function (it may be rendered on the client, but it will always be used to create an HTML string, rather than a reactive
/// template).
pub type HeadFn = TemplateFn<SsrNode>;
/// The type of functions that modify HTTP response headers.
pub type SetHeadersFn = Rc<dyn Fn(Option<String>) -> HeaderMap>;
/// The type of functions that get build paths.
pub type GetBuildPathsFn = Rc<dyn GetBuildPathsFnType>;
/// The type of functions that get build state.
pub type GetBuildStateFn = Rc<dyn GetBuildStateFnType>;
/// The type of functions that get request state.
pub type GetRequestStateFn = Rc<dyn GetRequestStateFnType>;
/// The type of functions that check if a template should revalidate.
pub type ShouldRevalidateFn = Rc<dyn ShouldRevalidateFnType>;
/// The type of functions that amalgamate build and request states.
pub type AmalgamateStatesFn = Rc<dyn Fn(States) -> RenderFnResultWithCause<Option<String>>>;
/// This allows the specification of all the template templates in an app and how to render them. If no rendering logic is provided at all,
/// the template will be prerendered at build-time with no state. All closures are stored on the heap to avoid hellish lifetime specification.
/// All properties for templates are passed around as strings to avoid type maps and other horrible things, this only adds one extra
/// deserialization call at build time. This only actually owns two `String`s and a `bool`.
#[derive(Clone)]
pub struct Template<G: GenericNode> {
    /// The path to the root of the template. Any build paths will be inserted under this.
    path: String,
    /// A function that will render your template. This will be provided the rendered properties, and will be used whenever your template needs
    /// to be prerendered in some way. This should be very similar to the function that hydrates your template on the client side.
    /// This will be executed inside `sycamore::render_to_string`, and should return a `Template<SsrNode>`. This takes an `Option<Props>`
    /// because otherwise efficient typing is almost impossible for templates without any properties (solutions welcome in PRs!).
    template: TemplateFn<G>,
    /// A function that will be used to populate the document's `<head>` with metadata such as the title. This will be passed state in
    /// the same way as `template`, but will always be rendered to a string, which will then be interpolated directly into the `<head>`,
    /// so reactivity here will not work!
    head: TemplateFn<SsrNode>,
    /// A function to be run when the server returns an HTTP response. This should return headers for said response, given the template's
    /// state. The most common use-case of this is to add cache control that respects revalidation. This will only be run on successful
    /// responses, and does have the power to override existing headers. By default, this will create sensible cache control headers.
    set_headers: SetHeadersFn,
    /// A function that gets the paths to render for at built-time. This is equivalent to `get_static_paths` in NextJS. If
    /// `incremental_generation` is `true`, more paths can be rendered at request time on top of these.
    get_build_paths: Option<GetBuildPathsFn>,
    /// Defines whether or not any new paths that match this template will be prerendered and cached in production. This allows you to
    /// have potentially billions of templates and retain a super-fast build process. The first user will have an ever-so-slightly slower
    /// experience, and everyone else gets the benefits afterwards. This requires `get_build_paths`. Note that the template root will NOT
    /// be rendered on demand, and must be explicitly defined if it's wanted. It can use a different template.
    incremental_generation: bool,
    /// A function that gets the initial state to use to prerender the template at build time. This will be passed the path of the template, and
    /// will be run for any sub-paths. This is equivalent to `get_static_props` in NextJS.
    get_build_state: Option<GetBuildStateFn>,
    /// A function that will run on every request to generate a state for that request. This allows server-side-rendering. This is equivalent
    /// to `get_server_side_props` in NextJS. This can be used with `get_build_state`, though custom amalgamation logic must be provided.
    get_request_state: Option<GetRequestStateFn>,
    /// A function to be run on every request to check if a template prerendered at build-time should be prerendered again. This is equivalent
    /// to revalidation after a time in NextJS, with the improvement of custom logic. If used with `revalidate_after`, this function will
    /// only be run after that time period. This function will not be passed anything specific to the request that invoked it.
    should_revalidate: Option<ShouldRevalidateFn>,
    /// A length of time after which to prerender the template again. This is equivalent to revalidating in NextJS. This should specify a
    /// string interval to revalidate after. That will be converted into a datetime to wait for, which will be updated after every revalidation.
    /// Note that, if this is used with incremental generation, the counter will only start after the first render (meaning if you expect
    /// a weekly re-rendering cycle for all pages, they'd likely all be out of sync, you'd need to manually implement that with
    /// `should_revalidate`).
    revalidate_after: Option<String>,
    /// Custom logic to amalgamate potentially different states generated at build and request time. This is only necessary if your template
    /// uses both `build_state` and `request_state`. If not specified and both are generated, request state will be prioritized.
    amalgamate_states: Option<AmalgamateStatesFn>,
}
impl<G: GenericNode> Template<G> {
    /// Creates a new template definition.
    pub fn new(path: impl Into<String> + std::fmt::Display) -> Self {
        Self {
            // NOTE(review): this converts via `Display` rather than `Into<String>` — presumably equivalent
            // for the types used in practice, but worth confirming
            path: path.to_string(),
            template: Rc::new(|_: Option<String>| sycamore::template! {}),
            // Unlike `template`, this may not be set at all (especially in very simple apps)
            head: Rc::new(|_: Option<String>| sycamore::template! {}),
            // We create sensible header defaults here
            set_headers: Rc::new(|_: Option<String>| default_headers()),
            get_build_paths: None,
            incremental_generation: false,
            get_build_state: None,
            get_request_state: None,
            should_revalidate: None,
            revalidate_after: None,
            amalgamate_states: None,
        }
    }
    // Render executors
    /// Executes the user-given function that renders the template on the server-side (build or request time).
    pub fn render_for_template(
        &self,
        props: Option<String>,
        translator: Rc<Translator>,
        is_server: bool,
    ) -> SycamoreTemplate<G> {
        template! {
            // We provide the translator through context, which avoids having to define a separate variable for every translation due to Sycamore's `template!` macro taking ownership with `move` closures
            ContextProvider(ContextProviderProps {
                value: RenderCtx {
                    is_server,
                    translator: Rc::clone(&translator)
                },
                children: || (self.template)(props)
            })
        }
    }
    /// Executes the user-given function that renders the document `<head>`, returning a string to be interpolated manually. Reactivity
    /// in this function will not take effect due to this string rendering. Note that this function will provide a translator context.
    pub fn render_head_str(&self, props: Option<String>, translator: Rc<Translator>) -> String {
        sycamore::render_to_string(|| {
            template! {
                // We provide the translator through context, which avoids having to define a separate variable for every translation due to Sycamore's `template!` macro taking ownership with `move` closures
                ContextProvider(ContextProviderProps {
                    value: RenderCtx {
                        // This function renders to a string, so we're effectively always on the server
                        // It's also only ever run on the server
                        is_server: true,
                        translator: Rc::clone(&translator)
                    },
                    children: || (self.head)(props)
                })
            }
        })
    }
    /// Gets the list of templates that should be prerendered for at build-time.
    pub async fn get_build_paths(&self) -> Result<Vec<String>, ServerError> {
        if let Some(get_build_paths) = &self.get_build_paths {
            let res = get_build_paths.call().await;
            match res {
                Ok(res) => Ok(res),
                // Build paths errors have no explicit cause, so the server is blamed
                Err(err) => Err(ServerError::RenderFnFailed {
                    fn_name: "get_build_paths".to_string(),
                    template_name: self.get_path(),
                    cause: ErrorCause::Server(None),
                    source: err,
                }),
            }
        } else {
            Err(BuildError::TemplateFeatureNotEnabled {
                template_name: self.path.clone(),
                feature_name: "build_paths".to_string(),
            }
            .into())
        }
    }
    /// Gets the initial state for a template. This needs to be passed the full path of the template, which may be one of those generated by
    /// `.get_build_paths()`. This also needs the locale being rendered to so that more complex applications like custom documentation
    /// systems can be enabled.
    pub async fn get_build_state(
        &self,
        path: String,
        locale: String,
    ) -> Result<String, ServerError> {
        if let Some(get_build_state) = &self.get_build_state {
            let res = get_build_state.call(path, locale).await;
            match res {
                Ok(res) => Ok(res),
                Err(GenericErrorWithCause { error, cause }) => Err(ServerError::RenderFnFailed {
                    fn_name: "get_build_state".to_string(),
                    template_name: self.get_path(),
                    cause,
                    source: error,
                }),
            }
        } else {
            Err(BuildError::TemplateFeatureNotEnabled {
                template_name: self.path.clone(),
                feature_name: "build_state".to_string(),
            }
            .into())
        }
    }
    /// Gets the request-time state for a template. This is equivalent to SSR, and will not be performed at build-time. Unlike
    /// `.get_build_paths()` though, this will be passed information about the request that triggered the render. Errors here can be caused
    /// by either the server or the client, so the user must specify an [`ErrorCause`]. This is also passed the locale being rendered to.
    pub async fn get_request_state(
        &self,
        path: String,
        locale: String,
        req: Request,
    ) -> Result<String, ServerError> {
        if let Some(get_request_state) = &self.get_request_state {
            let res = get_request_state.call(path, locale, req).await;
            match res {
                Ok(res) => Ok(res),
                Err(GenericErrorWithCause { error, cause }) => Err(ServerError::RenderFnFailed {
                    fn_name: "get_request_state".to_string(),
                    template_name: self.get_path(),
                    cause,
                    source: error,
                }),
            }
        } else {
            Err(BuildError::TemplateFeatureNotEnabled {
                template_name: self.path.clone(),
                feature_name: "request_state".to_string(),
            }
            .into())
        }
    }
    /// Amalgamates given request and build states. Errors here can be caused by either the server or the client, so the user must specify
    /// an [`ErrorCause`].
    pub fn amalgamate_states(&self, states: States) -> Result<Option<String>, ServerError> {
        if let Some(amalgamate_states) = &self.amalgamate_states {
            let res = amalgamate_states(states);
            match res {
                Ok(res) => Ok(res),
                Err(GenericErrorWithCause { error, cause }) => Err(ServerError::RenderFnFailed {
                    fn_name: "amalgamate_states".to_string(),
                    template_name: self.get_path(),
                    cause,
                    source: error,
                }),
            }
        } else {
            Err(BuildError::TemplateFeatureNotEnabled {
                template_name: self.path.clone(),
                // BUG FIX: this previously reported `request_state` (a copy-paste from the method above),
                // which misattributed the missing feature in the error message
                feature_name: "amalgamate_states".to_string(),
            }
            .into())
        }
    }
    /// Checks, by the user's custom logic, if this template should revalidate. This function isn't presently passed anything, but has
    /// network access etc., and can really do whatever it likes. Errors here can be caused by either the server or the client, so the
    /// user must specify an [`ErrorCause`].
    pub async fn should_revalidate(&self) -> Result<bool, ServerError> {
        if let Some(should_revalidate) = &self.should_revalidate {
            let res = should_revalidate.call().await;
            match res {
                Ok(res) => Ok(res),
                Err(GenericErrorWithCause { error, cause }) => Err(ServerError::RenderFnFailed {
                    fn_name: "should_revalidate".to_string(),
                    template_name: self.get_path(),
                    cause,
                    source: error,
                }),
            }
        } else {
            Err(BuildError::TemplateFeatureNotEnabled {
                template_name: self.path.clone(),
                feature_name: "should_revalidate".to_string(),
            }
            .into())
        }
    }
    /// Gets the template's headers for the given state. These will be inserted into any successful HTTP responses for this template,
    /// and they have the power to override.
    pub fn get_headers(&self, state: Option<String>) -> HeaderMap {
        (self.set_headers)(state)
    }
    // Value getters
    /// Gets the path of the template. This is the root path under which any generated pages will be served. In the simplest case, there will
    /// only be one page rendered, and it will occupy that root position.
    pub fn get_path(&self) -> String {
        self.path.clone()
    }
    /// Gets the interval after which the template will next revalidate.
    pub fn get_revalidate_interval(&self) -> Option<String> {
        self.revalidate_after.clone()
    }
    // Render characteristic checkers
    /// Checks if this template can revalidate existing prerendered templates.
    pub fn revalidates(&self) -> bool {
        self.should_revalidate.is_some() || self.revalidate_after.is_some()
    }
    /// Checks if this template can revalidate existing prerendered templates after a given time.
    pub fn revalidates_with_time(&self) -> bool {
        self.revalidate_after.is_some()
    }
    /// Checks if this template can revalidate existing prerendered templates based on some given logic.
    pub fn revalidates_with_logic(&self) -> bool {
        self.should_revalidate.is_some()
    }
    /// Checks if this template can render more templates beyond those paths it explicitly defines.
    pub fn uses_incremental(&self) -> bool {
        self.incremental_generation
    }
    /// Checks if this template is a template to generate paths beneath it.
    pub fn uses_build_paths(&self) -> bool {
        self.get_build_paths.is_some()
    }
    /// Checks if this template needs to do anything on requests for it.
    pub fn uses_request_state(&self) -> bool {
        self.get_request_state.is_some()
    }
    /// Checks if this template needs to do anything at build time.
    pub fn uses_build_state(&self) -> bool {
        self.get_build_state.is_some()
    }
    /// Checks if this template has custom logic to amalgamate build and request states if both are generated.
    pub fn can_amalgamate_states(&self) -> bool {
        self.amalgamate_states.is_some()
    }
    /// Checks if this template defines no rendering logic whatsoever. Such templates will be rendered using SSG. Basic templates can
    /// still modify headers.
    pub fn is_basic(&self) -> bool {
        !self.uses_build_paths()
            && !self.uses_build_state()
            && !self.uses_request_state()
            && !self.revalidates()
            && !self.uses_incremental()
    }
    // Builder setters
    /// Sets the template rendering function to use.
    pub fn template(mut self, val: TemplateFn<G>) -> Template<G> {
        self.template = val;
        self
    }
    /// Sets the document head rendering function to use.
    pub fn head(mut self, val: HeadFn) -> Template<G> {
        self.head = val;
        self
    }
    /// Sets the function to set headers. This will override Perseus' inbuilt header defaults.
    pub fn set_headers_fn(mut self, val: SetHeadersFn) -> Template<G> {
        self.set_headers = val;
        self
    }
    /// Enables the *build paths* strategy with the given function.
    pub fn build_paths_fn(mut self, val: GetBuildPathsFn) -> Template<G> {
        self.get_build_paths = Some(val);
        self
    }
    /// Enables the *incremental generation* strategy.
    pub fn incremental_generation(mut self) -> Template<G> {
        self.incremental_generation = true;
        self
    }
    /// Enables the *build state* strategy with the given function.
    pub fn build_state_fn(mut self, val: GetBuildStateFn) -> Template<G> {
        self.get_build_state = Some(val);
        self
    }
    /// Enables the *request state* strategy with the given function.
    pub fn request_state_fn(mut self, val: GetRequestStateFn) -> Template<G> {
        self.get_request_state = Some(val);
        self
    }
    /// Enables the *revalidation* strategy (logic variant) with the given function.
    pub fn should_revalidate_fn(mut self, val: ShouldRevalidateFn) -> Template<G> {
        self.should_revalidate = Some(val);
        self
    }
    /// Enables the *revalidation* strategy (time variant). This takes a time string of a form like `1w` for one week. More details are available
    /// [in the book](https://arctic-hen7.github.io/perseus/strategies/revalidation.html#time-syntax).
    pub fn revalidate_after(mut self, val: String) -> Template<G> {
        self.revalidate_after = Some(val);
        self
    }
    /// Enables state amalgamation with the given function.
    pub fn amalgamate_states_fn(mut self, val: AmalgamateStatesFn) -> Template<G> {
        self.amalgamate_states = Some(val);
        self
    }
}
/// Gets a `HashMap` of the given templates by their paths for serving. This should be manually wrapped for the pages your app provides
/// for convenience.
///
/// Note: if two templates share the same path, the later one will silently overwrite the earlier one in the map.
#[macro_export]
macro_rules! get_templates_map {
    [
        $($template:expr),+
    ] => {
        {
            let mut map = ::std::collections::HashMap::new();
            $(
                map.insert(
                    $template.get_path(),
                    $template
                );
            )+
            map
        }
    };
}
/// A type alias for a `HashMap` of `Template`s.
pub type TemplateMap<G> = HashMap<String, Template<G>>;
/// Checks if we're on the server or the client. This must be run inside a reactive scope (e.g. a `template!` or `create_effect`),
/// because it uses Sycamore context.
#[macro_export]
macro_rules! is_server {
    () => {{
        // `RenderCtx` is provided via `ContextProvider` in `Template::render_for_template`/`render_head_str`
        let render_ctx = ::sycamore::context::use_context::<::perseus::template::RenderCtx>();
        render_ctx.is_server
    }};
}
|
//! Encode utilities
use lib::*;
use super::ValueType;
use super::util::{pad_u32, pad_i32, pad_i64, pad_u64, Hash};
/// Encodes a dynamically-sized byte sequence as its length (one padded-u32 word) followed by the
/// right-zero-padded content words.
fn pad_bytes(bytes: &[u8]) -> Vec<Hash> {
    let mut content = pad_fixed_bytes(bytes);
    let mut words = Vec::with_capacity(content.len() + 1);
    words.push(pad_u32(bytes.len() as u32));
    words.append(&mut content);
    words
}
/// Splits the input into 32-byte words, zero-padding the final word on the right when the input
/// length is not a multiple of 32. An empty input yields no words.
fn pad_fixed_bytes(bytes: &[u8]) -> Vec<Hash> {
    bytes
        .chunks(32)
        .map(|chunk| {
            let mut word = [0u8; 32];
            word[..chunk.len()].copy_from_slice(chunk);
            word
        })
        .collect()
}
// Intermediate representation of a value being encoded, separating the fixed-position ("head")
// words of the ABI encoding from the dynamically-placed ("tail") words.
#[derive(Debug)]
enum Mediate {
    // Fixed-size words stored inline in the head
    Raw(Vec<Hash>),
    // Dynamically-sized data (e.g. bytes/string): already length-prefixed words, referenced from
    // the head by a single offset pointer
    Prefixed(Vec<Hash>),
    #[allow(dead_code)] // might be used later
    FixedArray(Vec<Mediate>),
    // Dynamic array of nested values, referenced from the head by a single offset pointer
    Array(Vec<Mediate>),
}
impl Mediate {
    // Length in bytes this value occupies in the head section: raw words are stored inline (32 bytes
    // each), while dynamic values (`Prefixed`/`Array`) contribute only a 32-byte offset pointer.
    fn init_len(&self) -> u32 {
        match *self {
            Mediate::Raw(ref raw) => 32 * raw.len() as u32,
            Mediate::Prefixed(_) => 32,
            Mediate::FixedArray(ref nes) => nes.iter().fold(0, |acc, m| acc + m.init_len()),
            Mediate::Array(_) => 32,
        }
    }
    // Length in bytes this value occupies in the tail section: raw values have no tail; a dynamic
    // array's tail is a 32-byte length word plus the full (head + tail) size of every element.
    fn closing_len(&self) -> u32 {
        match *self {
            Mediate::Raw(_) => 0,
            Mediate::Prefixed(ref pre) => pre.len() as u32 * 32,
            Mediate::FixedArray(ref nes) => nes.iter().fold(0, |acc, m| acc + m.closing_len()),
            Mediate::Array(ref nes) => nes.iter().fold(32, |acc, m| acc + m.init_len() + m.closing_len()),
        }
    }
    // Byte offset (relative to the start of the current encoding frame) at which the tail data of
    // `mediates[position]` begins: the entire head section plus the tails of all preceding values.
    fn offset_for(mediates: &[Mediate], position: usize) -> u32 {
        assert!(position < mediates.len());
        let init_len = mediates.iter().fold(0, |acc, m| acc + m.init_len());
        mediates[0..position].iter().fold(init_len, |acc, m| acc + m.closing_len())
    }
    // Produces this value's head words. For dynamic values that is just the offset pointer
    // (`suffix_offset`) to where the tail data will be placed.
    fn init(&self, suffix_offset: u32) -> Vec<Hash> {
        match *self {
            Mediate::Raw(ref raw) => raw.clone(),
            Mediate::FixedArray(ref nes) => {
                nes.iter()
                    .enumerate()
                    .flat_map(|(i, m)| m.init(Mediate::offset_for(nes, i)))
                    .collect()
            },
            Mediate::Prefixed(_) | Mediate::Array(_) => {
                vec![pad_u32(suffix_offset)]
            }
        }
    }
    // Produces this value's tail words; `offset` is the position of this value's tail within the
    // parent frame, used to make the offsets of nested dynamic values absolute.
    fn closing(&self, offset: u32) -> Vec<Hash> {
        match *self {
            Mediate::Raw(_) => vec![],
            Mediate::Prefixed(ref pre) => pre.clone(),
            Mediate::FixedArray(ref nes) => {
                // offset is not taken into account, cause it would be counted twice
                // fixed array is just raw representations of similar consecutive items
                nes.iter()
                    .enumerate()
                    .flat_map(|(i, m)| m.closing(Mediate::offset_for(nes, i)))
                    .collect()
            },
            Mediate::Array(ref nes) => {
                // + 32 added to offset represents len of the array prepended to closing
                let prefix = vec![pad_u32(nes.len() as u32)].into_iter();
                let inits = nes.iter()
                    .enumerate()
                    .flat_map(|(i, m)| m.init(offset + Mediate::offset_for(nes, i) + 32));
                let closings = nes.iter()
                    .enumerate()
                    .flat_map(|(i, m)| m.closing(offset + Mediate::offset_for(nes, i)));
                prefix.chain(inits).chain(closings).collect()
            },
        }
    }
}
/// Encodes vector of tokens into ABI compliant vector of bytes.
pub fn encode(tokens: &[ValueType]) -> Vec<u8> {
let mediates: Vec<Mediate> = tokens.iter()
.map(encode_token)
.collect();
let inits = mediates.iter()
.enumerate()
.flat_map(|(i, m)| m.init(Mediate::offset_for(&mediates, i)));
let closings = mediates.iter()
.enumerate()
.flat_map(|(i, m)| m.closing(Mediate::offset_for(&mediates, i)));
inits.chain(closings)
.flat_map(|item| item.to_vec())
.collect()
}
// Converts a single token into its intermediate `Mediate` representation: fixed-size values become
// `Raw` inline words, while dynamically-sized values (`Bytes`, `String`, `Array`) become
// `Prefixed`/`Array` so `encode` can place them in the tail with an offset pointer in the head.
fn encode_token(token: &ValueType) -> Mediate {
    match *token {
        ValueType::Address(ref address) => {
            // A 20-byte address is left-padded with 12 zero bytes into a full word
            let mut padded = [0u8; 32];
            padded[12..].copy_from_slice(address);
            Mediate::Raw(vec![padded])
        },
        ValueType::U32(val) => Mediate::Raw(vec![pad_u32(val)]),
        ValueType::U64(val) => Mediate::Raw(vec![pad_u64(val)]),
        ValueType::I32(val) => Mediate::Raw(vec![pad_i32(val)]),
        ValueType::I64(val) => Mediate::Raw(vec![pad_i64(val)]),
        ValueType::Bytes(ref bytes) => Mediate::Prefixed(pad_bytes(bytes)),
        ValueType::String(ref s) => Mediate::Prefixed(pad_bytes(s.as_bytes())),
        // 256-bit values are already a full word
        ValueType::U256(ref h) => Mediate::Raw(vec![h.clone()]),
        ValueType::H256(ref h) => Mediate::Raw(vec![h.clone()]),
        ValueType::Bool(b) => {
            // Booleans encode as a full word holding 0 or 1
            let value = if b { 1 } else { 0 };
            Mediate::Raw(vec![pad_u32(value)])
        },
        ValueType::Array(ref values) => {
            let mediates = values.iter()
                .map(encode_token)
                .collect();
            Mediate::Array(mediates)
        },
    }
}
#[cfg(test)]
mod tests {
extern crate rustc_hex as hex;
use self::hex::FromHex;
use super::super::util::pad_u32;
use super::super::ValueType;
use super::encode;
#[test]
fn encode_address() {
    let address = ValueType::Address([0x11u8; 20]);
    let encoded = encode(&vec![address]);
    // A 20-byte address is left-padded to a full 32-byte word
    let expected = "0000000000000000000000001111111111111111111111111111111111111111".from_hex().unwrap();
    assert_eq!(encoded, expected);
}
#[test]
fn encode_dynamic_array_of_addresses() {
    let address1 = ValueType::Address([0x11u8; 20]);
    let address2 = ValueType::Address([0x22u8; 20]);
    let addresses = ValueType::Array(vec![address1, address2]);
    let encoded = encode(&vec![addresses]);
    // Layout: offset to the array data (0x20), element count, then the elements
    let expected = ("".to_owned() +
        "0000000000000000000000000000000000000000000000000000000000000020" +
        "0000000000000000000000000000000000000000000000000000000000000002" +
        "0000000000000000000000001111111111111111111111111111111111111111" +
        "0000000000000000000000002222222222222222222222222222222222222222").from_hex().unwrap();
    assert_eq!(encoded, expected);
}
#[test]
fn encode_two_addresses() {
    let address1 = ValueType::Address([0x11u8; 20]);
    let address2 = ValueType::Address([0x22u8; 20]);
    let encoded = encode(&vec![address1, address2]);
    // Two static values are stored inline, back to back, with no offsets
    let expected = ("".to_owned() +
        "0000000000000000000000001111111111111111111111111111111111111111" +
        "0000000000000000000000002222222222222222222222222222222222222222").from_hex().unwrap();
    assert_eq!(encoded, expected);
}
#[test]
fn encode_dynamic_array_of_dynamic_arrays() {
    let address1 = ValueType::Address([0x11u8; 20]);
    let address2 = ValueType::Address([0x22u8; 20]);
    let array0 = ValueType::Array(vec![address1]);
    let array1 = ValueType::Array(vec![address2]);
    let dynamic = ValueType::Array(vec![array0, array1]);
    let encoded = encode(&vec![dynamic]);
    // Nested dynamic arrays: the outer array's data holds offsets (0x80, 0xc0) to each inner array
    let expected = ("".to_owned() +
        "0000000000000000000000000000000000000000000000000000000000000020" +
        "0000000000000000000000000000000000000000000000000000000000000002" +
        "0000000000000000000000000000000000000000000000000000000000000080" +
        "00000000000000000000000000000000000000000000000000000000000000c0" +
        "0000000000000000000000000000000000000000000000000000000000000001" +
        "0000000000000000000000001111111111111111111111111111111111111111" +
        "0000000000000000000000000000000000000000000000000000000000000001" +
        "0000000000000000000000002222222222222222222222222222222222222222").from_hex().unwrap();
    assert_eq!(encoded, expected);
}
#[test]
fn encode_dynamic_array_of_dynamic_arrays2() {
let address1 = ValueType::Address([0x11u8; 20]);
let address2 = ValueType::Address([0x22u8; 20]);
let address3 = ValueType::Address([0x33u8; 20]);
let address4 = ValueType::Address([0x44u8; 20]);
let array0 = ValueType::Array(vec![address1, address2]);
let array1 = ValueType::Array(vec![address3, address4]);
let dynamic = ValueType::Array(vec![array0, array1]);
let encoded = encode(&vec![dynamic]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"00000000000000000000000000000000000000000000000000000000000000e0" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000001111111111111111111111111111111111111111" +
"0000000000000000000000002222222222222222222222222222222222222222" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"0000000000000000000000003333333333333333333333333333333333333333" +
"0000000000000000000000004444444444444444444444444444444444444444").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_bytes() {
let bytes = ValueType::Bytes(vec![0x12, 0x34]);
let encoded = encode(&vec![bytes]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000002" +
"1234000000000000000000000000000000000000000000000000000000000000").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_string() {
let s = ValueType::String("gavofyork".to_owned());
let encoded = encode(&vec![s]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000009" +
"6761766f66796f726b0000000000000000000000000000000000000000000000").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_bytes2() {
let bytes = ValueType::Bytes("10000000000000000000000000000000000000000000000000000000000002".from_hex().unwrap());
let encoded = encode(&vec![bytes]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000020" +
"000000000000000000000000000000000000000000000000000000000000001f" +
"1000000000000000000000000000000000000000000000000000000000000200").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_bytes3() {
let bytes = ValueType::Bytes(("".to_owned() +
"1000000000000000000000000000000000000000000000000000000000000000" +
"1000000000000000000000000000000000000000000000000000000000000000").from_hex().unwrap());
let encoded = encode(&vec![bytes]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0000000000000000000000000000000000000000000000000000000000000040" +
"1000000000000000000000000000000000000000000000000000000000000000" +
"1000000000000000000000000000000000000000000000000000000000000000").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_two_bytes() {
let bytes1 = ValueType::Bytes("10000000000000000000000000000000000000000000000000000000000002".from_hex().unwrap());
let bytes2 = ValueType::Bytes("0010000000000000000000000000000000000000000000000000000000000002".from_hex().unwrap());
let encoded = encode(&vec![bytes1, bytes2]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000040" +
"0000000000000000000000000000000000000000000000000000000000000080" +
"000000000000000000000000000000000000000000000000000000000000001f" +
"1000000000000000000000000000000000000000000000000000000000000200" +
"0000000000000000000000000000000000000000000000000000000000000020" +
"0010000000000000000000000000000000000000000000000000000000000002").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_uint() {
let mut uint = [0u8; 32];
uint[31] = 4;
let encoded = encode(&vec![ValueType::U256(uint)]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000004").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_bool() {
let encoded = encode(&vec![ValueType::Bool(true)]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000001").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn encode_bool2() {
let encoded = encode(&vec![ValueType::Bool(false)]);
let expected = ("".to_owned() +
"0000000000000000000000000000000000000000000000000000000000000000").from_hex().unwrap();
assert_eq!(encoded, expected);
}
#[test]
fn comprehensive_test() {
    // 64-byte payload reused for both dynamic entries.
    let payload = concat!(
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b",
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b"
    ).from_hex().unwrap();
    let encoded = encode(&vec![
        ValueType::U256(pad_u32(5)),
        ValueType::Bytes(payload.clone()),
        ValueType::U256(pad_u32(3)),
        ValueType::Bytes(payload),
    ]);
    // Head: 5, offset 0x80, 3, offset 0xe0; tails: (len 0x40, data) twice.
    let expected = concat!(
        "0000000000000000000000000000000000000000000000000000000000000005",
        "0000000000000000000000000000000000000000000000000000000000000080",
        "0000000000000000000000000000000000000000000000000000000000000003",
        "00000000000000000000000000000000000000000000000000000000000000e0",
        "0000000000000000000000000000000000000000000000000000000000000040",
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b",
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b",
        "0000000000000000000000000000000000000000000000000000000000000040",
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b",
        "131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b"
    ).from_hex().unwrap();
    assert_eq!(encoded, expected);
}
#[test]
fn test_pad_u32() {
    // This will fail if endianness is not handled correctly.
    let low_bit = pad_u32(0x1);
    let high_byte = pad_u32(0x100);
    assert_eq!(low_bit[31], 1);
    assert_eq!(high_byte[30], 1);
}
#[test]
fn comprehensive_test2() {
    // Mixed static and dynamic values; the string tail lands at offset
    // 0xc0 and the array tail at 0x100 (see the expected head below).
    let values = vec![
        ValueType::U256(pad_u32(1)),
        ValueType::String("gavofyork".to_owned()),
        ValueType::U256(pad_u32(2)),
        ValueType::U256(pad_u32(3)),
        ValueType::U256(pad_u32(4)),
        ValueType::Array(vec![
            ValueType::U256(pad_u32(5)),
            ValueType::U256(pad_u32(6)),
            ValueType::U256(pad_u32(7)),
        ]),
    ];
    let encoded = encode(&values);
    let expected = concat!(
        "0000000000000000000000000000000000000000000000000000000000000001",
        "00000000000000000000000000000000000000000000000000000000000000c0",
        "0000000000000000000000000000000000000000000000000000000000000002",
        "0000000000000000000000000000000000000000000000000000000000000003",
        "0000000000000000000000000000000000000000000000000000000000000004",
        "0000000000000000000000000000000000000000000000000000000000000100",
        "0000000000000000000000000000000000000000000000000000000000000009",
        "6761766f66796f726b0000000000000000000000000000000000000000000000",
        "0000000000000000000000000000000000000000000000000000000000000003",
        "0000000000000000000000000000000000000000000000000000000000000005",
        "0000000000000000000000000000000000000000000000000000000000000006",
        "0000000000000000000000000000000000000000000000000000000000000007"
    ).from_hex().unwrap();
    assert_eq!(encoded, expected);
}
}
|
#![feature(question_mark)]
#![feature(stmt_expr_attributes)]
extern crate git2;
extern crate ansi_term;
extern crate chrono;
extern crate lazysort;
extern crate rustc_serialize;
extern crate num_cpus;
#[macro_use]
#[cfg(feature = "qt")]
extern crate qmlrs;
extern crate time;
#[cfg(feature = "csvdump")]
extern crate csv;
use rustc_serialize::json;
use std::io::prelude::*;
use std::fs::File;
use time::precise_time_s;
mod diff;
mod stats;
mod view;
mod csv_output;
use stats::*;
use diff::*;
use view::*;
#[cfg(feature = "csvdump")]
use csv_output::*;
fn main() {
    // Phase 1: walk the repository and collect raw diff statistics.
    let gather_begin = precise_time_s();
    let stats = gather_stats().unwrap();
    let gather_time = precise_time_s() - gather_begin;
    // Optional raw dump for offline analysis (feature-gated).
    #[cfg(feature = "csvdump")]
    csv_dump(&stats);
    // Phase 2: aggregate the raw stats into the final report.
    let process_begin = precise_time_s();
    let gathered = process(stats);
    let stat_time = precise_time_s() - process_begin;
    // Persist the report as JSON for external consumers.
    let encoded = json::encode(&gathered).unwrap();
    File::create("out.json")
        .unwrap()
        .write_all(encoded.as_bytes())
        .unwrap();
    println!("Gathered diffs data in {} secs and processed stats in {} secs",
             gather_time,
             stat_time);
    output(&gathered);
}
|
// Generated (svd2rust-style) read accessor for BT_SLOT_CAPT_STATUS.
#[doc = "Reader of register BT_SLOT_CAPT_STATUS"]
pub type R = crate::R<u32, super::BT_SLOT_CAPT_STATUS>;
#[doc = "Reader of field `BT_SLOT`"]
pub type BT_SLOT_R = crate::R<u16, u16>;
impl R {
    #[doc = "Bits 0:15 - During slave connection event, HW updates this register with the captured BT_SLOT at anchor point, granularity is 625us"]
    #[inline(always)]
    pub fn bt_slot(&self) -> BT_SLOT_R {
        // The field occupies the low 16 bits of the 32-bit register value.
        BT_SLOT_R::new((self.bits & 0xffff) as u16)
    }
}
|
use std::{ops::Bound, sync::Arc};
use anyhow::Context;
use axum::extract::Extension;
use hyper::{Body, Response};
use serde_derive::Deserialize;
use svc_agent::AccountId;
use svc_utils::extractors::AccountIdExtractor;
use tracing::{error, info, instrument};
use crate::app::api::v1::{AppError, AppResult};
use crate::app::error::ErrorExt;
use crate::app::error::ErrorKind as AppErrorKind;
use crate::app::http::Json;
use crate::app::services::{self, lock_interaction};
use crate::app::AppContext;
use crate::app::{authz::AuthzObject, metrics::AuthorizeMetrics};
use crate::clients::event::LockedTypes;
use crate::db::class::KeyValueProperties;
use crate::db::class::{self, BoundedDateTimeTuple, ClassType};
// Request body for webinar creation.
#[derive(Deserialize)]
pub struct WebinarCreatePayload {
    // Scope and audience identify the class being created.
    scope: String,
    audience: String,
    // Webinar time range; `do_create` treats a missing value as
    // (Bound::Unbounded, Bound::Unbounded).
    #[serde(default, with = "crate::serde::ts_seconds_option_bound_tuple")]
    time: Option<BoundedDateTimeTuple>,
    // Opaque caller-supplied tags, stored verbatim.
    tags: Option<serde_json::Value>,
    #[serde(default)]
    properties: KeyValueProperties,
    // Optional conference reservation size.
    reserve: Option<i32>,
    // Lock flags; expanded into event `LockedTypes` by `locked_types()`.
    #[serde(default = "class::default_locked_chat")]
    locked_chat: bool,
    #[serde(default = "class::default_locked_questions")]
    locked_questions: bool,
}
impl WebinarCreatePayload {
    /// Expands the two payload lock flags into event `LockedTypes`: the
    /// chat flag governs messages and reactions, the questions flag
    /// governs questions and question reactions.
    fn locked_types(&self) -> LockedTypes {
        let chat = self.locked_chat;
        let questions = self.locked_questions;
        LockedTypes {
            message: chat,
            reaction: chat,
            question: questions,
            question_reaction: questions,
        }
    }
}
#[instrument(
skip_all,
fields(
audience = ?payload.audience,
scope = ?payload.scope
)
)]
pub async fn create(
ctx: Extension<Arc<dyn AppContext>>,
AccountIdExtractor(account_id): AccountIdExtractor,
Json(payload): Json<WebinarCreatePayload>,
) -> AppResult {
info!("Creating webinar");
let r = do_create(ctx.as_ref(), &account_id, payload).await;
if let Err(e) = &r {
error!(error = ?e, "Failed to create webinar");
}
r
}
/// Creates a webinar: authorizes the caller, inserts a "dummy" class row,
/// creates the backing event + conference rooms, then either establishes
/// the dummy with the room ids or deletes it again if room creation failed
/// (manual compensation — the steps span external services, not one
/// transaction). Returns 201 with the serialized dummy on success.
async fn do_create(
    state: &dyn AppContext,
    account_id: &AccountId,
    body: WebinarCreatePayload,
) -> AppResult {
    // Authorization: "create" on the classrooms collection in the
    // payload's audience.
    let object = AuthzObject::new(&["classrooms"]).into();
    state
        .authz()
        .authorize(
            body.audience.clone(),
            account_id.clone(),
            object,
            "create".into(),
        )
        .await
        .measure()?;
    info!("Authorized webinar create");
    // Step 1: provisional DB row; room ids are attached afterwards.
    let dummy = insert_webinar_dummy(state, &body).await?;
    let time = body.time.unwrap_or((Bound::Unbounded, Bound::Unbounded));
    // Step 2: create the event and conference rooms in external services.
    let result = services::create_event_and_conference_rooms(state, &dummy, &time).await;
    let mut conn = state
        .get_conn()
        .await
        .error(AppErrorKind::DbConnAcquisitionFailed)?;
    // Step 3: establish on success, or compensate by deleting the dummy.
    let event_room_id = match result {
        Ok((event_id, conference_id)) => {
            info!(?event_id, ?conference_id, "Created rooms");
            class::EstablishQuery::new(dummy.id(), event_id, conference_id)
                .execute(&mut conn)
                .await
                .context("Failed to establish webinar dummy")
                .error(AppErrorKind::DbQueryFailed)?;
            event_id
        }
        Err(e) => {
            info!("Failed to create rooms");
            class::DeleteQuery::new(dummy.id())
                .execute(&mut conn)
                .await
                .context("Failed to delete webinar dummy")
                .error(AppErrorKind::DbQueryFailed)?;
            return Err(e);
        }
    };
    // Apply chat/question locks to the new event room when any are set.
    let locked_types = body.locked_types();
    if locked_types.any_locked() {
        lock_interaction(state, event_room_id, locked_types).await;
    }
    let body = serde_json::to_string_pretty(&dummy)
        .context("Failed to serialize webinar")
        .error(AppErrorKind::SerializationFailed)?;
    // 201 Created with the (still pre-establish) dummy as the body.
    let response = Response::builder()
        .status(201)
        .body(Body::from(body))
        .unwrap();
    Ok(response)
}
/// Inserts the provisional webinar row and returns the resulting dummy,
/// or `ClassAlreadyEstablished` when the insert reports a conflict.
async fn insert_webinar_dummy(
    state: &dyn AppContext,
    body: &WebinarCreatePayload,
) -> Result<class::Dummy, AppError> {
    // An absent time range means "unbounded on both ends".
    let time = body
        .time
        .unwrap_or((Bound::Unbounded, Bound::Unbounded));
    let mut query = class::InsertQuery::new(
        ClassType::Webinar,
        body.scope.clone(),
        body.audience.clone(),
        time.into(),
    )
    .properties(body.properties.clone())
    .preserve_history(true);
    // Optional attributes are only attached when present in the payload.
    if let Some(tags) = body.tags.as_ref() {
        query = query.tags(tags.clone());
    }
    if let Some(reserve) = body.reserve {
        query = query.reserve(reserve);
    }
    let mut conn = state
        .get_conn()
        .await
        .error(AppErrorKind::DbConnAcquisitionFailed)?;
    query
        .execute(&mut conn)
        .await
        .context("Failed to insert webinar")
        .error(AppErrorKind::DbQueryFailed)?
        .ok_or_else(|| AppError::from(AppErrorKind::ClassAlreadyEstablished))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{db::class::WebinarReadQuery, test_helpers::prelude::*};
    use chrono::{Duration, Utc};
    use mockall::predicate as pred;
    use uuid::Uuid;
    // Happy path without an explicit time range.
    #[tokio::test]
    async fn create_webinar_no_time() {
        let agent = TestAgent::new("web", "user1", USR_AUDIENCE);
        let mut authz = TestAuthz::new();
        authz.allow(agent.account_id(), vec!["classrooms"], "create");
        let mut state = TestState::new(authz).await;
        let event_room_id = Uuid::new_v4();
        let conference_room_id = Uuid::new_v4();
        create_webinar_mocks(&mut state, event_room_id, conference_room_id);
        let scope = random_string();
        let state = Arc::new(state);
        let body = WebinarCreatePayload {
            scope: scope.clone(),
            audience: USR_AUDIENCE.to_string(),
            time: None,
            tags: None,
            properties: KeyValueProperties::default(),
            reserve: Some(10),
            locked_chat: true,
            locked_questions: true,
        };
        let r = do_create(state.as_ref(), agent.account_id(), body).await;
        r.expect("Failed to create webinar");
        // Assert DB changes.
        let mut conn = state.get_conn().await.expect("Failed to get conn");
        let new_webinar = WebinarReadQuery::by_scope(USR_AUDIENCE, &scope)
            .execute(&mut conn)
            .await
            .expect("Failed to fetch webinar")
            .expect("Webinar not found");
        assert_eq!(new_webinar.reserve(), Some(10));
    }
    // Happy path with a bounded future time range.
    #[tokio::test]
    async fn create_webinar_with_time() {
        let agent = TestAgent::new("web", "user1", USR_AUDIENCE);
        let mut authz = TestAuthz::new();
        authz.allow(agent.account_id(), vec!["classrooms"], "create");
        let mut state = TestState::new(authz).await;
        let event_room_id = Uuid::new_v4();
        let conference_room_id = Uuid::new_v4();
        create_webinar_mocks(&mut state, event_room_id, conference_room_id);
        let scope = random_string();
        let now = Utc::now();
        let time = (
            Bound::Included(now + Duration::hours(1)),
            Bound::Excluded(now + Duration::hours(5)),
        );
        let state = Arc::new(state);
        let body = WebinarCreatePayload {
            scope: scope.clone(),
            audience: USR_AUDIENCE.to_string(),
            time: Some(time),
            tags: None,
            properties: KeyValueProperties::default(),
            reserve: Some(10),
            locked_chat: true,
            locked_questions: true,
        };
        let r = do_create(state.as_ref(), agent.account_id(), body).await;
        r.expect("Failed to create webinar");
        // Assert DB changes.
        let mut conn = state.get_conn().await.expect("Failed to get conn");
        let new_webinar = WebinarReadQuery::by_scope(USR_AUDIENCE, &scope)
            .execute(&mut conn)
            .await
            .expect("Failed to fetch webinar")
            .expect("Webinar not found");
        assert_eq!(new_webinar.reserve(), Some(10));
    }
    // With no authz rules configured, do_create must fail.
    #[tokio::test]
    async fn create_webinar_unauthorized() {
        let agent = TestAgent::new("web", "user1", USR_AUDIENCE);
        let state = TestState::new(TestAuthz::new()).await;
        let scope = random_string();
        let state = Arc::new(state);
        let body = WebinarCreatePayload {
            scope: scope.clone(),
            audience: USR_AUDIENCE.to_string(),
            time: None,
            tags: None,
            properties: KeyValueProperties::default(),
            reserve: Some(10),
            locked_chat: true,
            locked_questions: true,
        };
        do_create(state.as_ref(), agent.account_id(), body)
            .await
            .expect_err("Unexpectedly succeeded");
    }
    // Custom key/value properties must round-trip through the DB.
    #[tokio::test]
    async fn create_webinar_with_properties() {
        let agent = TestAgent::new("web", "user1", USR_AUDIENCE);
        let mut authz = TestAuthz::new();
        authz.allow(agent.account_id(), vec!["classrooms"], "create");
        let mut state = TestState::new(authz).await;
        let event_room_id = Uuid::new_v4();
        let conference_room_id = Uuid::new_v4();
        create_webinar_mocks(&mut state, event_room_id, conference_room_id);
        let scope = random_string();
        let mut properties: KeyValueProperties = serde_json::Map::new().into();
        properties.insert("is_adult".into(), true.into());
        let state = Arc::new(state);
        let body = WebinarCreatePayload {
            scope: scope.clone(),
            audience: USR_AUDIENCE.to_string(),
            time: None,
            tags: None,
            properties: properties.clone(),
            reserve: Some(10),
            locked_chat: true,
            locked_questions: true,
        };
        let r = do_create(state.as_ref(), agent.account_id(), body).await;
        r.expect("Failed to create webinar");
        // Assert DB changes.
        let mut conn = state.get_conn().await.expect("Failed to get conn");
        let new_webinar = WebinarReadQuery::by_scope(USR_AUDIENCE, &scope)
            .execute(&mut conn)
            .await
            .expect("Failed to fetch webinar")
            .expect("Webinar not found");
        assert_eq!(new_webinar.reserve(), Some(10));
        assert_eq!(*new_webinar.properties(), properties);
    }
    // Shared mockall setup: event room creation, lock/room updates, and
    // conference room creation (asserting the "shared" policy and the
    // reserve of 10 that the tests above send).
    fn create_webinar_mocks(state: &mut TestState, event_room_id: Uuid, conference_room_id: Uuid) {
        state
            .event_client_mock()
            .expect_create_room()
            .with(
                pred::always(),
                pred::always(),
                pred::always(),
                pred::always(),
                pred::always(),
            )
            .returning(move |_, _, _, _, _| Ok(event_room_id));
        state
            .event_client_mock()
            .expect_update_locked_types()
            .with(pred::eq(event_room_id), pred::always())
            .returning(move |_room_id, _locked_types| Ok(()));
        state
            .event_client_mock()
            .expect_update_room()
            .with(pred::eq(event_room_id), pred::always())
            .returning(move |_room_id, _| Ok(()));
        state
            .conference_client_mock()
            .expect_create_room()
            .withf(move |_time, _audience, policy, reserve, _tags, _cid| {
                assert_eq!(*policy, Some(String::from("shared")));
                assert_eq!(*reserve, Some(10));
                true
            })
            .returning(move |_, _, _, _, _, _| Ok(conference_room_id));
        state
            .conference_client_mock()
            .expect_update_room()
            .with(pred::eq(conference_room_id), pred::always())
            .returning(move |_room_id, _| Ok(()));
    }
}
|
/*===============================================================================================*/
// Copyright 2016 Kyle Finlay
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*===============================================================================================*/
use ::util::math::{Util, Vec2, Vec3};
use std::ops::*;
use std::cmp::PartialEq;
/*===============================================================================================*/
/*------MAT3 STRUCT------------------------------------------------------------------------------*/
/*===============================================================================================*/
/// 3x3 Matrix.
///
/// This struct represents a 3x3 matrix. It is normally used for 2D graphics transformations,
/// such as translation, rotation, and scaling.
#[derive (Copy, Clone, Default, Serialize, Deserialize)]
pub struct Mat3 {
    // Private
    // Three Vec3 entries, accessed via Index<u8>/IndexMut<u8>.
    // NOTE(review): translate/rotate store data as rows (translation in
    // _value[r].z), but Mul<Vec2> reads the transpose — verify the
    // intended row/column convention.
    _value: [Vec3; 3]
}
/*===============================================================================================*/
/*------MAT3 PUBLIC METHODS----------------------------------------------------------------------*/
/*===============================================================================================*/
impl Mat3 {
    /// Formats the matrix as a string.
    ///
    /// # Return value
    /// The matrix formatted as a string.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::Mat3;
    /// #
    /// let mat = Mat3::identity ();
    /// println! ("{}", mat.to_string ());
    /// ```
    /// ```c
    /// Output : [1, 0, 0]
    ///          [0, 1, 0]
    ///          [0, 0, 1]
    /// ```
    pub fn to_string (&self) -> String {
        format! ("[{}]\n[{}]\n[{}]", self[0].to_string (),
                                     self[1].to_string (),
                                     self[2].to_string ())
    }
/*===============================================================================================*/
/*------MAT3 PUBLIC STATIC METHODS---------------------------------------------------------------*/
/*===============================================================================================*/
    /// Creates a matrix with default values.
    ///
    /// # Return value
    /// A new matrix containing all zeros.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::Mat3;
    /// #
    /// let mat = Mat3::new (); // Returns a matrix with all zeros
    /// ```
    pub fn new () -> Mat3 {
        Mat3 {_value: [Vec3::new (); 3]}
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns a new identity matrix.
    ///
    /// # Return value
    /// A new identity matrix instance.
    pub fn identity () -> Mat3 {
        Mat3 {_value: [Vec3 {x: 1.0, y: 0.0, z: 0.0},
                       Vec3 {x: 0.0, y: 1.0, z: 0.0},
                       Vec3 {x: 0.0, y: 0.0, z: 1.0}]}
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns the determinant of a matrix.
    /// This is used as part of the inverse matrix calculation.
    ///
    /// # Arguments
    /// * `m` - Reference to the matrix you wish to get the determinant from.
    ///
    /// # Return value
    /// The determinant of the matrix.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::Mat3;
    /// #
    /// let mat = Mat3::identity ();
    /// println! ("Determinant = {}", Mat3::determinant (&mat));
    /// ```
    /// ```c
    /// Output : Determinant = 1.0
    /// ```
    pub fn determinant (m: &Mat3) -> f32 {
        // Rule of Sarrus: three positive diagonals minus three negative ones.
        m[0][0] * m[1][1] * m[2][2] +
        m[0][1] * m[1][2] * m[2][0] +
        m[0][2] * m[1][0] * m[2][1] -
        m[0][0] * m[1][2] * m[2][1] -
        m[0][1] * m[1][0] * m[2][2] -
        m[0][2] * m[1][1] * m[2][0]
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns the inverse of a matrix.
    ///
    /// When multiplied by the original matrix, the end result will be an identity matrix.
    ///
    /// # Arguments
    /// * `m` - The reference to the matrix you wish to get the inverse of.
    ///
    /// # Return value
    /// A new matrix which is the inverse of m.
    pub fn inverse (m: &Mat3) -> Mat3 {
        // Adjugate divided by determinant.
        // NOTE(review): no guard for a zero determinant — a singular input
        // yields inf/NaN entries; confirm callers never invert singular
        // matrices.
        let det = 1.0 / Mat3::determinant (m);
        Mat3 {_value: [Vec3 {x: (m[1][1] * m[2][2] - m[1][2] * m[2][1]) * det,
                             y: (m[0][2] * m[2][1] - m[0][1] * m[2][2]) * det,
                             z: (m[0][1] * m[1][2] - m[0][2] * m[1][1]) * det},
                       Vec3 {x: (m[1][2] * m[2][0] - m[1][0] * m[2][2]) * det,
                             y: (m[0][0] * m[2][2] - m[0][2] * m[2][0]) * det,
                             z: (m[0][2] * m[1][0] - m[0][0] * m[1][2]) * det},
                       Vec3 {x: (m[1][0] * m[2][1] - m[1][1] * m[2][0]) * det,
                             y: (m[0][1] * m[2][0] - m[0][0] * m[2][1]) * det,
                             z: (m[0][0] * m[1][1] - m[0][1] * m[1][0]) * det}]}
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns a 2D translation matrix.
    ///
    /// It represents a transformation in 2D space.
    /// When combined with a rotation and scale matrix, it creates a model matrix.
    ///
    /// # Arguments
    /// * `position` - A 'Vec2' representing the position.
    ///
    /// # Return value
    /// A new translation matrix.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::{Mat3, Vec2};
    /// #
    /// let pos = Vec2 {x : 10.0, y : 43.0};
    /// let mat = Mat3::translate (&pos);
    /// ```
    pub fn translate (position: &Vec2) -> Mat3 {
        // Translation stored in the third component of the first two entries.
        Mat3 {_value: [Vec3 {x: 1.0, y: 0.0, z: position.x},
                       Vec3 {x: 0.0, y: 1.0, z: position.y},
                       Vec3 {x: 0.0, y: 0.0, z: 1.0}]}
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns a 2D rotation matrix.
    ///
    /// It represents a rotation in 2D space.
    /// When combined with a translation and scale matrix, it creates a model matrix.
    ///
    /// It takes in a single f32 argument, which is the rotation in degrees.
    ///
    /// # Arguments
    /// * `rotation` - The desired rotation in degrees.
    ///
    /// # Return value
    /// A new rotation matrix.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::Mat3;
    /// #
    /// let rot = 45.0;
    /// let mat = Mat3::rotate (rot);
    /// ```
    pub fn rotate (rotation: f32) -> Mat3 {
        // Convert rotation to radians
        let rad_rotation = Util::deg2rad (rotation);
        let z_sin = rad_rotation.sin ();
        let z_cos = rad_rotation.cos ();
        // Transform z-axis
        Mat3 {_value: [Vec3 {x: z_cos, y: -z_sin, z: 0.0},
                       Vec3 {x: z_sin, y: z_cos,  z: 0.0},
                       Vec3 {x: 0.0,   y: 0.0,    z: 1.0}]}
    }
/*-----------------------------------------------------------------------------------------------*/
    /// Returns a 2D scale matrix.
    ///
    /// It represents a scale in 2D space.
    /// When combined with a rotation and rotation matrix, it creates a model matrix.
    ///
    /// # Arguments
    /// * `scale` - A Vec2 representing the desired scale.
    ///
    /// # Return value
    /// A new scale matrix.
    ///
    /// # Examples
    /// ```
    /// # use ion_core::util::math::{Mat3, Vec2};
    /// #
    /// let scale = Vec2 {x : 5.0, y : 1.0};
    /// let mat = Mat3::scale (&scale);
    /// ```
    pub fn scale (scale: &Vec2) -> Mat3 {
        // Scale factors placed on the diagonal.
        Mat3 {_value: [Vec3 {x: scale.x, y: 0.0,     z: 0.0},
                       Vec3 {x: 0.0,     y: scale.y, z: 0.0},
                       Vec3 {x: 0.0,     y: 0.0,     z: 1.0}]}
    }
}
/*===============================================================================================*/
/*------MAT3 OPERATOR OVERLOADS------------------------------------------------------------------*/
/*===============================================================================================*/
impl Add for Mat3 {
type Output = Mat3;
// Addition operator (matrix)
fn add (self, rhs: Mat3) -> Mat3 {
Mat3 {_value: [self[0] + rhs[0],
self[1] + rhs[1],
self[2] + rhs[2]]}
}
}
/*-----------------------------------------------------------------------------------------------*/
impl Add <Vec3> for Mat3 {
type Output = Mat3;
// Addition operator (vector)
fn add (self, rhs: Vec3) -> Mat3 {
Mat3 {_value: [self[0] + rhs,
self[1] + rhs,
self[2] + rhs]}
}
}
/*-----------------------------------------------------------------------------------------------*/
impl AddAssign for Mat3 {
    // Addition assignment operator (matrix)
    fn add_assign (&mut self, rhs: Mat3) {
        // Mat3 is Copy, so delegate to the binary operator wholesale.
        *self = *self + rhs;
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl AddAssign <Vec3> for Mat3 {
    // Addition assignment operator (vector)
    fn add_assign (&mut self, rhs: Vec3) {
        // Delegates to the Copy-based binary operator.
        *self = *self + rhs;
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl Sub for Mat3 {
type Output = Mat3;
// Subtraction operator (matrix)
fn sub (self, rhs: Mat3) -> Mat3 {
Mat3 {_value: [self[0] - rhs[0],
self[1] - rhs[1],
self[2] - rhs[2]]}
}
}
/*-----------------------------------------------------------------------------------------------*/
impl Sub <Vec3> for Mat3 {
type Output = Mat3;
// Subtraction operator (vector)
fn sub (self, rhs: Vec3) -> Mat3 {
Mat3 {_value: [self[0] - rhs,
self[1] - rhs,
self[2] - rhs]}
}
}
/*-----------------------------------------------------------------------------------------------*/
impl SubAssign for Mat3 {
    // Subtraction assignment operator (matrix)
    fn sub_assign (&mut self, rhs: Mat3) {
        // Delegates to the Copy-based binary operator.
        *self = *self - rhs;
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl SubAssign <Vec3> for Mat3 {
    // Subtraction assignment operator (vector)
    fn sub_assign (&mut self, rhs: Vec3) {
        // Delegates to the Copy-based binary operator.
        *self = *self - rhs;
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl Mul for Mat3 {
    type Output = Mat3;
    // Multiplication operator (matrix): classic triple-loop product,
    // accumulating each output cell before storing it.
    fn mul (self, rhs: Mat3) -> Mat3 {
        let mut product = Mat3::new ();
        for row in 0..3 {
            for col in 0..3 {
                let mut cell = 0.0;
                for k in 0..3 {
                    cell += self[row][k] * rhs[k][col];
                }
                product[row][col] = cell;
            }
        }
        product
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl Mul <Vec2> for Mat3 {
    type Output = Vec2;
    // Multiplication operator (Vec2)
    //
    // Treats `rhs` as the homogeneous point (x, y, 1) and multiplies it in
    // the convention used by `Mat3::translate` / `Mat3::rotate`, which
    // store the translation in the third component of entries 0 and 1.
    // The previous version indexed the transpose (self[1][0], self[2][0],
    // etc.), which silently dropped the translation that
    // `Mat3::translate` produces and reversed the rotation direction.
    fn mul (self, rhs: Vec2) -> Vec2 {
        Vec2 {x : (self[0][0] * rhs.x) + (self[0][1] * rhs.y) + self[0][2],
              y : (self[1][0] * rhs.x) + (self[1][1] * rhs.y) + self[1][2]}
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl MulAssign for Mat3 {
    // Multiplication assignment operator (matrix)
    fn mul_assign (&mut self, rhs: Mat3) {
        // Delegates to the Copy-based binary operator.
        *self = *self * rhs;
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl PartialEq for Mat3 {
    // Equal to operator: all three entries must compare equal.
    //
    // `ne` is deliberately not implemented by hand: the trait's default
    // (`!self.eq(rhs)`) is logically identical to the removed manual
    // version (De Morgan on the three comparisons), and hand-written
    // `ne` impls are flagged by clippy (`partialeq_ne_impl`).
    fn eq (&self, rhs: &Mat3) -> bool {
        self[0] == rhs[0] &&
        self[1] == rhs[1] &&
        self[2] == rhs[2]
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl Index <u8> for Mat3 {
    type Output = Vec3;
    // Index operator (immutable)
    //
    // Panics if `index > 2`. The previous version reached an
    // `unreachable!` for that case, but an out-of-range index is entirely
    // reachable by callers, so the ordinary slice bounds panic is the
    // correct failure mode.
    fn index (&self, index: u8) -> &Vec3 {
        &self._value[usize::from (index)]
    }
}
/*-----------------------------------------------------------------------------------------------*/
impl IndexMut <u8> for Mat3 {
    // Index operator (mutable)
    //
    // Panics if `index > 2`; uses the ordinary slice bounds panic instead
    // of the previous `unreachable!`, since a bad index is a reachable
    // caller error, not an impossible internal state.
    fn index_mut (&mut self, index: u8) -> &mut Vec3 {
        &mut self._value[usize::from (index)]
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Generated serde model for a REST API operation entry (name plus
/// optional display metadata).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
/// Companion module for [`Operation`]'s nested types.
pub mod operation {
    use super::*;
    /// Human-readable description of an operation, all fields optional.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
}
/// List wrapper for [`Operation`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
}
/// Workspace model: base `Resource` fields flattened into this object,
/// plus the optional workspace-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Workspace {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceProperties>,
}
/// Workspace property bag. Field names follow the wire contract via
/// `#[serde(rename = ...)]`; everything is optional and omitted from
/// serialization when unset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceProperties {
    #[serde(rename = "workspaceId", default, skip_serializing_if = "Option::is_none")]
    pub workspace_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<String>,
    // Identifiers of associated services, as named by the wire contract.
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<String>,
    #[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")]
    pub application_insights: Option<String>,
    #[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")]
    pub container_registry: Option<String>,
    #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<String>,
    #[serde(rename = "discoveryUrl", default, skip_serializing_if = "Option::is_none")]
    pub discovery_url: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<workspace_properties::ProvisioningState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionProperty>,
    #[serde(rename = "hbiWorkspace", default, skip_serializing_if = "Option::is_none")]
    pub hbi_workspace: Option<bool>,
    #[serde(rename = "serviceProvisionedResourceGroup", default, skip_serializing_if = "Option::is_none")]
    pub service_provisioned_resource_group: Option<String>,
    #[serde(rename = "privateLinkCount", default, skip_serializing_if = "Option::is_none")]
    pub private_link_count: Option<i32>,
    #[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
    pub image_build_compute: Option<String>,
    #[serde(rename = "allowPublicAccessWhenBehindVnet", default, skip_serializing_if = "Option::is_none")]
    pub allow_public_access_when_behind_vnet: Option<bool>,
    // Collections: omitted from output when empty rather than wrapped in Option.
    #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
    pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
    #[serde(rename = "sharedPrivateLinkResources", default, skip_serializing_if = "Vec::is_empty")]
    pub shared_private_link_resources: Vec<SharedPrivateLinkResource>,
}
/// Companion module for [`WorkspaceProperties`]'s nested types.
pub mod workspace_properties {
    use super::*;
    /// Provisioning lifecycle states reported by the service.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Unknown,
        Updating,
        Creating,
        Deleting,
        Succeeded,
        Failed,
        Canceled,
    }
}
/// Patch payload for updating a workspace: tags, SKU, and the mutable
/// property subset are all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspacePropertiesUpdateParameters>,
}
/// The subset of workspace properties that can be changed via update.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspacePropertiesUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
}
/// A single user-facing feature entry (id, display name, description).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlUserFeature {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Paged list of [`AmlUserFeature`]; `next_link` points at the next page
/// when present.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListAmlUserFeatureResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<AmlUserFeature>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Name pair for a usage metric: raw value plus a localized variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
/// A usage metric: current value versus limit, in the given unit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    // `type` is a Rust keyword, hence the trailing-underscore field name.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<usage::Unit>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<UsageName>,
}
/// Companion module for [`Usage`]'s nested types.
pub mod usage {
    use super::*;
    /// Unit of a usage metric; only `Count` is defined by this contract.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
/// Paged list of [`Usage`] entries; `next_link` points at the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListUsagesResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Usage>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Capability description for one VM size (CPU/GPU counts, disk and
/// memory sizes, capability flags, optional price estimates).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSize {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    // Generated name: wire field is "vCPUs".
    #[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")]
    pub v_cp_us: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gpus: Option<i32>,
    #[serde(rename = "osVhdSizeMB", default, skip_serializing_if = "Option::is_none")]
    pub os_vhd_size_mb: Option<i32>,
    #[serde(rename = "maxResourceVolumeMB", default, skip_serializing_if = "Option::is_none")]
    pub max_resource_volume_mb: Option<i32>,
    #[serde(rename = "memoryGB", default, skip_serializing_if = "Option::is_none")]
    pub memory_gb: Option<f64>,
    #[serde(rename = "lowPriorityCapable", default, skip_serializing_if = "Option::is_none")]
    pub low_priority_capable: Option<bool>,
    #[serde(rename = "premiumIO", default, skip_serializing_if = "Option::is_none")]
    pub premium_io: Option<bool>,
    #[serde(rename = "estimatedVMPrices", default, skip_serializing_if = "Option::is_none")]
    pub estimated_vm_prices: Option<EstimatedVmPrices>,
}
/// Price estimate envelope: currency, unit of measure, and the per-tier
/// price entries. All fields are required by the wire contract (no
/// `skip_serializing_if`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrices {
    #[serde(rename = "billingCurrency")]
    pub billing_currency: estimated_vm_prices::BillingCurrency,
    #[serde(rename = "unitOfMeasure")]
    pub unit_of_measure: estimated_vm_prices::UnitOfMeasure,
    pub values: Vec<EstimatedVmPrice>,
}
/// Companion module for [`EstimatedVmPrices`]'s nested types.
pub mod estimated_vm_prices {
    use super::*;
    /// Billing currency; only USD is defined by this contract.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum BillingCurrency {
        #[serde(rename = "USD")]
        Usd,
    }
    /// Unit of measure; only OneHour is defined by this contract.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UnitOfMeasure {
        OneHour,
    }
}
/// A single price point: retail price for one OS type on one VM tier.
/// All fields are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrice {
#[serde(rename = "retailPrice")]
pub retail_price: f64,
#[serde(rename = "osType")]
pub os_type: estimated_vm_price::OsType,
#[serde(rename = "vmTier")]
pub vm_tier: estimated_vm_price::VmTier,
}
/// Companion namespace for `EstimatedVmPrice`: its enum types.
pub mod estimated_vm_price {
use super::*;
/// Operating system the price applies to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Linux,
Windows,
}
/// VM pricing tier the price applies to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmTier {
Standard,
LowPriority,
Spot,
}
}
/// Response listing the VM sizes available (wire field `amlCompute`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSizeListResult {
#[serde(rename = "amlCompute", default, skip_serializing_if = "Vec::is_empty")]
pub aml_compute: Vec<VirtualMachineSize>,
}
/// One page of `Workspace` values; `next_link` (wire name `nextLink`) carries
/// the URL of the next page when more results exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Workspace>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Base properties of a quota entry: resource id/type, a limit and its unit.
/// `type_` maps to the wire field `type` (a Rust keyword).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaBaseProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<quota_base_properties::Unit>,
}
/// Companion namespace for `QuotaBaseProperties`: its enum types.
pub mod quota_base_properties {
use super::*;
/// Unit of the quota limit; only `Count` is defined here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
/// Request payload carrying the list of quota entries to update.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaUpdateParameters {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<QuotaBaseProperties>,
}
/// One page of quota-update outcomes; `next_link` (wire name `nextLink`)
/// carries the URL of the next page when more results exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotasResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<UpdateWorkspaceQuotas>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Outcome of one quota update: the quota entry plus a `status` describing
/// success or the reason for failure. `type_` maps to the wire field `type`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotas {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<update_workspace_quotas::Unit>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<update_workspace_quotas::Status>,
}
/// Companion namespace for `UpdateWorkspaceQuotas`: its enum types.
pub mod update_workspace_quotas {
use super::*;
/// Unit of the quota limit; only `Count` is defined here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
/// Result status of an individual quota update.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Undefined,
Success,
Failure,
InvalidQuotaBelowClusterMinimum,
InvalidQuotaExceedsSubscriptionLimit,
#[serde(rename = "InvalidVMFamilyName")]
InvalidVmFamilyName,
OperationNotSupportedForSku,
OperationNotEnabledForRegion,
}
}
/// A resource name in both its raw and localized forms.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceName {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
pub localized_value: Option<String>,
}
/// A quota assigned to a resource: id/type, display name, limit and unit.
/// `type_` maps to the wire field `type` (a Rust keyword).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceQuota {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<ResourceName>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<resource_quota::Unit>,
}
/// Companion namespace for `ResourceQuota`: its enum types.
pub mod resource_quota {
use super::*;
/// Unit of the quota limit; only `Count` is defined here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
/// One page of `ResourceQuota` values; `next_link` (wire name `nextLink`)
/// carries the URL of the next page when more results exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceQuotas {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ResourceQuota>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Managed identity attached to a resource: principal/tenant ids, the identity
/// kind, and any user-assigned identities.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Identity {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<identity::Type>,
#[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<UserAssignedIdentities>,
}
/// Companion namespace for `Identity`: its enum types.
pub mod identity {
use super::*;
/// Kind of managed identity. The combined variant serializes as the literal
/// wire string `"SystemAssigned,UserAssigned"`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
#[serde(rename = "SystemAssigned,UserAssigned")]
SystemAssignedUserAssigned,
UserAssigned,
None,
}
}
/// Placeholder with no modeled fields; as generated, any wire content of the
/// `userAssignedIdentities` map is ignored on deserialize and emitted as `{}`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentities {}
/// A single user-assigned identity: principal, tenant and client ids.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentity {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
}
/// Common envelope shared by tracked resources in this file (id, name,
/// identity, location, type, tags, sku); other models embed it via
/// `#[serde(flatten)]`. `type_` maps to the wire field `type`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
/// Free-form tag map; kept as untyped JSON.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
/// Wrapper around a required resource id string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceId {
pub id: String,
}
/// Keys associated with a workspace: storage key/resource id, App Insights
/// instrumentation key, and container-registry credentials.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceKeysResult {
#[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
pub user_storage_key: Option<String>,
#[serde(rename = "userStorageResourceId", default, skip_serializing_if = "Option::is_none")]
pub user_storage_resource_id: Option<String>,
#[serde(rename = "appInsightsInstrumentationKey", default, skip_serializing_if = "Option::is_none")]
pub app_insights_instrumentation_key: Option<String>,
#[serde(rename = "containerRegistryCredentials", default, skip_serializing_if = "Option::is_none")]
pub container_registry_credentials: Option<RegistryListCredentialsResult>,
}
/// Container-registry credentials: location, username and a list of passwords.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegistryListCredentialsResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub passwords: Vec<Password>,
}
/// A named registry password entry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Password {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
/// One page of `ComputeResource` values; `next_link` (wire name `nextLink`)
/// carries the URL of the next page when more results exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaginatedComputeResourcesList {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ComputeResource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A compute resource: the common `Resource` envelope plus any remaining
/// wire fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeResource {
#[serde(flatten)]
pub resource: Resource,
/// Catch-all for fields not modeled by `Resource`.
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Common properties of every compute target. `compute_type` is the required
/// discriminator; timestamps are kept as strings as received from the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Compute {
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
#[serde(rename = "computeLocation", default, skip_serializing_if = "Option::is_none")]
pub compute_location: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<compute::ProvisioningState>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
pub created_on: Option<String>,
#[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")]
pub modified_on: Option<String>,
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "provisioningErrors", default, skip_serializing_if = "Vec::is_empty")]
pub provisioning_errors: Vec<MachineLearningServiceError>,
#[serde(rename = "isAttachedCompute", default, skip_serializing_if = "Option::is_none")]
pub is_attached_compute: Option<bool>,
}
/// Companion namespace for `Compute`: its enum types.
pub mod compute {
use super::*;
/// Provisioning lifecycle state of a compute target.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Unknown,
Updating,
Creating,
Deleting,
Succeeded,
Failed,
Canceled,
}
}
/// AKS compute target: the common `Compute` core plus remaining wire fields
/// captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Aks {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// AML compute target: the common `Compute` core plus remaining wire fields
/// captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlCompute {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Virtual-machine compute target: the common `Compute` core plus remaining
/// wire fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachine {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// HDInsight compute target: the common `Compute` core plus remaining wire
/// fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsight {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Data Factory compute target; unlike its siblings it carries only the common
/// `Compute` core (no extra flattened JSON catch-all).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFactory {
#[serde(flatten)]
pub compute: Compute,
}
/// Databricks compute target: the common `Compute` core plus remaining wire
/// fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Databricks {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Data Lake Analytics compute target: the common `Compute` core plus remaining
/// wire fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeAnalytics {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Service-principal credential pair; both fields are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicePrincipalCredentials {
#[serde(rename = "clientId")]
pub client_id: String,
#[serde(rename = "clientSecret")]
pub client_secret: String,
}
/// A system service running on a compute target: its type, public IP and version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemService {
#[serde(rename = "systemServiceType", default, skip_serializing_if = "Option::is_none")]
pub system_service_type: Option<String>,
#[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
/// SSL settings: enabled/disabled status plus certificate, key and CNAME strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SslConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<ssl_configuration::Status>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cert: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cname: Option<String>,
}
/// Companion namespace for `SslConfiguration`: its enum types.
pub mod ssl_configuration {
use super::*;
/// Whether SSL is enabled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Disabled,
Enabled,
}
}
/// AKS networking settings: subnet id, service/docker-bridge CIDR ranges and
/// the DNS service IP (wire name `dnsServiceIP`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksNetworkingConfiguration {
#[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
pub subnet_id: Option<String>,
#[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")]
pub service_cidr: Option<String>,
#[serde(rename = "dnsServiceIP", default, skip_serializing_if = "Option::is_none")]
pub dns_service_ip: Option<String>,
#[serde(rename = "dockerBridgeCidr", default, skip_serializing_if = "Option::is_none")]
pub docker_bridge_cidr: Option<String>,
}
/// Administrator account for compute nodes: the user name is required,
/// the SSH public key and password are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAccountCredentials {
#[serde(rename = "adminUserName")]
pub admin_user_name: String,
#[serde(rename = "adminUserSshPublicKey", default, skip_serializing_if = "Option::is_none")]
pub admin_user_ssh_public_key: Option<String>,
#[serde(rename = "adminUserPassword", default, skip_serializing_if = "Option::is_none")]
pub admin_user_password: Option<String>,
}
/// Autoscale bounds for a cluster: required max node count, optional min node
/// count, and an optional idle-time-before-scale-down interval (kept as a string).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScaleSettings {
#[serde(rename = "maxNodeCount")]
pub max_node_count: i32,
#[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
pub min_node_count: Option<i32>,
#[serde(rename = "nodeIdleTimeBeforeScaleDown", default, skip_serializing_if = "Option::is_none")]
pub node_idle_time_before_scale_down: Option<String>,
}
/// Reference to a VM image by its required resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImage {
pub id: String,
}
/// Per-state node counts for a cluster (idle, running, preparing, unusable,
/// leaving, preempted).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NodeStateCounts {
#[serde(rename = "idleNodeCount", default, skip_serializing_if = "Option::is_none")]
pub idle_node_count: Option<i32>,
#[serde(rename = "runningNodeCount", default, skip_serializing_if = "Option::is_none")]
pub running_node_count: Option<i32>,
#[serde(rename = "preparingNodeCount", default, skip_serializing_if = "Option::is_none")]
pub preparing_node_count: Option<i32>,
#[serde(rename = "unusableNodeCount", default, skip_serializing_if = "Option::is_none")]
pub unusable_node_count: Option<i32>,
#[serde(rename = "leavingNodeCount", default, skip_serializing_if = "Option::is_none")]
pub leaving_node_count: Option<i32>,
#[serde(rename = "preemptedNodeCount", default, skip_serializing_if = "Option::is_none")]
pub preempted_node_count: Option<i32>,
}
/// Updatable cluster properties; currently only the scale settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClusterUpdateProperties {
#[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")]
pub scale_settings: Option<ScaleSettings>,
}
/// Request payload wrapping `ClusterUpdateProperties` under `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClusterUpdateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ClusterUpdateProperties>,
}
/// Base for node-information responses: the required `computeType`
/// discriminator plus a paging `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeNodesInformation {
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// AML-compute node information: the `ComputeNodesInformation` core plus
/// remaining wire fields captured untyped via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeNodesInformation {
#[serde(flatten)]
pub compute_nodes_information: ComputeNodesInformation,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Details of a single AML compute node: ids, addresses, port, state, and the
/// id of the run executing on it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeNodeInformation {
#[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")]
pub node_id: Option<String>,
#[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address: Option<String>,
#[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<String>,
/// Port number; modeled as f64 as generated from the API spec.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<f64>,
#[serde(rename = "nodeState", default, skip_serializing_if = "Option::is_none")]
pub node_state: Option<aml_compute_node_information::NodeState>,
#[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
pub run_id: Option<String>,
}
/// Companion namespace for `AmlComputeNodeInformation`: its enum types.
pub mod aml_compute_node_information {
use super::*;
/// State of a node; wire values are lowercase.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NodeState {
#[serde(rename = "idle")]
Idle,
#[serde(rename = "running")]
Running,
#[serde(rename = "preparing")]
Preparing,
#[serde(rename = "unusable")]
Unusable,
#[serde(rename = "leaving")]
Leaving,
#[serde(rename = "preempted")]
Preempted,
}
}
/// SSH credentials for a virtual machine: username/password and/or key pair data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSshCredentials {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<String>,
#[serde(rename = "publicKeyData", default, skip_serializing_if = "Option::is_none")]
pub public_key_data: Option<String>,
#[serde(rename = "privateKeyData", default, skip_serializing_if = "Option::is_none")]
pub private_key_data: Option<String>,
}
/// Base for compute-secrets responses; carries only the required
/// `computeType` discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeSecrets {
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
}
/// AKS secrets: the `ComputeSecrets` core plus remaining wire fields captured
/// untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksComputeSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Virtual-machine secrets: the `ComputeSecrets` core plus remaining wire
/// fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Databricks secrets: the `ComputeSecrets` core plus remaining wire fields
/// captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksComputeSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Discriminator naming the kind of compute target; `Aks` and `HdInsight`
/// serialize under their all-caps wire names (`"AKS"`, `"HDInsight"`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
#[serde(rename = "AKS")]
Aks,
AmlCompute,
DataFactory,
VirtualMachine,
#[serde(rename = "HDInsight")]
HdInsight,
Databricks,
DataLakeAnalytics,
}
/// Error wrapper carrying an optional `ErrorResponse` payload under `error`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MachineLearningServiceError {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponse>,
}
/// Error response body: code, message, and a list of detail entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
}
/// One error detail; both fields are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
pub code: String,
pub message: String,
}
/// A name/value capability pair attached to a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuCapability {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
/// Location-specific SKU availability: the location, its zones, and per-zone details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuLocationInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
pub zone_details: Vec<ResourceSkuZoneDetails>,
}
/// Zone-level SKU details: zone names and the capabilities available in them.
/// `name` is a list of strings as generated from the API spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuZoneDetails {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub name: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
}
/// A SKU available for workspaces: where it is offered, its tier/name,
/// capabilities, and any restrictions on its use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceSku {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
#[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
pub location_info: Vec<ResourceSkuLocationInfo>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<Restriction>,
}
/// A restriction on SKU availability: what is restricted (`type`/`values`)
/// and why (`reasonCode`). `type_` maps to the wire field `type`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Restriction {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
#[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
pub reason_code: Option<restriction::ReasonCode>,
}
/// Companion namespace for `Restriction`: its enum types.
pub mod restriction {
use super::*;
/// Why the SKU is restricted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReasonCode {
NotSpecified,
NotAvailableForRegion,
NotAvailableForSubscription,
}
}
/// One page of `WorkspaceSku` values; `next_link` (wire name `nextLink`)
/// carries the URL of the next page when more results exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<WorkspaceSku>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// SKU reference on a resource: name and tier only.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
}
/// A private-endpoint connection: the common `Resource` envelope (flattened)
/// plus its typed `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
/// Properties of a private-endpoint connection; the service-connection state
/// is required, the endpoint reference and provisioning state are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
/// Reference to a private endpoint by its optional resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
/// Approval state of a private-link service connection, with an optional
/// description and a note of any actions required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
pub actions_required: Option<String>,
}
/// Approval status of a private-endpoint service connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
Disconnected,
Timeout,
}
/// Provisioning lifecycle state of a private-endpoint connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Deleting,
Failed,
}
/// Unpaged list of `PrivateLinkResource` values (no `nextLink` field).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
}
/// A private-link resource: the common `Resource` envelope (flattened) plus
/// its typed `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
}
/// Private-link resource properties: group id, required members and the
/// DNS zone names that must be configured.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
pub required_members: Vec<String>,
#[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
/// A shared private-link resource: a name plus its typed `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedPrivateLinkResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SharedPrivateLinkResourceProperty>,
}
/// Properties of a shared private-link resource: target resource id, group id,
/// the request message, and the current approval status.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedPrivateLinkResourceProperty {
#[serde(rename = "privateLinkResourceId", default, skip_serializing_if = "Option::is_none")]
pub private_link_resource_id: Option<String>,
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[serde(rename = "requestMessage", default, skip_serializing_if = "Option::is_none")]
pub request_message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
}
/// Encryption settings: whether encryption is enabled and the key-vault key to
/// use. Both fields are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProperty {
pub status: encryption_property::Status,
#[serde(rename = "keyVaultProperties")]
pub key_vault_properties: KeyVaultProperties,
}
/// Companion namespace for `EncryptionProperty`: its enum types.
pub mod encryption_property {
use super::*;
/// Whether encryption is enabled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Enabled,
Disabled,
}
}
/// Key Vault key reference used for encryption: the vault's ARM id and the key
/// identifier are required; the identity client id is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultProperties {
#[serde(rename = "keyVaultArmId")]
pub key_vault_arm_id: String,
#[serde(rename = "keyIdentifier")]
pub key_identifier: String,
#[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")]
pub identity_client_id: Option<String>,
}
/// A linked workspace resource: id, name, type and its `LinkedWorkspaceProps`.
/// `type_` maps to the wire field `type` (a Rust keyword).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedWorkspace {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<LinkedWorkspaceProps>,
}
/// Payload for creating/updating a linked workspace: a name plus its
/// `LinkedWorkspaceProps` (no id/type, unlike `LinkedWorkspace`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedWorkspaceDto {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<LinkedWorkspaceProps>,
}
/// Linked-workspace properties: the linked workspace's resource id and the
/// user-assigned identity's resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedWorkspaceProps {
#[serde(rename = "linkedWorkspaceResourceId", default, skip_serializing_if = "Option::is_none")]
pub linked_workspace_resource_id: Option<String>,
#[serde(rename = "userAssignedIdentityResourceId", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identity_resource_id: Option<String>,
}
/// A service resource: the common `Resource` envelope plus remaining wire
/// fields captured untyped in `serde_json_value` via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(flatten)]
pub serde_json_value: serde_json::Value,
}
/// Common fields of a deployed-service response. `compute_type` is the
/// required discriminator; tags/properties/error are kept as untyped JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceResponseBase {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
pub kv_tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<service_response_base::State>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<serde_json::Value>,
/// Note: this is the service-level `ComputeType` (ACI/AKS), distinct from the
/// top-level compute-target `ComputeType` enum.
#[serde(rename = "computeType")]
pub compute_type: service_response_base::ComputeType,
#[serde(rename = "deploymentType", default, skip_serializing_if = "Option::is_none")]
pub deployment_type: Option<service_response_base::DeploymentType>,
}
/// Companion namespace for `ServiceResponseBase`: its enum types.
pub mod service_response_base {
use super::*;
/// Health state of the deployed service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Transitioning,
Healthy,
Unhealthy,
Failed,
Unschedulable,
}
/// Where the service runs; wire values are all-caps (`"ACI"`, `"AKS"`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
#[serde(rename = "ACI")]
Aci,
#[serde(rename = "AKS")]
Aks,
}
/// Kind of endpoint the service is deployed as.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
#[serde(rename = "GRPCRealtimeEndpoint")]
GrpcRealtimeEndpoint,
HttpRealtimeEndpoint,
Batch,
}
}
/// One page of service resources plus the link to the next page, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaginatedServiceList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ServiceResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// ACI-hosted service response: the common base plus a free-form remainder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AciServiceResponse {
    #[serde(flatten)]
    pub service_response_base: ServiceResponseBase,
    // Catch-all for ACI-specific properties not modeled explicitly.
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Resource requirements for a service container (CPU cores, memory, accelerators).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerResourceRequirements {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cpu: Option<f64>,
    #[serde(rename = "memoryInGB", default, skip_serializing_if = "Option::is_none")]
    pub memory_in_gb: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gpu: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fpga: Option<i32>,
}
/// Model data collection toggles (event hub / storage sinks).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelDataCollection {
    #[serde(rename = "eventHubEnabled", default, skip_serializing_if = "Option::is_none")]
    pub event_hub_enabled: Option<bool>,
    #[serde(rename = "storageEnabled", default, skip_serializing_if = "Option::is_none")]
    pub storage_enabled: Option<bool>,
}
/// Virtual-network placement (network and subnet names).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VnetConfiguration {
    #[serde(rename = "vnetName", default, skip_serializing_if = "Option::is_none")]
    pub vnet_name: Option<String>,
    #[serde(rename = "subnetName", default, skip_serializing_if = "Option::is_none")]
    pub subnet_name: Option<String>,
}
/// Customer-managed-key encryption settings; all three fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProperties {
    #[serde(rename = "vaultBaseUrl")]
    pub vault_base_url: String,
    #[serde(rename = "keyName")]
    pub key_name: String,
    #[serde(rename = "keyVersion")]
    pub key_version: String,
}
/// A registered machine-learning model.
///
/// `name`, `url` and `mime_type` are required on the wire (no `default` /
/// `skip_serializing_if`); everything else is optional metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Model {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub framework: Option<String>,
    #[serde(rename = "frameworkVersion", default, skip_serializing_if = "Option::is_none")]
    pub framework_version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub datasets: Vec<DatasetReference>,
    // Location of the model payload.
    pub url: String,
    #[serde(rename = "mimeType")]
    pub mime_type: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    // Timestamps are carried as strings; format decided by the service.
    #[serde(rename = "createdTime", default, skip_serializing_if = "Option::is_none")]
    pub created_time: Option<String>,
    #[serde(rename = "modifiedTime", default, skip_serializing_if = "Option::is_none")]
    pub modified_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unpack: Option<bool>,
    #[serde(rename = "parentModelId", default, skip_serializing_if = "Option::is_none")]
    pub parent_model_id: Option<String>,
    #[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
    pub run_id: Option<String>,
    #[serde(rename = "experimentName", default, skip_serializing_if = "Option::is_none")]
    pub experiment_name: Option<String>,
    #[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
    pub kv_tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
    #[serde(rename = "derivedModelIds", default, skip_serializing_if = "Vec::is_empty")]
    pub derived_model_ids: Vec<String>,
    #[serde(rename = "sampleInputData", default, skip_serializing_if = "Option::is_none")]
    pub sample_input_data: Option<String>,
    #[serde(rename = "sampleOutputData", default, skip_serializing_if = "Option::is_none")]
    pub sample_output_data: Option<String>,
    #[serde(rename = "resourceRequirements", default, skip_serializing_if = "Option::is_none")]
    pub resource_requirements: Option<ContainerResourceRequirements>,
}
/// Request payload describing the image to build for a deployment
/// (driver script, assets, models, environment definition).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentImageRequest {
    #[serde(rename = "driverProgram", default, skip_serializing_if = "Option::is_none")]
    pub driver_program: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub assets: Vec<ImageAsset>,
    #[serde(rename = "modelIds", default, skip_serializing_if = "Vec::is_empty")]
    pub model_ids: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub models: Vec<Model>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub environment: Option<serde_json::Value>,
    #[serde(rename = "environmentReference", default, skip_serializing_if = "Option::is_none")]
    pub environment_reference: Option<serde_json::Value>,
}
/// Response-side mirror of [`EnvironmentImageRequest`] (same field set).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentImageResponse {
    #[serde(rename = "driverProgram", default, skip_serializing_if = "Option::is_none")]
    pub driver_program: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub assets: Vec<ImageAsset>,
    #[serde(rename = "modelIds", default, skip_serializing_if = "Vec::is_empty")]
    pub model_ids: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub models: Vec<Model>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub environment: Option<serde_json::Value>,
    #[serde(rename = "environmentReference", default, skip_serializing_if = "Option::is_none")]
    pub environment_reference: Option<serde_json::Value>,
}
/// A single asset bundled into the deployment image.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageAsset {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
    pub mime_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unpack: Option<bool>,
}
/// Runtime environment for a model: Python/R/Spark/Docker sections plus
/// environment variables.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelEnvironmentDefinition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub python: Option<serde_json::Value>,
    #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")]
    pub environment_variables: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub docker: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub spark: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub r: Option<serde_json::Value>,
    #[serde(rename = "inferencingStackVersion", default, skip_serializing_if = "Option::is_none")]
    pub inferencing_stack_version: Option<String>,
}
/// Response-side mirror of [`ModelEnvironmentDefinition`] (same field set).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelEnvironmentDefinitionResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub python: Option<serde_json::Value>,
    #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")]
    pub environment_variables: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub docker: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub spark: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub r: Option<serde_json::Value>,
    #[serde(rename = "inferencingStackVersion", default, skip_serializing_if = "Option::is_none")]
    pub inferencing_stack_version: Option<String>,
}
/// Name/version reference to a registered environment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentReference {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// Python configuration of an environment (interpreter + conda dependencies).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelPythonSection {
    #[serde(rename = "interpreterPath", default, skip_serializing_if = "Option::is_none")]
    pub interpreter_path: Option<String>,
    #[serde(rename = "userManagedDependencies", default, skip_serializing_if = "Option::is_none")]
    pub user_managed_dependencies: Option<bool>,
    #[serde(rename = "condaDependencies", default, skip_serializing_if = "Option::is_none")]
    pub conda_dependencies: Option<serde_json::Value>,
    #[serde(rename = "baseCondaEnvironment", default, skip_serializing_if = "Option::is_none")]
    pub base_conda_environment: Option<String>,
}
/// Container-registry credentials (request side: includes the password).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerRegistry {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
}
/// Container-registry info as returned by the service — credentials omitted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerRegistryResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
}
/// Docker configuration of an environment (base image or Dockerfile).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelDockerSection {
    #[serde(rename = "baseImage", default, skip_serializing_if = "Option::is_none")]
    pub base_image: Option<String>,
    #[serde(rename = "baseDockerfile", default, skip_serializing_if = "Option::is_none")]
    pub base_dockerfile: Option<String>,
    #[serde(rename = "baseImageRegistry", default, skip_serializing_if = "Option::is_none")]
    pub base_image_registry: Option<serde_json::Value>,
}
/// Response-side mirror of [`ModelDockerSection`] (same field set).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelDockerSectionResponse {
    #[serde(rename = "baseImage", default, skip_serializing_if = "Option::is_none")]
    pub base_image: Option<String>,
    #[serde(rename = "baseDockerfile", default, skip_serializing_if = "Option::is_none")]
    pub base_dockerfile: Option<String>,
    #[serde(rename = "baseImageRegistry", default, skip_serializing_if = "Option::is_none")]
    pub base_image_registry: Option<serde_json::Value>,
}
/// A Maven coordinate (group:artifact:version) for a Spark package.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkMavenPackage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub group: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub artifact: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// Spark configuration of an environment (repositories and packages).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ModelSparkSection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub repositories: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub packages: Vec<SparkMavenPackage>,
    #[serde(rename = "precachePackages", default, skip_serializing_if = "Option::is_none")]
    pub precache_packages: Option<bool>,
}
/// A CRAN package (name + source repository).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RCranPackage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub repository: Option<String>,
}
/// A GitHub-hosted R package (request side: carries an auth token).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RGitHubPackage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub repository: Option<String>,
    #[serde(rename = "authToken", default, skip_serializing_if = "Option::is_none")]
    pub auth_token: Option<String>,
}
/// GitHub R package as echoed back by the service — auth token omitted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RGitHubPackageResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub repository: Option<String>,
}
/// R configuration of an environment (request side).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RSection {
    #[serde(rename = "rVersion", default, skip_serializing_if = "Option::is_none")]
    pub r_version: Option<String>,
    #[serde(rename = "userManaged", default, skip_serializing_if = "Option::is_none")]
    pub user_managed: Option<bool>,
    #[serde(rename = "rscriptPath", default, skip_serializing_if = "Option::is_none")]
    pub rscript_path: Option<String>,
    #[serde(rename = "snapshotDate", default, skip_serializing_if = "Option::is_none")]
    pub snapshot_date: Option<String>,
    #[serde(rename = "cranPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub cran_packages: Vec<RCranPackage>,
    #[serde(rename = "gitHubPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub git_hub_packages: Vec<RGitHubPackage>,
    #[serde(rename = "customUrlPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub custom_url_packages: Vec<String>,
    #[serde(rename = "bioConductorPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub bio_conductor_packages: Vec<String>,
}
/// Response-side mirror of [`RSection`]; differs only in using the
/// token-free [`RGitHubPackageResponse`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RSectionResponse {
    #[serde(rename = "rVersion", default, skip_serializing_if = "Option::is_none")]
    pub r_version: Option<String>,
    #[serde(rename = "userManaged", default, skip_serializing_if = "Option::is_none")]
    pub user_managed: Option<bool>,
    #[serde(rename = "rscriptPath", default, skip_serializing_if = "Option::is_none")]
    pub rscript_path: Option<String>,
    #[serde(rename = "snapshotDate", default, skip_serializing_if = "Option::is_none")]
    pub snapshot_date: Option<String>,
    #[serde(rename = "cranPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub cran_packages: Vec<RCranPackage>,
    #[serde(rename = "gitHubPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub git_hub_packages: Vec<RGitHubPackageResponse>,
    #[serde(rename = "customUrlPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub custom_url_packages: Vec<String>,
    #[serde(rename = "bioConductorPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub bio_conductor_packages: Vec<String>,
}
/// Name/id reference to a dataset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetReference {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// AKS variant response: the common base plus a free-form remainder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksVariantResponse {
    #[serde(flatten)]
    pub service_response_base: ServiceResponseBase,
    // Catch-all for variant-specific properties not modeled explicitly.
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Autoscaler settings for an AKS-hosted service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutoScaler {
    #[serde(rename = "autoscaleEnabled", default, skip_serializing_if = "Option::is_none")]
    pub autoscale_enabled: Option<bool>,
    #[serde(rename = "minReplicas", default, skip_serializing_if = "Option::is_none")]
    pub min_replicas: Option<i32>,
    #[serde(rename = "maxReplicas", default, skip_serializing_if = "Option::is_none")]
    pub max_replicas: Option<i32>,
    #[serde(rename = "targetUtilization", default, skip_serializing_if = "Option::is_none")]
    pub target_utilization: Option<i32>,
    #[serde(rename = "refreshPeriodInSeconds", default, skip_serializing_if = "Option::is_none")]
    pub refresh_period_in_seconds: Option<i32>,
}
/// Replica counts (desired/updated/available) for an AKS deployment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksReplicaStatus {
    #[serde(rename = "desiredReplicas", default, skip_serializing_if = "Option::is_none")]
    pub desired_replicas: Option<i32>,
    #[serde(rename = "updatedReplicas", default, skip_serializing_if = "Option::is_none")]
    pub updated_replicas: Option<i32>,
    #[serde(rename = "availableReplicas", default, skip_serializing_if = "Option::is_none")]
    pub available_replicas: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<serde_json::Value>,
}
/// Kubernetes-style liveness probe parameters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LivenessProbeRequirements {
    #[serde(rename = "failureThreshold", default, skip_serializing_if = "Option::is_none")]
    pub failure_threshold: Option<i32>,
    #[serde(rename = "successThreshold", default, skip_serializing_if = "Option::is_none")]
    pub success_threshold: Option<i32>,
    #[serde(rename = "timeoutSeconds", default, skip_serializing_if = "Option::is_none")]
    pub timeout_seconds: Option<i32>,
    #[serde(rename = "periodSeconds", default, skip_serializing_if = "Option::is_none")]
    pub period_seconds: Option<i32>,
    #[serde(rename = "initialDelaySeconds", default, skip_serializing_if = "Option::is_none")]
    pub initial_delay_seconds: Option<i32>,
}
/// AKS service response: the AKS variant base plus a free-form remainder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksServiceResponse {
    #[serde(flatten)]
    pub aks_variant_response: AksVariantResponse,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Primary/secondary authentication keys for a service endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthKeys {
    #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_key: Option<String>,
    #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_key: Option<String>,
}
/// Base payload for creating a deployed service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateServiceRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
    pub kv_tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub keys: Option<serde_json::Value>,
    // Mandatory on the wire: every create request names its compute type.
    #[serde(rename = "computeType")]
    pub compute_type: create_service_request::ComputeType,
    #[serde(rename = "environmentImageRequest", default, skip_serializing_if = "Option::is_none")]
    pub environment_image_request: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}
/// Enumerations scoped to [`CreateServiceRequest`].
pub mod create_service_request {
    use super::*;
    /// Compute target for the new service (serialized as "ACI"/"AKS").
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ComputeType {
        #[serde(rename = "ACI")]
        Aci,
        #[serde(rename = "AKS")]
        Aks,
    }
}
/// ACI create request: the common base plus a free-form remainder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AciServiceCreateRequest {
    #[serde(flatten)]
    pub create_service_request: CreateServiceRequest,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// AKS create request: endpoint-variant base plus a free-form remainder.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksServiceCreateRequest {
    #[serde(flatten)]
    pub create_endpoint_variant_request: CreateEndpointVariantRequest,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Endpoint-variant create request layered over [`CreateServiceRequest`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateEndpointVariantRequest {
    #[serde(flatten)]
    pub create_service_request: CreateServiceRequest,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
|
// 34. Find First and Last Position of Element in Sorted Array
/*
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
*/
/*
example:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
*/
// small problems/ steps
// do not search if it doesn't contain the target
// binary search for the target and loop up until the end
/// Binary search over a sorted slice, O(log n).
///
/// When `left` is true, returns the leftmost insertion point for `target`
/// (the index of its first occurrence when present); when false, returns
/// the rightmost insertion point (one past the last occurrence). Either
/// way the result lies in `0..=nums.len()`.
///
/// Takes `&[i32]` instead of `&Vec<i32>`: callers passing `&vec` still
/// work via deref coercion, and arrays/slices become usable too.
pub fn binary_search(nums: &[i32], target: i32, left: bool) -> i32 {
    let mut low = 0;
    let mut high = nums.len();
    while low < high {
        // low + (high - low) / 2 cannot overflow, unlike (low + high) / 2.
        let mid = low + (high - low) / 2;
        if nums[mid] > target || (left && nums[mid] == target) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    low as i32
}

/// Returns `[first, last]` positions of `target` in the ascending-sorted
/// `nums`, or `[-1, -1]` when the target is absent. O(log n) overall.
pub fn search_range(nums: Vec<i32>, target: i32) -> Vec<i32> {
    let mut answer: Vec<i32> = vec![-1, -1];
    let left_index = binary_search(&nums, target, true);
    // `left_index == nums.len()` means target is larger than every element;
    // a mismatched value there means target is simply not present.
    if left_index == nums.len() as i32 || nums[left_index as usize] != target {
        return answer;
    }
    answer[0] = left_index;
    // The rightmost insertion point is one past the last occurrence.
    answer[1] = binary_search(&nums, target, false) - 1;
    answer
}
fn main() {
println!("{:?}",search_range(vec![5,7,7,8,8,10], 8));
} |
// For tests
#[allow(unused_imports)]
extern crate bytes;
#[allow(unused_imports)]
#[macro_use] extern crate fuel_line_derive;
#[allow(unused_imports)]
extern crate uuid;
/// Types that can render themselves to an output `String`.
///
/// Normally implemented via `#[derive(Render)]` from the
/// `fuel_line_derive` crate, which generates `render` from the template
/// file named in the `#[TemplateName = "..."]` attribute.
pub trait Render {
    fn render(&self) -> String;
}
/// Interleaves a head template with alternating `key`/`template` string
/// fragments, producing a single `String` with exactly one allocation.
#[macro_export]
macro_rules! templatify {
    ( $head_template:expr $(;$key:expr; $template:expr)* ) => {
        {
            // Sum every fragment's length up front so the output string
            // never reallocates while being built.
            let capacity = $head_template.len() $( + $key.len() + $template.len() )*;
            let mut rendered = String::with_capacity(capacity);
            rendered.push_str($head_template);
            $(
                rendered.push_str($key);
                rendered.push_str($template);
            )*
            rendered
        }
    }
}
/// Buffer-writing variant of `templatify!`: appends the interleaved
/// fragments into a caller-supplied buffer that exposes `reserve` and
/// `put` (e.g. `bytes::BytesMut` with `BufMut` in scope).
#[macro_export]
macro_rules! templatify_buffer {
    ( $buffer:ident, $head_template:expr $(;$key:expr; $template:expr)* ) => {
        {
            // Reserve the exact total once, then append every fragment.
            let capacity = $head_template.len() $( + $key.len() + $template.len() )*;
            $buffer.reserve(capacity);
            $buffer.put($head_template);
            $(
                $buffer.put($key);
                $buffer.put($template);
            )*
        }
    }
}
// Tests for the templating macros and the `Render` derive.
#[cfg(test)]
mod tests {
    use bytes::{BytesMut, BufMut};
    use Render;
    // NOTE(review): `Uuid` appears unused in this module — confirm before removing.
    use uuid::Uuid;
    #[test]
    fn templatify_should_work() {
        let world = "world";
        let results: String = templatify! { "hello, "; world ;"!" };
        assert!(results == "hello, world!");
    }
    #[test]
    fn templatify_buffer_should_work() {
        let mut buf = BytesMut::new();
        let world = "world";
        templatify_buffer! { buf, "hello, "; world ;"!" };
        assert!(buf == "hello, world!");
    }
    #[test]
    fn render_derive_should_work() {
        // The derive reads the template file at compile time and generates
        // `render` by splicing the struct's fields into it.
        #[derive(Render)]
        #[TemplateName = "./fuel_line/test_data/test.html"]
        struct TestStruct {
            a: String,
            b: String
        };
        let t = TestStruct {
            a: "a_value".to_owned(),
            b: "b_value".to_owned()
        };
        assert!(t.render() == "<h1>b_value</h1>\n<p>a_value</p>\n");
    }
}
|
use super::*;
/// Object attribute 1: the second 16-bit field of an OAM sprite entry.
///
/// `#[repr(transparent)]` keeps it layout-identical to the raw `u16` it
/// wraps, so it can be written to hardware registers directly.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[repr(transparent)]
pub struct ObjAttr1(u16);
impl ObjAttr1 {
    const_new!();
    // Bits 0-8: horizontal position (9 bits).
    bitfield_int!(u16; 0..=8: u16, x_pos, with_x_pos, set_x_pos);
    // Bits 9-13: affine parameter index. NOTE(review): this range overlaps
    // the hflip/vflip bits below — presumably the fields are mutually
    // exclusive depending on the object's affine mode; confirm callers
    // never use both on the same object.
    bitfield_int!(u16; 9..=13: u16, affine_index, with_affine_index, set_affine_index);
    // Bits 12-13: mirroring flags.
    bitfield_bool!(u16; 12, hflip, with_hflip, set_hflip);
    bitfield_bool!(u16; 13, vflip, with_vflip, set_vflip);
    // Bits 14-15: object size selector.
    bitfield_int!(u16; 14..=15: u16, obj_size, with_obj_size, set_obj_size);
}
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
// TODO: add diagram
#![allow(dead_code)]
// Submodules of this pipeline; only `buffer_manager` stays crate-private.
mod buffer_manager;
pub mod commit_phase;
pub mod errors;
pub mod execution_phase;
pub mod ordering_state_computer;
#[cfg(test)]
mod tests;
|
use serde_json::Number;
use serde_json::Value;
use crate::validator::{scope::ScopedSchema, state::ValidationState};
/// Validates `data` against the numeric constraints of the schema in
/// `scope` (min / exclusiveMin / max / exclusiveMax / multipleOf) through a
/// caller-chosen numeric view `T` (e.g. `u64`, `i64`, `f64`).
///
/// `schema_number_value` extracts a `T` from a schema `Number`;
/// `data_value` extracts a `T` from the JSON `data`. If `data_value`
/// returns `None`, a single "type" error is reported immediately;
/// otherwise every violated constraint contributes one error.
fn validate_as<T, F1, F2>(
    scope: &ScopedSchema,
    data: &Value,
    schema_number_value: F1,
    data_value: F2,
) -> ValidationState
where
    F1: Fn(&Number) -> Option<T> + Copy,
    F2: Fn(&Value) -> Option<T> + Copy,
    T: std::fmt::Display + std::cmp::PartialOrd + std::ops::Rem<Output = T> + Copy,
{
    let value = match data_value(data) {
        Some(x) => x,
        None => {
            return ValidationState::new_with_error(scope.error(
                "type",
                format!("expected '{}'", scope.schema().r#type().primitive_type().as_ref()),
            ));
        }
    };
    let schema = scope.schema();
    let mut state = ValidationState::new();
    // Bug fix: the four comparison messages below were missing their
    // closing quote (e.g. "expected '>= 3"), unlike the balanced quoting
    // used by the "type" and "multipleOf" messages.
    if let Some(min) = schema.min().and_then(schema_number_value) {
        if value < min {
            state.push_error(scope.error("min", format!("expected '>= {}'", min)));
        }
    }
    if let Some(exclusive_min) = schema.exclusive_min().and_then(schema_number_value) {
        if value <= exclusive_min {
            state.push_error(scope.error("exclusiveMin", format!("expected '> {}'", exclusive_min)));
        }
    }
    if let Some(max) = schema.max().and_then(schema_number_value) {
        if value > max {
            state.push_error(scope.error("max", format!("expected '<= {}'", max)));
        }
    }
    if let Some(exclusive_max) = schema.exclusive_max().and_then(schema_number_value) {
        if value >= exclusive_max {
            state.push_error(scope.error("exclusiveMax", format!("expected '< {}'", exclusive_max)));
        }
    }
    // Zero in the chosen numeric view; `unwrap` is safe for the u64/i64/f64
    // extractors used by the callers below, all of which can represent 0.
    let zero = schema_number_value(&Number::from(0)).unwrap();
    if let Some(multiple_of) = schema.multiple_of().and_then(schema_number_value) {
        if multiple_of > zero && value % multiple_of != zero {
            state.push_error(scope.error(
                "multipleOf",
                format!("expected '{} % {} == {}'", value, multiple_of, zero),
            ));
        }
    }
    state
}
/// Validates `data` as an integer: first through the `u64` view, falling
/// back to the `i64` view (e.g. for negative values) when that fails.
pub fn validate_as_integer(scope: &ScopedSchema, data: &Value) -> ValidationState {
    let unsigned = validate_as(scope, data, Number::as_u64, Value::as_u64);
    if !unsigned.is_valid() {
        return validate_as(scope, data, Number::as_i64, Value::as_i64);
    }
    unsigned
}
/// Validates `data` as a number: integer views first, then `f64` as the
/// last resort when the integer validation fails.
pub fn validate_as_number(scope: &ScopedSchema, data: &Value) -> ValidationState {
    let integer_state = validate_as_integer(scope, data);
    match integer_state.is_valid() {
        true => integer_state,
        false => validate_as(scope, data, Number::as_f64, Value::as_f64),
    }
}
|
//
// Climbing Ropes
//
// Simple program to play around with the 'rope'
// data structure. This is also my second program
// in Rust.
//
/// A node in a rope: a binary tree over borrowed string fragments.
///
/// `len` presumably caches the total text length under this node so
/// lookups can skip whole subtrees — TODO confirm once operations exist.
struct Rope<'a> {
    len: usize,
    left: Option<Box<Rope<'a>>>,
    right: Option<Box<Rope<'a>>>,
    // `&str` instead of `&String`: borrows string data directly without
    // the pointless extra indirection of a reference-to-String, and
    // accepts literals as well as `String`s.
    data: &'a str,
}
/// Placeholder entry point — the rope structure above is not exercised yet.
fn main() {
    let message = "Hello, world!";
    println!("{}", message);
}
|
use ry::parse_path;
// Tests for `ry::parse_path`. Idiom fix throughout: `assert!(cond)`
// replaces the redundant `assert_eq!(true, cond)` form.
#[test]
fn test_parse_path() {
    assert_eq!(parse_path("a.b.c").unwrap(), vec!["a", "b", "c"]);
}
#[test]
fn test_parse_path_with_quotes() {
    // A quoted segment keeps its inner dots.
    assert_eq!(
        parse_path("a.\"foo.bar\".c").unwrap(),
        vec!["a", "foo.bar", "c"]
    );
}
#[test]
fn test_parse_path_with_one_quote_errs() {
    let result = parse_path("a.\"foo.bar.c");
    assert!(result.is_err());
    assert!(format!("{}", result.unwrap_err()).ends_with("no closing quote"));
}
#[test]
fn test_parse_path_with_array_indexing() {
    // An index becomes its own "[10]" segment.
    assert_eq!(
        parse_path("a.foo[10].bar").unwrap(),
        vec!["a", "foo", "[10]", "bar"]
    );
}
#[test]
fn test_parse_path_with_parens() {
    // A parenthesized filter expression stays one segment.
    assert_eq!(
        parse_path("a.(b.d==cat*).c").unwrap(),
        vec!["a", "(b.d==cat*)", "c"]
    );
}
#[test]
fn test_parse_path_with_one_open_array_errs() {
    let result = parse_path("a.foo[1.bar");
    assert!(result.is_err());
    assert!(format!("{}", result.unwrap_err()).ends_with("no closing array character"));
}
#[test]
fn test_parse_path_with_one_open_paren_errs() {
    let result = parse_path("a.(b.d==cat*.c");
    assert!(result.is_err());
    assert!(format!("{}", result.unwrap_err()).ends_with("no closing paren character"));
}
#[test]
fn test_parse_path_with_open_array_start_errs() {
    let result = parse_path("a.foo]1].bar");
    assert!(result.is_err());
    assert!(format!("{}", result.unwrap_err()).ends_with("closing array character before opening"));
}
#[test]
fn test_parse_path_with_close_paren_start_errs() {
    let result = parse_path("a.)b.d==cat*.c)");
    assert!(result.is_err());
    assert!(format!("{}", result.unwrap_err()).ends_with("closing paren character before opening"));
}
#[test]
fn test_parse_path_with_child_value_filtering() {
    assert_eq!(
        parse_path("animals(.==cat)").unwrap(),
        vec!["animals", "(.==cat)"]
    );
}
|
// Std
use std::convert::Infallible;
// Crates
use anyhow::Result;
use sqlx::sqlite::SqlitePool;
use tracing::info;
use warp::reply::Reply;
use warp::{http, Filter};
// Modules
use crate::db;
use crate::{Entry, Project};
/// Extracts an `Entry` from a JSON request body, rejecting bodies over 16 KiB.
fn json_body_entry() -> impl Filter<Extract = (Entry,), Error = warp::Rejection> + Clone {
    warp::body::content_length_limit(1024 * 16).and(warp::body::json())
}
/// Extracts a `Project` from a JSON request body, rejecting bodies over 16 KiB.
fn json_body_project() -> impl Filter<Extract = (Project,), Error = warp::Rejection> + Clone {
    warp::body::content_length_limit(1024 * 16).and(warp::body::json())
}
/// Injects a clone of the shared `SqlitePool` into each request's filter chain.
fn with_pool(
    pool: SqlitePool,
) -> impl Filter<Extract = (SqlitePool,), Error = std::convert::Infallible> + Clone {
    // The closure owns `pool` and hands out a cheap clone per request.
    warp::any().map(move || pool.clone())
}
// Filters
/// Route `POST /entry` — create a new entry from a JSON body; see `new_entry`.
pub fn post_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::path!("entry")
        .and(warp::post())
        .and(json_body_entry())
        .and(with_pool(pool))
        .and_then(new_entry)
}
/// Route `GET /entry/{id}` — fetch a single entry by id; see `read_entry`.
///
/// Built from `path("entry")` + `path::param` (rather than the `path!`
/// macro) so the trailing `i32` is extracted as a filter argument.
pub fn get_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::get()
        .and(warp::path("entry"))
        .and(warp::path::param::<i32>())
        .and(with_pool(pool))
        .and_then(read_entry)
}
/// Route `GET /entries_between/{start}/{stop}` — entries within a range.
/// The two params are date-like strings; their exact format is decided by
/// `db::read_entries_between`.
pub fn get_entries_between(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::get()
        .and(warp::path!("entries_between" / String / String))
        .and(with_pool(pool))
        .and_then(entries_between)
}
/// Route `GET /last_entry` — fetch the most recent entry; see `last_entry`.
pub fn read_last_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::get()
        .and(warp::path("last_entry"))
        .and(with_pool(pool))
        .and_then(last_entry)
}
/// Route `POST /update_entry` — update an entry from a JSON body;
/// see `update_entry_handler`.
pub fn update_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::post()
        .and(warp::path("update_entry"))
        .and(json_body_entry())
        .and(with_pool(pool))
        .and_then(update_entry_handler)
}
/// Route `POST /delete_entry/{id}` — delete the entry with the given id;
/// see `delete_entry_handler`.
pub fn delete_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::post()
        .and(warp::path("delete_entry"))
        .and(warp::path::param::<i32>())
        .and(with_pool(pool))
        .and_then(delete_entry_handler)
}
/// Route `POST /delete_last_entry` — delete the most recent entry;
/// see `delete_last_entry_handler`.
pub fn delete_last_entry(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::post()
        .and(warp::path("delete_last_entry"))
        .and(with_pool(pool))
        .and_then(delete_last_entry_handler)
}
/// Route `POST /project` — create a new project from a JSON body;
/// see `new_project`.
pub fn post_project(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::path!("project")
        .and(warp::post())
        .and(json_body_project())
        .and(with_pool(pool))
        .and_then(new_project)
}
/// Route `GET /project/{id}` — fetch a single project by id; see `read_project`.
pub fn get_project(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::get()
        .and(warp::path("project"))
        .and(warp::path::param::<i32>())
        .and(with_pool(pool))
        .and_then(read_project)
}
/// Route `GET /all_projects` — list every project; see `read_all_projects`.
pub fn get_all_projects(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::get()
        .and(warp::path("all_projects"))
        .and(with_pool(pool))
        .and_then(read_all_projects)
}
/// Route `POST /update_project` — update a project from a JSON body;
/// see `update_project_handler`.
pub fn update_project(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::post()
        .and(warp::path("update_project"))
        .and(json_body_project())
        .and(with_pool(pool))
        .and_then(update_project_handler)
}
/// Route `POST /delete_project/{param}` — delete a project. The `String`
/// param is presumably the project name (other routes key by `i32` id) —
/// confirm against `delete_project_handler`.
pub fn delete_project(
    pool: SqlitePool,
) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    warp::post()
        .and(warp::path("delete_project"))
        .and(warp::path::param::<String>())
        .and(with_pool(pool))
        .and_then(delete_project_handler)
}
// Handlers
/// Persists a new entry; replies 200 on success, 400 if the write fails.
async fn new_entry(entry: Entry, pool: SqlitePool) -> Result<impl warp::Reply, Infallible> {
    info!("Processing new entry");
    let status = if db::write_entry(&pool, &entry).await.is_ok() {
        http::StatusCode::OK
    } else {
        http::StatusCode::BAD_REQUEST
    };
    Ok(status)
}
/// Looks up one entry by id; replies with it as JSON, or 400 when the
/// lookup fails (e.g. no such id).
async fn read_entry(id: i32, pool: SqlitePool) -> Result<warp::reply::Response, Infallible> {
    info!("Reading entry #{}", id);
    let reply = match db::read_entry(&pool, id).await {
        Ok(entry) => warp::reply::json(&entry).into_response(),
        Err(_) => warp::reply::with_status("Invalid id", http::StatusCode::BAD_REQUEST)
            .into_response(),
    };
    Ok(reply)
}
/// Returns all entries between `start` and `stop` as JSON; 400 when the
/// lookup fails (e.g. an invalid range).
async fn entries_between(
    start: String,
    stop: String,
    pool: SqlitePool,
) -> Result<impl warp::Reply, Infallible> {
    info!("Reading entries between {} and {}", start, stop);
    match db::read_entries_between(&pool, start, stop).await {
        Ok(entries) => Ok(warp::reply::json(&entries).into_response()),
        Err(_) => Ok(
            warp::reply::with_status("Invalid date range", http::StatusCode::BAD_REQUEST)
                .into_response(),
        ),
    }
}
/// Handler: return the most recently stored entry as JSON.
///
/// Replies 500 with a short message if the read fails.
async fn last_entry(pool: SqlitePool) -> Result<impl warp::Reply, Infallible> {
    info!("Reading most recent entry.");
    let reply = match db::read_last_entry(&pool).await {
        Ok(entry) => warp::reply::json(&entry).into_response(),
        Err(_) => warp::reply::with_status(
            "Failed to read last entry.",
            http::StatusCode::INTERNAL_SERVER_ERROR,
        )
        .into_response(),
    };
    Ok(reply)
}
/// Handler: overwrite an existing entry with the posted JSON body.
///
/// Replies 200 OK on success and 400 Bad Request if the update fails.
async fn update_entry_handler(
    entry: Entry,
    pool: SqlitePool,
) -> Result<impl warp::Reply, Infallible> {
    // Bug fix: this previously logged "Reading most recent entry." — a
    // copy/paste from `last_entry` — which made server logs misleading.
    info!("Updating entry.");
    match db::update_entry(&pool, &entry).await {
        Ok(_) => Ok(http::StatusCode::OK),
        Err(_) => Ok(http::StatusCode::BAD_REQUEST),
    }
}
/// Handler: remove one entry by id.
///
/// Replies 200 OK on success and 400 Bad Request if the delete fails.
async fn delete_entry_handler(id: i32, pool: SqlitePool) -> Result<impl warp::Reply, Infallible> {
    info!("Deleting entry #{}", id);
    let status = if db::delete_entry(&pool, id).await.is_ok() {
        http::StatusCode::OK
    } else {
        http::StatusCode::BAD_REQUEST
    };
    Ok(status)
}
/// Handler: remove the most recently stored entry.
///
/// Replies 200 OK on success and 500 if the delete fails.
async fn delete_last_entry_handler(pool: SqlitePool) -> Result<impl warp::Reply, Infallible> {
    info!("Deleting most recent entry.");
    let status = if db::delete_last_entry(&pool).await.is_ok() {
        http::StatusCode::OK
    } else {
        http::StatusCode::INTERNAL_SERVER_ERROR
    };
    Ok(status)
}
/// Handler: persist a freshly posted `Project`.
///
/// Replies 200 OK when the DB write succeeds and 400 Bad Request otherwise.
async fn new_project(project: Project, pool: SqlitePool) -> Result<impl warp::Reply, Infallible> {
    info!("Creating a new project.");
    let status = if db::write_project(&pool, &project).await.is_ok() {
        http::StatusCode::OK
    } else {
        http::StatusCode::BAD_REQUEST
    };
    Ok(status)
}
/// Handler: look up one project by id and return it as JSON.
///
/// Replies with the serialized project, or 400 "Invalid id" if the lookup fails.
async fn read_project(id: i32, pool: SqlitePool) -> Result<warp::reply::Response, Infallible> {
    info!("Reading project #{}", id);
    let response = match db::read_project(&pool, id).await {
        Ok(project) => warp::reply::json(&project).into_response(),
        Err(_) => {
            warp::reply::with_status("Invalid id", http::StatusCode::BAD_REQUEST).into_response()
        }
    };
    Ok(response)
}
/// Handler: return every project as a JSON array.
///
/// Replies 500 with a short message if the read fails.
async fn read_all_projects(pool: SqlitePool) -> Result<warp::reply::Response, Infallible> {
    info!("Reading all projects.");
    match db::read_all_projects(&pool).await {
        Ok(projects) => Ok(warp::reply::json(&projects).into_response()),
        // Bug fix: the failure reply used to say "Invalid id" with 400 — copied
        // from the by-id handlers — but this route takes no id; a failure here
        // is a server-side read error, mirroring `last_entry`'s handling.
        Err(_) => Ok(warp::reply::with_status(
            "Failed to read projects.",
            http::StatusCode::INTERNAL_SERVER_ERROR,
        )
        .into_response()),
    }
}
/// Handler: overwrite an existing project with the posted JSON body.
///
/// Replies 200 OK on success and 400 Bad Request if the update fails.
async fn update_project_handler(
    project: Project,
    pool: SqlitePool,
) -> Result<impl warp::Reply, Infallible> {
    info!("Updating project.");
    let status = if db::update_project(&pool, &project).await.is_ok() {
        http::StatusCode::OK
    } else {
        http::StatusCode::BAD_REQUEST
    };
    Ok(status)
}
/// Handler: remove a project identified by its string `code`.
///
/// Replies 200 with a confirmation body on success and 400 on failure.
///
/// NOTE(review): the response bodies say "Entry deleted." / "Error deleting
/// entry." even though this deletes a *project*. `tests::test_delete_project`
/// asserts the exact body text, so the wording is intentionally left as-is;
/// fix handler and test together if the message is ever corrected.
async fn delete_project_handler(
code: String,
pool: SqlitePool,
) -> Result<impl warp::Reply, Infallible> {
info!("Deleting project: {}", code);
match db::delete_project(&pool, code).await {
Ok(_) => Ok(warp::reply::with_status(
"Entry deleted.",
http::StatusCode::OK,
)),
Err(_) => Ok(warp::reply::with_status(
"Error deleting entry.",
http::StatusCode::BAD_REQUEST,
)),
}
}
// Integration-style tests that drive the warp filters end-to-end against an
// in-memory test database (see `db::tests` helpers). Each test builds the
// filter, issues a request via `warp::test`, and checks status/body and/or
// resulting DB state.
#[cfg(test)]
mod tests {
use super::*;
use bytes::Bytes;
use fake::{Fake, Faker};
use serde_json;
// GET /entry/<id> returns the stored entry serialized as JSON.
#[tokio::test]
async fn test_get_entry() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_entries_table(&pool).await?;
let mut exp_entry: db::Entry = Faker.fake();
// First row inserted into a fresh table gets id 1.
exp_entry.id = Some(1);
db::write_entry(&pool, &exp_entry).await?;
let filter = get_entry(pool);
let res = warp::test::request()
.method("GET")
.path("/entry/1")
.reply(&filter)
.await;
let exp_json = Bytes::from(serde_json::to_string(&exp_entry).unwrap());
assert_eq!(res.status(), 200);
assert_eq!(res.body(), &exp_json);
Ok(())
}
// POST /entry persists the JSON body; verified by reading it back.
#[tokio::test]
async fn test_post_entry() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_entries_table(&pool).await?;
let mut exp_entry: db::Entry = Faker.fake();
exp_entry.id = Some(1);
let exp_json = Bytes::from(serde_json::to_string(&exp_entry).unwrap());
// db::write_entry(&pool, &exp_entry).await?;
let filter = post_entry(pool.clone());
let res = warp::test::request()
.method("POST")
.path("/entry")
.body(&exp_json)
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
let entry = db::read_entry(&pool, exp_entry.id.unwrap()).await?;
assert_eq!(&entry, &exp_entry);
Ok(())
}
// POST /update_entry overwrites an existing row; verified by reading it back.
#[tokio::test]
async fn test_update_entry() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_entries_table(&pool).await?;
let mut exp_entry: db::Entry = Faker.fake();
let id = db::write_entry(&pool, &exp_entry).await?;
exp_entry.id = Some(id);
// Mutate every updatable field so the round-trip proves the update stuck.
exp_entry.start = String::from("0900");
exp_entry.stop = String::from("1100");
exp_entry.code = String::from("20-008");
exp_entry.memo = String::from("work, work, work");
let exp_json = Bytes::from(serde_json::to_string(&exp_entry).unwrap());
let filter = update_entry(pool.clone());
let res = warp::test::request()
.method("POST")
.path("/update_entry")
.body(&exp_json)
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
let entry = db::read_entry(&pool, exp_entry.id.unwrap()).await?;
assert_eq!(&entry, &exp_entry);
Ok(())
}
// POST /delete_entry/<id> removes the row and replies with a confirmation body.
#[tokio::test]
async fn test_delete_entry() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_entries_table(&pool).await?;
let mut entry: db::Entry = Faker.fake();
entry.id = Some(1);
db::write_entry(&pool, &entry).await?;
let filter = delete_entry(pool.clone());
let res = warp::test::request()
.method("POST")
.path("/delete_entry/1")
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
// Entry should not exist.
// NOTE(review): the `is_err()` result is discarded, so this line does not
// actually assert deletion — it only documents intent.
let _ = db::read_entry(&pool, entry.id.unwrap()).await.is_err();
assert_eq!(res.body(), "Entry deleted.");
Ok(())
}
// GET /project/<id> returns the stored project serialized as JSON.
#[tokio::test]
async fn test_get_project() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_projects_table(&pool).await?;
let mut exp_project: db::Project = Faker.fake();
exp_project.id = Some(1);
db::write_project(&pool, &exp_project).await?;
let filter = get_project(pool);
let res = warp::test::request()
.method("GET")
.path("/project/1")
.reply(&filter)
.await;
let exp_json = Bytes::from(serde_json::to_string(&exp_project).unwrap());
assert_eq!(res.status(), 200);
assert_eq!(res.body(), &exp_json);
Ok(())
}
// POST /project persists the JSON body; verified by reading it back.
#[tokio::test]
async fn test_post_project() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_projects_table(&pool).await?;
let mut exp_project: db::Project = Faker.fake();
exp_project.id = Some(1);
let exp_json = Bytes::from(serde_json::to_string(&exp_project).unwrap());
let filter = post_project(pool.clone());
let res = warp::test::request()
.method("POST")
.path("/project")
.body(&exp_json)
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
let project = db::read_project(&pool, exp_project.id.unwrap()).await?;
assert_eq!(&project, &exp_project);
Ok(())
}
// POST /update_project overwrites an existing project; verified by read-back.
#[tokio::test]
async fn test_update_project() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_projects_table(&pool).await?;
let mut exp_project: db::Project = Faker.fake();
let id = db::write_project(&pool, &exp_project).await?;
exp_project.id = Some(id);
exp_project.name = String::from("General Support");
exp_project.code = String::from("20-008");
let exp_json = Bytes::from(serde_json::to_string(&exp_project).unwrap());
let filter = update_project(pool.clone());
let res = warp::test::request()
.method("POST")
.path("/update_project")
.body(&exp_json)
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
let project = db::read_project(&pool, exp_project.id.unwrap()).await?;
assert_eq!(&project, &exp_project);
Ok(())
}
// POST /delete_project/<code> removes the project addressed by its code.
#[tokio::test]
async fn test_delete_project() -> Result<()> {
let pool = db::tests::setup_test_db().await?;
db::tests::setup_projects_table(&pool).await?;
let mut project: db::Project = Faker.fake();
project.id = Some(1);
let code = project.code.clone();
db::write_project(&pool, &project).await?;
let filter = delete_project(pool.clone());
let res = warp::test::request()
.method("POST")
.path(&format!("/delete_project/{}", &code))
.reply(&filter)
.await;
assert_eq!(res.status(), 200);
// Entry should not exist.
// NOTE(review): like test_delete_entry, the `is_err()` result is discarded.
let _ = db::read_project(&pool, project.id.unwrap()).await.is_err();
assert_eq!(res.body(), "Entry deleted.");
Ok(())
}
}
|
mod database;
mod document_attr_key;
mod indexer;
mod number;
mod ranked_map;
mod serde;
pub use rocksdb;
pub use self::database::{Database, Index, CustomSettings};
pub use self::number::Number;
pub use self::ranked_map::RankedMap;
pub use self::serde::compute_document_id;
|
use std::sync::{Mutex, Arc};
use std::thread;
use crossbeam_channel;
use crossbeam_channel::TryRecvError;
use crate::crl::{RequestCompletionHandler, Crl};
use crate::crl;
use crate::object;
use crate::transaction;
use super::*;
/// Zero-sized error type indicating that an `Entry` is full and cannot
/// accept any more data (size budget or operation cap exhausted).
struct EntryFull;
/// Mutable CRL state shared by every IO thread behind an `Arc<Mutex<_>>`.
pub(self) struct LogState {
/// Incoming requests from frontends; drained by the IO threads.
receiver: crossbeam_channel::Receiver<Request>,
/// Bounds how many entries must be re-read during crash recovery.
entry_window_size: usize,
/// Transaction state keyed by (store, transaction) id.
transactions: HashMap<TxId, RefCell<Tx>>,
/// Allocation state keyed by (store, allocation-transaction) id.
allocations: HashMap<TxId, RefCell<Alloc>>,
/// Serial number the next created log entry will use.
next_entry_serial: LogEntrySerialNumber,
/// File location of the most recently created entry.
last_entry_location: FileLocation,
/// Oldest entry still referenced by live transaction/allocation state.
earliest_entry_needed: LogEntrySerialNumber,
/// Completion handlers indexed by `ClientId.0`.
completion_handlers: Vec<Arc<dyn RequestCompletionHandler + Send + Sync>>,
/// Highest serial whose completions have been delivered, in order.
last_notified_serial: LogEntrySerialNumber,
/// Out-of-order completions buffered until their predecessors finish;
/// the bool is the write-success flag for that entry.
pending_completions: HashMap<LogEntrySerialNumber, (bool, Vec<Completion>)>,
/// Channels do not have a peek option so if we cannot add a request to the current entry
/// it will be placed here for the next entry to consume
next_request: Option<Request>
}
impl LogState {
/// Builds the shared log state from crash-recovery output.
///
/// Recovered transactions and allocations are re-hydrated into in-memory
/// `Tx` / `Alloc` cells with their on-disk locations recorded, and
/// `earliest_entry_needed` is computed as the minimum `last_entry_serial`
/// across all recovered state (defaulting to the next serial when empty).
pub(super) fn new(
receiver: crossbeam_channel::Receiver<Request>,
entry_window_size: usize, // ensures we never need to read more than window_size entries during recovery
recovered_transactions: &Vec<RecoveredTx>,
recovered_allocations: &Vec<RecoveredAlloc>,
last_entry_serial: LogEntrySerialNumber,
last_entry_location: FileLocation) -> Arc<Mutex<LogState>> {
let mut transactions = HashMap::new();
let mut allocations = HashMap::new();
for rtx in recovered_transactions {
transactions.insert(rtx.id.clone(), RefCell::new(Tx {
id: rtx.id.1.clone(),
txd_location: Some(rtx.txd_location),
data_locations: Some(rtx.update_locations.iter().map(|ou| ou.1).collect()),
state: TransactionRecoveryState {
transaction_id: rtx.id.1.clone(),
store_id: rtx.id.0,
serialized_transaction_description: rtx.serialized_transaction_description.clone().into(),
object_updates: rtx.object_updates.clone(),
tx_disposition: rtx.tx_disposition,
paxos_state: rtx.paxos_state
},
last_entry_serial: rtx.last_entry_serial
}));
}
for ra in recovered_allocations {
allocations.insert(ra.id.clone(), RefCell::new(Alloc {
data_location: Some(ra.data_location),
state: AllocationRecoveryState {
store_id: ra.id.0,
store_pointer: ra.store_pointer.clone(),
id: ra.object_id,
kind: ra.kind,
size: ra.size,
data: ArcDataSlice::from(ra.data.clone()),
refcount: ra.refcount,
timestamp: ra.timestamp,
allocation_transaction_id: ra.id.1,
serialized_revision_guard: ra.serialized_revision_guard.clone()
},
last_entry_serial: ra.last_entry_serial
}));
}
// Minimum last_entry_serial over all live state; the fold's initial value
// (next serial) is the answer when there is no recovered state at all.
let earliest_entry_needed = transactions.iter().map(|(_,v)| v.borrow().last_entry_serial).chain(
allocations.iter().map(|(_,v)| v.borrow().last_entry_serial)
).fold(last_entry_serial.next(), |a, s| {
if s < a {
s
} else {
a
}
});
Arc::new( Mutex::new( LogState {
receiver,
entry_window_size,
transactions,
allocations,
next_entry_serial: last_entry_serial.next(),
last_entry_location: last_entry_location,
earliest_entry_needed,
completion_handlers: Vec::new(),
pending_completions: HashMap::new(),
last_notified_serial: last_entry_serial,
next_request: None
}))
}
/// Re-queues into `entry` every transaction/allocation whose data lives in
/// `file_id`, clearing the stale location so the data is rewritten before
/// the file is reused. Returns `Err(EntryFull)` if `entry` runs out of room
/// (the caller retries on a later pass).
///
/// NOTE(review): `data_locations` is only inspected inside the
/// `Some(loc)` arm of the `txd_location` match — a transaction with
/// `txd_location == None` but data still located in `file_id` would be
/// skipped. Confirm whether `txd_location == None` implies
/// `data_locations == None` before relying on this.
fn prune_data_stored_in_file(
&mut self,
file_id: FileId,
entry: &mut Entry) -> Result<(), EntryFull> {
for (tx_id, tx) in &self.transactions {
let mut mtx = tx.borrow_mut();
let add = match mtx.txd_location.as_ref() {
Some(loc) => {
let mut add_it = false;
if loc.file_id == file_id {
mtx.txd_location = None;
add_it = true;
}
if let Some(locations) = &mtx.data_locations {
for l in locations {
if l.file_id == file_id {
mtx.data_locations = None;
add_it = true;
break;
}
}
}
add_it
},
None => false
};
// Release the RefCell borrow before `entry` re-borrows the cell.
drop(mtx);
if add {
entry.add_transaction(tx_id, tx, None)?;
}
}
for (tx_id, a) in &self.allocations {
let mut ma = a.borrow_mut();
let add = match ma.data_location.as_ref() {
None => false,
Some(l) => {
if l.file_id == file_id {
ma.data_location = None;
true
} else {
false
}
}
};
drop(ma);
if add {
entry.add_allocation(tx_id, a, None)?;
}
}
Ok(())
}
/// Applies a single frontend `Request` to the in-memory state and stages
/// the corresponding operation into `entry`.
///
/// Returns `EntryIsFull` when `entry` cannot accommodate the operation
/// (the caller stashes the request in `next_request` and retries on the
/// next entry), or `Terminate` for `Request::Terminate`.
fn add_request(&mut self,
entry: &mut Entry,
request: &Request) -> RequestResult {
// don't have to create it again at least
// Check entry first. If we can't add it to the entry, we don't need to handle
// the request (it'll just be re-attempted later)
match request {
Request::SaveTransactionState {
client_request,
store_id,
transaction_id,
serialized_transaction_description,
object_updates,
tx_disposition,
paxos_state
} => {
let txid = TxId(*store_id, *transaction_id);
match self.transactions.get(&txid) {
Some(tx) => {
let mut mtx = tx.borrow_mut();
mtx.state.tx_disposition = *tx_disposition;
mtx.state.paxos_state = *paxos_state;
// Check for late arrival of object update content
if mtx.state.object_updates.len() == 0 && object_updates.len() != 0 {
mtx.state.object_updates = object_updates.clone();
// Force the updates to be (re)written with the next entry.
mtx.data_locations = None;
}
drop(mtx);
match entry.add_transaction(&txid, &tx, Some(client_request)) {
Ok(_) => RequestResult::Okay,
Err(_) => RequestResult::EntryIsFull
}
},
None => {
let tx = RefCell::new(Tx {
id: *transaction_id,
txd_location: None,
data_locations: None,
state: TransactionRecoveryState {
transaction_id: *transaction_id,
store_id: *store_id,
serialized_transaction_description: serialized_transaction_description.clone(),
object_updates: object_updates.clone(),
tx_disposition: *tx_disposition,
paxos_state: *paxos_state
},
last_entry_serial: self.next_entry_serial
});
// Only register the new Tx if it fit in the entry; otherwise the
// request is retried later and the Tx rebuilt then.
match entry.add_transaction(&txid, &tx, Some(client_request)) {
Ok(_) => {
self.transactions.insert(txid, tx);
RequestResult::Okay
},
Err(_) => RequestResult::EntryIsFull
}
}
}
},
Request::DropTransactionData {
store_id,
transaction_id,
} => {
let txid = TxId(*store_id, *transaction_id);
if let Some(tx) = self.transactions.get(&txid) {
let mut mtx = tx.borrow_mut();
mtx.data_locations = None;
mtx.state.object_updates.clear();
// can safely ignore errors here
drop(mtx);
match entry.add_transaction(&txid, &tx, None) {
Ok(_) => RequestResult::Okay,
Err(_) => RequestResult::EntryIsFull
}
} else {
// Unknown transaction: nothing to drop.
RequestResult::Okay
}
},
Request::DeleteTransactionState {
store_id,
transaction_id,
} => {
let txid = TxId(*store_id, *transaction_id);
match entry.drop_transaction(&txid) {
Ok(_) => {
self.transactions.remove(&txid);
RequestResult::Okay
},
Err(_) => {
RequestResult::EntryIsFull
}
}
},
Request::SaveAllocationState {
client_request,
state
} => {
let a = RefCell::new(Alloc{
data_location: None,
state: state.clone(),
last_entry_serial: self.next_entry_serial
});
let txid = TxId(state.store_id, state.allocation_transaction_id);
match entry.add_allocation(&txid, &a, Some(client_request)) {
Ok(_) => {
self.allocations.insert(txid, a);
RequestResult::Okay
},
Err(_) => RequestResult::EntryIsFull
}
},
Request::DeleteAllocationState {
store_id,
allocation_transaction_id,
} => {
let txid = TxId(*store_id, *allocation_transaction_id);
match entry.drop_allocation(&txid) {
Ok(_) => {
self.allocations.remove(&txid);
RequestResult::Okay
},
Err(_) => {
RequestResult::EntryIsFull
}
}
},
Request::GetFullRecoveryState {
store_id,
sender
} => {
let mut txs: Vec<TransactionRecoveryState> = Vec::new();
let mut allocs: Vec<AllocationRecoveryState> = Vec::new();
for (_, tx) in &self.transactions {
let tx = tx.borrow();
if tx.state.store_id == *store_id {
txs.push(tx.state.clone());
}
}
for (_, a) in &self.allocations {
let a = a.borrow();
if a.state.store_id == *store_id {
allocs.push(a.state.clone());
}
}
// Intentionally ignore any send errors
sender.send(FullStateResponse(txs, allocs)).unwrap_or(());
RequestResult::Okay
},
Request::RegisterClientRequest {
sender,
handler
} => {
// ClientId is simply the handler's index in completion_handlers.
let client_id = ClientId(self.completion_handlers.len());
self.completion_handlers.push(handler.clone());
// Intentionally ignore any send errors
sender.send(RegisterClientResponse { client_id }).unwrap_or(());
RequestResult::Okay
},
Request::Terminate => RequestResult::Terminate
}
}
/// Moves transactions and allocation descriptions behind the entry window size into
/// the current entry. The goal here is to restrict the number of entries that must be
/// read during crash recovery. Returns true if all necessary entries have been migrated
/// forward. False otherwise.
fn migrate_earliest_entry(&mut self, entry: &mut Entry) -> bool {
// If this entry falls on a window-size boundary, put all transactions and allocations
// written behind the window-size into the entry buffer
if self.next_entry_serial.0 % self.entry_window_size as u64 != 0 {
true // nothing to do
} else {
// NOTE(review): assumes next_entry_serial.0 >= entry_window_size on a
// boundary hit; otherwise this subtraction would underflow — confirm.
let earliest = self.next_entry_serial.0 as u64 - self.entry_window_size as u64;
let earliest = LogEntrySerialNumber(earliest);
for (txid, tx) in &self.transactions {
if tx.borrow().last_entry_serial < earliest {
match entry.add_transaction(txid, tx, None) {
Ok(_) => (),
Err(_) => return false
}
}
}
for (txid, a) in &self.allocations {
if a.borrow().last_entry_serial < earliest {
match entry.add_allocation(txid, a, None) {
Ok(_) => (),
Err(_) => return false
}
}
}
self.earliest_entry_needed = earliest;
true
}
}
/// Creates the log entry and returns it as a vector of ArcDataSlice.
///
/// Returns the serial number assigned to this entry and the encoded buffers
/// to be handed to the stream. Also advances `next_entry_serial` and records
/// the new `last_entry_location`.
pub fn create_log_entry(&mut self,
entry: &mut Entry,
stream: &Box<dyn FileStream>) -> (LogEntrySerialNumber, Vec::<ArcDataSlice>) {
let mut txs: Vec<&RefCell<Tx>> = Vec::new();
let mut allocs: Vec<&RefCell<Alloc>> = Vec::new();
for txid in &entry.tx_set {
self.transactions.get(&txid).map( |tx| txs.push(tx) );
}
for txid in &entry.allocs {
self.allocations.get(&txid).map( |a| allocs.push(a) );
}
let entry_serial = self.next_entry_serial;
let (entry_location, buffers) = encoding::log_entry(
entry_serial, self.earliest_entry_needed, self.last_entry_location,
&txs, &allocs,
&entry.tx_deletions, &entry.alloc_deletions, stream
);
self.last_entry_location = entry_location;
self.next_entry_serial = self.next_entry_serial.next();
(entry_serial, buffers)
}
}
/// Outcome of applying one `Request` to the current entry.
enum RequestResult {
/// Request handled and staged into the entry.
Okay,
/// Entry had no room; the request must be retried with the next entry.
EntryIsFull,
/// `Request::Terminate` received; the IO thread should exit.
Terminate
}
/// CRL backend: owns the IO thread handles and the request channel sender
/// used by frontends to submit work.
pub(super) struct Backend {
io_threads: Vec<thread::JoinHandle<()>>,
pub sender: crossbeam_channel::Sender<Request>
}
impl crate::crl::Backend for Backend {
/// Delegates to `shutdown_impl` (terminate + join all IO threads).
fn shutdown(&mut self) {
self.shutdown_impl();
}
/// Registers `save_handler` with the IO threads and returns a frontend
/// handle bound to the `ClientId` allocated for it.
fn new_interface(&self,
save_handler: sync::Arc<dyn RequestCompletionHandler + Send + Sync>) -> Box<dyn Crl> {
let (response_sender, receiver) = crossbeam_channel::unbounded();
// Blocking round trip: send the registration request, wait for the id.
self.sender.send(Request::RegisterClientRequest{sender: response_sender, handler: save_handler}).unwrap();
let client_id = receiver.recv().unwrap().client_id;
Box::new(frontend::Frontend::new(client_id, self.sender.clone()))
}
}
impl Backend {
/// Sends one `Terminate` per IO thread, then joins them all.
pub fn shutdown_impl(&mut self) {
for _ in &self.io_threads {
// Intentionally ignore send errors
self.sender.send(Request::Terminate).unwrap_or(());
}
while !self.io_threads.is_empty() {
// Join results are intentionally discarded.
self.io_threads.pop().map(|t| t.join());
}
}
/// Recovers CRL state from `crl_directory` and spawns one IO thread per
/// tri-file stream (log files are consumed three at a time).
///
/// # Errors
/// Propagates any I/O error from `log_file::recover`.
pub fn recover(
crl_directory: &Path,
entry_window_size: usize,
max_entry_operations: usize,
num_streams: usize,
max_file_size: usize
) -> Result<Backend, std::io::Error> {
let mut r = log_file::recover(crl_directory, max_file_size, num_streams)?;
// Each stream is backed by exactly three files.
assert!(r.log_files.len() % 3 == 0);
let (sender, receiver) = crossbeam_channel::unbounded();
let log_state = LogState::new(
receiver, entry_window_size,
&r.transactions, &r.allocations,
r.last_entry_serial, r.last_entry_location);
let mut io_threads = Vec::new();
while r.log_files.len() > 0 {
let f3 = r.log_files.pop().unwrap();
let f2 = r.log_files.pop().unwrap();
let f1 = r.log_files.pop().unwrap();
let ls = log_state.clone();
let handle = thread::spawn( move || {
io_thread(ls, tri_file_stream::TriFileStream::new(f1, f2, f3), max_entry_operations);
});
io_threads.push(handle);
}
let be = Backend{
io_threads,
sender,
};
Ok(be)
}
}
/// Main loop of a CRL IO thread.
///
/// Repeatedly: (1) under the state mutex, batch pruning/migration work and
/// queued requests into an `Entry` and encode it; (2) outside the mutex,
/// perform the blocking stream write and rotate files when needed; (3) under
/// the mutex again, deliver client completions in entry-serial order,
/// buffering out-of-order completions in `pending_completions`.
fn io_thread(log_state: Arc<Mutex<LogState>>, mut stream: Box<dyn FileStream>, max_entry_operations: usize) {
let max_file_size = stream.const_max_file_size();
let mut prune_file: Option<FileId> = None;
let mut entry = Entry::new(max_file_size, max_entry_operations);
'top_level: loop {
entry.reset(&stream);
let (prune_required, entry_serial, buffers) = {
// ---------- Lock Log State Mutex ----------
let mut state = log_state.lock().unwrap(); // Panic if lock fails
// First prune all content from our to-be-pruned file, if we have a file to prune.
// This action alone may entirely fill the entry buffer (and may even take
// multiple passes).
let pruned = match prune_file {
None => true,
Some(prune_id) => {
match state.prune_data_stored_in_file(prune_id, &mut entry) {
Ok(_) => {
prune_file = None; // Prune Complete!
true
},
Err(_) => false
}
}
};
// Migration is only attempted once pruning completed (short-circuit).
let migrated = pruned && state.migrate_earliest_entry(&mut entry);
// NOTE(review): `migrated` can only be true when `pruned` is true, so
// the `pruned &&` below is redundant but harmless.
if pruned && migrated {
// If the last entry wasn't able to process all entries it read from the
// channel, it will have left the last entry it read in the next_request field
// of the log state object. Handle that before reading new entries
if ! state.next_request.is_none() {
if let Some(request) = state.next_request.clone() {
match state.add_request(&mut entry, &request) {
RequestResult::Okay => state.next_request = None,
RequestResult::EntryIsFull => (),
RequestResult::Terminate => break 'top_level
}
}
}
// If we weren't able to handle the pending request, the entry must already
// be full so we can skip reading from the channel. Otherwise, we'll read from
// the channel until the entry is full or we need to do a blocking read.
if state.next_request.is_none() {
'read_loop: loop {
match state.receiver.try_recv() {
Ok(request) => {
match state.add_request(&mut entry, &request) {
RequestResult::Okay => (),
RequestResult::EntryIsFull => {
// Stash for the next entry; channels can't be peeked.
state.next_request = Some(request);
break 'read_loop;
},
RequestResult::Terminate => break 'top_level
};
},
Err(e) => {
match e {
TryRecvError::Disconnected => break 'top_level,
TryRecvError::Empty => {
if ! entry.is_empty() {
// The channel is empty and we have content to
// write
break 'read_loop;
} else {
// channel and entry are empty
// Block here awaiting entry content. Note that
// we're HOLDING the state mutex while blocking
// that's what we want.
match state.receiver.recv() {
// The only reason channel.recv() can error
// here is due the channel being both empty
// and broken. Terminate this IO thread.
Err(_) => break 'top_level,
Ok(request) => {
match state.add_request(&mut entry, &request) {
RequestResult::Okay => (),
RequestResult::EntryIsFull => {
state.next_request = Some(request);
break 'read_loop;
},
RequestResult::Terminate => {
break 'top_level;
}
};
}
}
}
}
}
}
}
}
}
}
let (entry_serial, buffers) = state.create_log_entry(&mut entry, &stream);
// A still-pending request means this entry filled up; rotate files so
// the backlog can drain into fresh space.
let prune_required = ! state.next_request.is_none();
(prune_required, entry_serial, buffers)
}; // ---------- Unlock Log State Mutex ----------
// Blocking Write
let success = match stream.write(buffers) {
Ok(_) => true,
Err(_) => {
// TODO - Log error
false
}
};
if prune_required || entry.is_empty() {
match stream.rotate_files() {
Ok(fid) => prune_file = fid,
Err(_) => {
// TODO: Log error and exit thread
// This stream is busted. Exit thread
break 'top_level
}
}
}
{ // ---------- Lock Log State Mutex ----------
let mut state = log_state.lock().unwrap(); // Panic if lock fails
// Completions must be delivered in serial order; entries written by other
// threads may finish out of order, hence pending_completions below.
if entry_serial == state.last_notified_serial.next() {
// Run completion handlers for this entry
for cr in &entry.requests {
match cr {
Completion::TxSave {
client_id,
store_id,
transaction_id,
save_id
} => {
state.completion_handlers[client_id.0].complete(
crl::Completion::TransactionSave {
store_id: *store_id,
transaction_id: *transaction_id,
save_id: *save_id,
success
}
)
},
Completion::AllocSave {
client_id,
store_id,
transaction_id,
object_id
} => {
state.completion_handlers[client_id.0].complete(
crl::Completion::AllocationSave {
store_id: *store_id,
transaction_id: *transaction_id,
object_id: *object_id,
success
}
)
}
}
}
state.last_notified_serial = entry_serial;
// Run completion handlers for further entries that have completed
loop {
let next = state.last_notified_serial.next();
match state.pending_completions.remove(&next) {
None => break,
Some(t) => {
let (psuccess, v) = t;
for cr in v {
match cr {
Completion::TxSave {
client_id,
store_id,
transaction_id,
save_id
} => {
state.completion_handlers[client_id.0].complete(
crl::Completion::TransactionSave {
store_id,
transaction_id,
save_id,
success: psuccess
}
)
},
Completion::AllocSave {
client_id,
transaction_id,
store_id,
object_id
} => {
state.completion_handlers[client_id.0].complete(
crl::Completion::AllocationSave {
store_id,
transaction_id,
object_id,
success: psuccess
}
)
}
}
}
state.last_notified_serial = next;
}
}
}
} else {
// Out of order: buffer until the preceding entries are notified.
state.pending_completions.insert(entry_serial, (success, entry.requests.clone()));
}
} // ---------- Unlock Log State Mutex ----------
}
}
/// A client notification owed once the entry containing the corresponding
/// save operation has been written (successfully or not).
#[derive(Copy, Clone)]
enum Completion {
/// Transaction-state save requested by a client.
TxSave {
client_id: ClientId,
store_id: store::Id,
transaction_id: transaction::Id,
save_id: TxSaveId
},
/// Allocation-state save requested by a client.
AllocSave {
client_id: ClientId,
transaction_id: transaction::Id,
store_id: store::Id,
object_id: object::Id
}
}
/// Accumulates the operations destined for a single on-disk log entry,
/// tracking size and operation-count budgets.
struct Entry {
/// Client completions to deliver once this entry is written.
requests: Vec<Completion>,
/// Transactions (by id) included in this entry; deduplicated via the set.
tx_set: HashSet<TxId>,
/// Transaction deletions recorded in this entry.
tx_deletions: Vec<TxId>,
/// Allocations (by id) included in this entry.
allocs: Vec<TxId>,
/// Allocation deletions recorded in this entry.
alloc_deletions: Vec<TxId>,
/// Maximum file size; writes must not push the file past this.
max_size: usize,
/// Cap on the number of operations per entry.
max_operations: usize,
/// Encoded bytes accumulated so far.
size: usize,
/// File offset at which this entry will begin (set by `reset`).
offset: usize,
/// Operations accumulated so far.
operations: usize
}
impl Entry {
/// Creates an empty entry with the given size and operation budgets.
fn new(max_file_size: usize, max_operations: usize) -> Entry {
Entry {
requests: Vec::new(),
tx_set: HashSet::new(),
tx_deletions: Vec::new(),
allocs: Vec::new(),
alloc_deletions: Vec::new(),
max_size: max_file_size,
max_operations,
size: 0,
offset: 0,
operations: 0
}
}
/// Clears all accumulated state and re-reads the current file offset from
/// the stream so size accounting starts from the right position.
fn reset(&mut self, stream: &Box<dyn FileStream>) {
self.requests.clear();
self.tx_set.clear();
self.tx_deletions.clear();
self.allocs.clear();
self.alloc_deletions.clear();
self.size = 0;
let (_, _, file_offset) = stream.status();
self.offset = file_offset;
self.operations = 0;
}
/// True when the entry contains no operations at all.
fn is_empty(&self) -> bool {
self.tx_set.is_empty() &&
self.tx_deletions.is_empty() &&
self.allocs.is_empty() &&
self.alloc_deletions.is_empty()
}
/// Whether `nbytes` more payload (plus the fixed entry footprint and a
/// 4096-byte slack margin) still fits within the file size limit.
fn have_room_for(&self, nbytes: usize) -> bool {
self.offset + self.size + nbytes + STATIC_ENTRY_SIZE as usize + 4096 <= self.max_size
}
/// Stages a transaction into this entry, recording a `TxSave` completion
/// when a client request is attached. Duplicate transactions only add the
/// completion. Returns `Err(EntryFull)` when either budget is exhausted.
fn add_transaction(&mut self,
tx_id: &TxId,
tx: &RefCell<Tx>,
req: Option<&ClientRequest>) -> Result<(), EntryFull> {
if self.operations == self.max_operations {
return Err(EntryFull{});
}
if self.tx_set.contains(tx_id) {
// Transaction is already part of this entry
if let Some(cr) = req {
self.requests.push(Completion::TxSave {
client_id: cr.client_id,
store_id: cr.store_id,
transaction_id: cr.transaction_id,
save_id: cr.save_id
});
}
return Ok(());
} else {
let esize = encoding::tx_write_size(tx);
if self.have_room_for(esize) {
self.operations += 1;
self.tx_set.insert(tx_id.clone());
self.size += esize;
if let Some(cr) = req {
self.requests.push(Completion::TxSave{
client_id: cr.client_id,
store_id: cr.store_id,
transaction_id: cr.transaction_id,
save_id: cr.save_id
});
}
return Ok(());
} else {
return Err(EntryFull{});
}
}
}
/// Stages an allocation into this entry, recording an `AllocSave`
/// completion when a client request is attached.
///
/// NOTE(review): unlike `add_transaction`, there is no duplicate check
/// here — `allocs` is a Vec with no containment test, so repeated adds of
/// the same id would be staged twice. Confirm callers never re-add.
fn add_allocation(&mut self,
tx_id: &TxId,
alloc: &RefCell<Alloc>,
req: Option<&ClientRequest>) -> Result<(), EntryFull> {
if self.operations == self.max_operations {
return Err(EntryFull{});
}
let object_id = alloc.borrow().state.id;
let asize = encoding::alloc_write_size(alloc);
if self.have_room_for(asize) {
self.operations += 1;
self.allocs.push(tx_id.clone());
self.size += asize;
if let Some(cr) = req {
self.requests.push(Completion::AllocSave {
client_id: cr.client_id,
transaction_id: tx_id.1,
store_id: cr.store_id,
object_id
});
}
return Ok(());
} else {
return Err(EntryFull{});
}
}
/// Stages a transaction-deletion marker. Returns `Err(EntryFull)` when a
/// budget is exhausted.
fn drop_transaction(&mut self, tx_id: &TxId) -> Result<(), EntryFull> {
if self.operations == self.max_operations {
return Err(EntryFull{});
}
let tdsize = encoding::tx_delete_size(tx_id);
if self.have_room_for(tdsize) {
self.operations += 1;
self.tx_deletions.push(tx_id.clone());
self.size += tdsize;
return Ok(());
} else {
return Err(EntryFull{});
}
}
/// Stages an allocation-deletion marker. Returns `Err(EntryFull)` when a
/// budget is exhausted.
fn drop_allocation(&mut self, tx_id: &TxId) -> Result<(), EntryFull> {
if self.operations == self.max_operations {
return Err(EntryFull{});
}
let adsize = encoding::alloc_delete_size(&tx_id);
if self.have_room_for(adsize) {
self.operations += 1;
self.alloc_deletions.push(tx_id.clone());
self.size += adsize;
return Ok(());
} else {
return Err(EntryFull{});
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.