text stringlengths 8 4.13M |
|---|
mod http;
mod server;
mod storage;
pub use crate::server::Server;
pub use crate::storage::{
dynamo_db_driver::DynamoDbDriver,
storage_actor::StorageExecutor,
storage_driver::{StorageCmd, StorageDriver},
};
|
//! Host I/O
use core::{fmt, slice};
use core::fmt::Write;
/// File descriptors
// POSIX-style descriptor numbers understood by the host's WRITE call.
const STDOUT: usize = 1;
const STDERR: usize = 2;
/// Host's standard error
// Zero-sized handle; all writes funnel through `write_all(STDERR, ..)`.
struct Stderr;
/// Host's standard output
// Zero-sized handle; all writes funnel through `write_all(STDOUT, ..)`.
struct Stdout;
/// Write the whole `buffer` to the host file descriptor `fd`.
///
/// The host `WRITE` call returns the number of bytes that were *not*
/// written (`0` means everything was written), so on a short write we
/// retry with just the still-unwritten tail of the buffer.
fn write_all(fd: usize, mut buffer: &[u8]) {
    while !buffer.is_empty() {
        match unsafe { syscall!(WRITE, fd, buffer.as_ptr(), buffer.len()) } {
            // Done
            0 => return,
            // `n` bytes were not written; retry with the unwritten tail
            n if n <= buffer.len() => {
                let offset = (buffer.len() - n) as isize;
                // SAFETY: `offset + n == buffer.len()`, so the new slice is
                // entirely contained in the original buffer.
                buffer = unsafe { slice::from_raw_parts(buffer.as_ptr().offset(offset), n) }
            }
            // The host claims more bytes went unwritten than we asked it to
            // write: treat as an unrecoverable error and give up. (The old
            // code had no guard here, so `buffer.len() - n` underflowed.)
            _ => return,
        }
    }
}
impl Stderr {
    /// Write the whole `buffer` to the host's stderr (fd 2).
    fn write_all(&mut self, buffer: &[u8]) {
        write_all(STDERR, buffer);
    }
}
impl Stdout {
    /// Write the whole `buffer` to the host's stdout (fd 1).
    fn write_all(&mut self, buffer: &[u8]) {
        write_all(STDOUT, buffer);
    }
}
// `fmt::Write` lets `write!`/`writeln!`-style formatting target the host
// streams; host writes never fail from the caller's point of view.
impl Write for Stderr {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_all(s.as_bytes());
        Ok(())
    }
}
impl Write for Stdout {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_all(s.as_bytes());
        Ok(())
    }
}
/// Write a `buffer` to the host's stderr
pub fn ewrite(buffer: &[u8]) {
    Stderr.write_all(buffer)
}
/// Write `fmt::Arguments` to the host's stderr
// Formatting errors are deliberately ignored (`.ok()`): host writes are
// treated as infallible best-effort output.
pub fn ewrite_fmt(args: fmt::Arguments) {
    Stderr.write_fmt(args).ok();
}
/// Write a `string` to the host's stderr
pub fn ewrite_str(string: &str) {
    Stderr.write_all(string.as_bytes())
}
/// Write a `buffer` to the host's stdout
pub fn write(buffer: &[u8]) {
    Stdout.write_all(buffer)
}
/// Write `fmt::Arguments` to the host's stdout
// Formatting errors are deliberately ignored, as in `ewrite_fmt`.
pub fn write_fmt(args: fmt::Arguments) {
    Stdout.write_fmt(args).ok();
}
/// Write a `string` to the host's stdout
pub fn write_str(string: &str) {
    Stdout.write_all(string.as_bytes())
}
|
mod binary;
pub use binary::MemcacheBinary as Memcache;
|
use std::sync::Arc;
use axum::{
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Redirect, Response},
};
use maud::{html, Markup};
use rss::validation::Validate;
use tracing::instrument;
use crate::{
http_server::{
pages::blog::md::IntoHtml,
templates::{base_constrained, posts::BlogPostList},
},
posts::{
blog::{BlogPostPath, BlogPosts, MatchesPath, ToCanonicalPath},
date::PostedOn,
til::TilPosts,
Post, ToRssItem,
}, AppState,
};
pub(crate) mod md;
/// Newtype over [`rss::Channel`] so axum's `IntoResponse` can be implemented for it.
pub(crate) struct MyChannel(rss::Channel);
impl MyChannel {
    /// Build a feed from a slice of posts by converting each into an RSS item.
    #[instrument(skip_all)]
    pub fn from_posts<T>(state: AppState, posts: &[&Post<T>]) -> Self
    where
        Post<T>: ToRssItem,
    {
        let rss_items = posts
            .iter()
            .map(|post| post.to_rss_item(&state))
            .collect::<Vec<_>>();
        Self::from_items(state, &rss_items)
    }
    /// Wrap pre-built RSS items in a channel carrying the site's metadata.
    pub fn from_items(state: AppState, items: &[rss::Item]) -> Self {
        use rss::ChannelBuilder;
        let mut builder = ChannelBuilder::default();
        builder
            .title("coreyja Blog".to_string())
            .link(state.app.home_page())
            .copyright(Some("Copyright Corey Alexander".to_string()))
            .language(Some("en-us".to_string()))
            .items(items);
        Self(builder.build())
    }
    /// Run the `rss` crate's built-in validation over the wrapped channel.
    pub fn validate(&self) -> Result<(), rss::validation::ValidationError> {
        self.0.validate()
    }
}
/// Handler for the blog-only RSS feed.
#[instrument(skip_all)]
pub(crate) async fn rss_feed(
    State(state): State<AppState>,
    State(posts): State<Arc<BlogPosts>>,
) -> Result<impl IntoResponse, StatusCode> {
    let recent = posts.by_recency();
    let feed = MyChannel::from_posts(state, &recent);
    Ok(feed.into_response())
}
/// Handler for the combined (blog + TIL) RSS feed, newest first.
#[instrument(skip_all)]
pub(crate) async fn full_rss_feed(
    State(state): State<AppState>,
    State(blog_posts): State<Arc<BlogPosts>>,
    State(til_posts): State<Arc<TilPosts>>,
) -> Result<impl IntoResponse, StatusCode> {
    // Tag every item with its publication date so the merged list can be sorted.
    let mut dated_items: Vec<(chrono::NaiveDate, rss::Item)> = blog_posts
        .by_recency()
        .into_iter()
        .map(|post| (post.posted_on(), post.to_rss_item(&state)))
        .chain(
            til_posts
                .by_recency()
                .into_iter()
                .map(|post| (post.posted_on(), post.to_rss_item(&state))),
        )
        .collect();
    // Newest first; the sort is stable, so equal dates keep source order.
    dated_items.sort_by_key(|&(date, _)| std::cmp::Reverse(date));
    let items: Vec<rss::Item> = dated_items.into_iter().map(|(_, item)| item).collect();
    Ok(MyChannel::from_items(state, &items).into_response())
}
/// Serialize the channel as XML and serve it with the RSS content type.
impl IntoResponse for MyChannel {
    fn into_response(self) -> Response {
        let xml = self.0.to_string();
        Response::builder()
            .header("Content-Type", "application/rss+xml")
            .body(xml)
            .unwrap()
            .into_response()
    }
}
/// Render the blog index page listing every post, newest first.
#[instrument(skip_all)]
pub(crate) async fn posts_index(State(posts): State<Arc<BlogPosts>>) -> Result<Markup, StatusCode> {
    let markup = base_constrained(html! {
        h1 class="text-3xl" { "Blog Posts" }
        (BlogPostList(posts.by_recency()))
    });
    Ok(markup)
}
/// Serve a single blog post (or a raw asset living under the same path).
///
/// Resolution order:
/// 1. If `key` names an existing non-markdown file, serve its raw bytes.
/// 2. Otherwise find the post whose path matches `key`; 404 when none does.
/// 3. A non-canonical match gets a permanent redirect to the canonical URL.
/// 4. Otherwise render the post's markdown to HTML.
#[instrument(skip(state, posts))]
pub(crate) async fn post_get(
    State(state): State<AppState>,
    State(posts): State<Arc<BlogPosts>>,
    Path(key): Path<String>,
) -> Result<Response, StatusCode> {
    {
        // Raw assets (e.g. images) bypass post lookup entirely.
        let path = BlogPostPath::new(key.clone());
        if path.file_exists() && !path.file_is_markdown() {
            return Ok(path.raw_bytes().into_response());
        }
    }
    let (post, m) = posts
        .posts()
        .iter()
        .find_map(|p| p.matches_path(&key).map(|m| (p, m)))
        .ok_or(StatusCode::NOT_FOUND)?;
    // Old/alternate URLs get a 301 to the canonical location.
    if let MatchesPath::RedirectToCanonicalPath = m {
        return Ok(
            Redirect::permanent(&format!("/posts/{}", post.path.canonical_path())).into_response(),
        );
    }
    let markdown = post.markdown();
    Ok(base_constrained(html! {
        h1 class="text-2xl" { (markdown.title) }
        subtitle class="block text-lg text-subtitle mb-8" { (markdown.date) }
        div {
            (markdown.ast.into_html(&state))
        }
    })
    .into_response())
}
|
use chrono::{Datelike, Local, TimeZone};
use serde::{Deserialize, Serialize};
use std::fs::File;
use std::io::Write;
/// A measured statistic plus the weight it contributes to the total rating.
#[derive(Deserialize, Serialize)]
pub struct Stat<T> {
    // Raw measured value.
    pub value: T,
    // Multiplier applied when folding this stat into the total rating.
    pub weight: f32,
}
/// All statistics tracked for a node.
#[derive(Deserialize, Serialize)]
pub struct Stats {
    // Unix timestamp (seconds) of when the node first came online;
    // 0 means "not yet stamped" (see `StatStoreFunc::new`).
    pub first_online: u64,
    pub region: String,
    // assumes value = [up_hour, down_hour] — TODO confirm against callers.
    pub uptime: Stat<Vec<u32>>,
    // Total capacity; compared against `capacity_left` in `capacity_rating`.
    pub capacity: Stat<u64>,
    // Connection speed, in the same unit as the thresholds in
    // `connection_rating`.
    pub connection: Stat<u32>,
    // Accumulated uptime, bumped via `increase_uptime_counter`.
    pub uptime_counter: Stat<u64>,
}
/// Stats plus the directory used for persisting them to disk.
#[derive(Deserialize)]
pub struct StatStore {
    pub stats: Stats,
    // Directory that contains `stat_state.json`.
    pub path: String,
}
/// Construction, rating computation, and persistence for [`StatStore`].
pub trait StatStoreFunc {
    fn new(stats: Stats, path: String) -> StatStore;
    fn total_rating(&self, capacity_left: u64) -> f32; // Returns the total weight of the node
    fn connection_rating(&self) -> f32;
    fn capacity_rating(&self, capacity_left: u64) -> f32;
    fn uptime_rating(&self) -> f32;
    fn uptime_left_rating(&self) -> f32;
    fn uptime_count_rating(&self) -> f32;
    fn increase_uptime_counter(&mut self, inc: u64);
    fn serialize_state(&self); // Save StatStore to disk
    fn deserialize_state(path: &str) -> Stats; // Read StatStore from disk
}
impl StatStoreFunc for StatStore {
    /// Build a store from loaded stats; stamps `first_online` with "now"
    /// (Unix seconds) when it was never set.
    fn new(mut stats: Stats, path: String) -> StatStore {
        if stats.first_online == 0 {
            stats.first_online = chrono::Utc::now().timestamp() as u64;
        }
        StatStore { stats, path }
    }
    /// Sum of the enabled sub-ratings (the uptime-window ratings are
    /// currently disabled, kept here as commented-out lines).
    fn total_rating(&self, capacity_left: u64) -> f32 {
        [
            self.connection_rating(),
            self.capacity_rating(capacity_left),
            // self.uptime_rating(),
            // self.uptime_left_rating(),
            self.uptime_count_rating(),
        ]
        .iter()
        .sum()
    }
    /// Step function over the connection speed, scaled by its weight.
    /// The range `match` replaces an `if` chain whose lower bounds were
    /// redundantly re-checked; the mapping is unchanged.
    fn connection_rating(&self) -> f32 {
        let speed_rating = match self.stats.connection.value {
            0..=5_999 => 0.1,
            6_000..=15_999 => 0.3,
            16_000..=49_999 => 0.4,
            50_000..=199_999 => 0.6,
            200_000..=999_999 => 0.8,
            _ => 1.0,
        };
        speed_rating * self.stats.connection.weight
    }
    // NOTE(review): assumes uptime.value = [up_hour, down_hour]; `down - up`
    // is then the daily uptime span in hours — confirm the operand order.
    fn uptime_rating(&self) -> f32 {
        let up = self.stats.uptime.value[0] as f32;
        let down = self.stats.uptime.value[1] as f32;
        let uptime_in_hours = down - up;
        (uptime_in_hours / 24.0) * self.stats.uptime.weight
    }
    /// Fraction of today's uptime window still ahead of us, scaled by weight.
    fn uptime_left_rating(&self) -> f32 {
        let now = Local::now();
        let total_uptime_in_minutes =
            ((self.stats.uptime.value[1] - self.stats.uptime.value[0]) * 60) as f32;
        // Today's shutdown moment: value[1] is interpreted as an hour-of-day.
        let down =
            Local
                .ymd(now.year(), now.month(), now.day())
                .and_hms(self.stats.uptime.value[1], 0, 0);
        let minutes_left = down.signed_duration_since(now).num_minutes() as f32;
        (minutes_left / total_uptime_in_minutes) * self.stats.uptime.weight
    }
    /// Share of capacity still free, scaled by its weight.
    fn capacity_rating(&self, capacity_left: u64) -> f32 {
        (capacity_left as f32 / self.stats.capacity.value as f32) * self.stats.capacity.weight
    }
    /// Ratio of counted uptime to wall-clock lifetime, scaled by weight.
    fn uptime_count_rating(&self) -> f32 {
        let now = chrono::Utc::now().timestamp() as u64;
        // `saturating_sub` guards against clock skew putting `first_online`
        // in the future (the old plain `-` would underflow).
        let total_seconds = now.saturating_sub(self.stats.first_online);
        if total_seconds == 0 {
            return 0.0;
        }
        (self.stats.uptime_counter.value as f32 / total_seconds as f32)
            * self.stats.uptime_counter.weight
    }
    fn increase_uptime_counter(&mut self, inc: u64) {
        self.stats.uptime_counter.value += inc;
    }
    /// Persist the stats as JSON to `<path>/stat_state.json`.
    /// Write errors are deliberately ignored (best effort), matching the
    /// original behavior; file-creation failures still panic.
    fn serialize_state(&self) {
        let path = format!("{}/stat_state.json", self.path);
        let serialized = serde_json::to_string(&self.stats).unwrap();
        let mut file = File::create(path).unwrap();
        let _ = file.write_all(serialized.as_bytes());
    }
    /// Load stats back from `<path>/stat_state.json`.
    ///
    /// # Panics
    /// Panics when the file is missing or contains invalid JSON.
    fn deserialize_state(path: &str) -> Stats {
        let complete_path = format!("{}/stat_state.json", path);
        let data = std::fs::read_to_string(complete_path).expect("Unable to read file");
        serde_json::from_str(&data).expect("JSON was not well-formatted")
    }
}
// NOTE(review): empty inherent impl — appears to be a leftover; safe to remove.
impl StatStore {}
|
use ipfs::{Ipfs, IpfsOptions, IpfsPath, PeerId, TestTypes};
use futures::{FutureExt, TryFutureExt};
/// End-to-end IPNS demo: publish a DAG block under a random peer id, resolve
/// it back, then resolve a DNSLink name twice. Runs on the pre-async/await
/// tokio 0.1 runtime via the futures `compat()` shims.
fn main() {
    let options = IpfsOptions::<TestTypes>::default();
    env_logger::Builder::new().parse_filters(&options.ipfs_log).init();
    // `block_on_all` drives the 0.3 future on the 0.1 runtime through `compat()`.
    tokio::runtime::current_thread::block_on_all(async move {
        // Start daemon and initialize repo
        let (ipfs, fut) = Ipfs::new(options).start().await.unwrap();
        // The daemon's background future must be spawned for `ipfs` to make progress.
        tokio::spawn(fut.unit_error().boxed().compat());
        // Create a Block
        let ipfs_path = ipfs.put_dag("block v0".into()).await.unwrap();
        // Publish a Block
        let ipns_path = ipfs.publish_ipns(&PeerId::random(), &ipfs_path).await.unwrap();
        // Resolve a Block
        let new_ipfs_path = ipfs.resolve_ipns(&ipns_path).await.unwrap();
        assert_eq!(ipfs_path, new_ipfs_path);
        // Resolve dnslink
        let ipfs_path = IpfsPath::from_str("/ipns/ipfs.io").unwrap();
        println!("Resolving {:?}", ipfs_path.to_string());
        let ipfs_path = ipfs.resolve_ipns(&ipfs_path).await.unwrap();
        println!("Resolved stage 1: {:?}", ipfs_path.to_string());
        let ipfs_path = ipfs.resolve_ipns(&ipfs_path).await.unwrap();
        println!("Resolved stage 2: {:?}", ipfs_path.to_string());
        ipfs.exit_daemon();
    }.unit_error().boxed().compat()).unwrap();
}
|
use crate::error::NiaServerResult;
use crate::protocol::{NiaConvertable, NiaKey, Serializable};
/// A key chord: a set of modifier keys plus one ordinary key.
///
/// `Eq` is derived on top of the manual `PartialEq` below, which treats
/// `modifiers` as an unordered collection.
#[derive(Clone, Debug, Eq)]
pub struct NiaKeyChord {
    modifiers: Vec<NiaKey>,
    ordinary_key: NiaKey,
}
/// Order-insensitive equality: two chords match when their ordinary keys are
/// equal, their modifier lists have the same length, and every modifier on
/// each side occurs somewhere in the other side.
impl PartialEq for NiaKeyChord {
    fn eq(&self, other: &Self) -> bool {
        self.ordinary_key == other.ordinary_key
            && self.modifiers.len() == other.modifiers.len()
            && self.modifiers.iter().all(|m| other.modifiers.contains(m))
            && other.modifiers.iter().all(|m| self.modifiers.contains(m))
    }
}
impl NiaKeyChord {
    /// Construct a chord from its modifiers and the main key.
    pub fn new(modifiers: Vec<NiaKey>, ordinary_key: NiaKey) -> NiaKeyChord {
        NiaKeyChord {
            modifiers,
            ordinary_key,
        }
    }
    /// Borrow the modifier keys.
    pub fn get_modifiers(&self) -> &Vec<NiaKey> {
        &self.modifiers
    }
    /// The chord's ordinary (non-modifier) key.
    pub fn get_key(&self) -> NiaKey {
        self.ordinary_key
    }
}
/// Conversion between the server-side chord and the interpreter's chord.
impl NiaConvertable<NiaKeyChord, nia_interpreter_core::KeyChord>
    for NiaKeyChord
{
    fn to_interpreter_repr(&self) -> nia_interpreter_core::KeyChord {
        let interpreter_modifiers: Vec<_> = self
            .modifiers
            .iter()
            .map(|modifier| modifier.to_interpreter_repr())
            .collect();
        nia_interpreter_core::KeyChord::new(
            interpreter_modifiers,
            self.ordinary_key.to_interpreter_repr(),
        )
    }
    fn from_interpreter_repr(
        key_chord: &nia_interpreter_core::KeyChord,
    ) -> NiaServerResult<NiaKeyChord> {
        // Each modifier conversion may fail; `?` propagates the first error.
        let mut modifiers = Vec::new();
        for interpreter_modifier in key_chord.get_modifiers() {
            modifiers.push(NiaKey::from_interpreter_repr(interpreter_modifier)?);
        }
        let ordinary_key = NiaKey::from_interpreter_repr(&key_chord.get_key())?;
        Ok(NiaKeyChord::new(modifiers, ordinary_key))
    }
}
/// Protobuf (de)serialization for [`NiaKeyChord`].
impl Serializable<NiaKeyChord, nia_protocol_rust::KeyChord> for NiaKeyChord {
    fn to_pb(&self) -> nia_protocol_rust::KeyChord {
        let modifiers_pb: Vec<nia_protocol_rust::Key> =
            self.modifiers.iter().map(|key| key.to_pb()).collect();
        let mut key_chord_pb = nia_protocol_rust::KeyChord::new();
        key_chord_pb.set_modifiers(protobuf::RepeatedField::from(modifiers_pb));
        key_chord_pb.set_ordinary_key(self.ordinary_key.to_pb());
        key_chord_pb
    }
    fn from_pb(
        mut object_pb: nia_protocol_rust::KeyChord,
    ) -> NiaServerResult<NiaKeyChord> {
        // `take_*` moves the fields out of the protobuf message.
        let ordinary_key = NiaKey::from_pb(object_pb.take_ordinary_key())?;
        let mut modifiers = Vec::new();
        for modifier_pb in object_pb.take_modifiers().into_iter() {
            modifiers.push(NiaKey::from_pb(modifier_pb)?);
        }
        Ok(NiaKeyChord::new(modifiers, ordinary_key))
    }
}
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;
    // Round-trip through the protobuf byte representation.
    #[cfg(test)]
    mod serialization {
        #[allow(unused_imports)]
        use super::*;
        #[test]
        fn serializes_and_deserializes() {
            let expected_key_chord = NiaKeyChord::new(
                vec![NiaKey::make_key_2(1, 2), NiaKey::make_key_1(1)],
                NiaKey::make_key_2(2, 3),
            );
            let bytes = expected_key_chord.to_bytes().unwrap();
            let key_chord = NiaKeyChord::from_bytes(bytes).unwrap();
            assert_eq!(expected_key_chord, key_chord)
        }
    }
    // Round-trip through the interpreter's representation.
    #[cfg(test)]
    mod convertation {
        #[allow(unused_imports)]
        use super::*;
        #[test]
        fn convertable_between_server_and_interpreter_representations() {
            let expected_key_chord = NiaKeyChord::new(
                vec![NiaKey::make_key_2(1, 2), NiaKey::make_key_1(1)],
                NiaKey::make_key_2(2, 3),
            );
            let interpreter_key_chord =
                expected_key_chord.to_interpreter_repr();
            let key_chord =
                NiaKeyChord::from_interpreter_repr(&interpreter_key_chord)
                    .unwrap();
            assert_eq!(expected_key_chord, key_chord)
        }
    }
}
|
use serde::ser::{SerializeStruct, SerializeStructVariant, SerializeTupleVariant};
use serde::Serialize;
use crate::Element;
use crate::error::TychoError;
use crate::serde::ser::seq::SeqSerializer;
use crate::serde::ser::struct_::StructSerializer;
/// Serializes a tuple enum variant: the variant name wraps a sequence of
/// its fields.
pub struct VariantSeqSerializer {
    // Enum variant name, stored into the resulting `Element::Variant`.
    name: String,
    // Accumulates the tuple fields as a sequence.
    seq: SeqSerializer
}
impl VariantSeqSerializer {
    /// Wrap an existing sequence serializer under a variant `name`.
    pub fn new(name: &str, seq: SeqSerializer) -> Self {
        Self {
            name: name.to_string(),
            seq
        }
    }
}
impl SerializeTupleVariant for VariantSeqSerializer {
    type Ok = Element;
    type Error = TychoError;
    /// Append one tuple field to the underlying sequence.
    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error> where
        T: Serialize {
        self.seq.element(value)
    }
    /// Finish the sequence and wrap it in a named variant element.
    fn end(self) -> Result<Self::Ok, Self::Error> {
        let inner = self.seq.finish()?;
        Ok(Element::Variant(self.name, Box::from(inner)))
    }
}
/// Serializes a struct enum variant: the variant name wraps a struct of
/// named fields.
pub struct VariantStructSerializer {
    // Enum variant name, stored into the resulting `Element::Variant`.
    name: String,
    // Accumulates the struct's named fields.
    inner: StructSerializer
}
impl VariantStructSerializer {
    /// Wrap an existing struct serializer under the variant name `s`.
    pub fn new(s: &str, inner: StructSerializer) -> Self {
        Self {
            name: s.to_string(),
            inner
        }
    }
}
impl SerializeStructVariant for VariantStructSerializer {
type Ok = Element;
type Error = TychoError;
fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> where
T: Serialize {
self.inner.serialize_field(key, value)
}
fn end(self) -> Result<Self::Ok, Self::Error> {
Ok(Element::Variant(self.name, Box::from(self.inner.end()?)))
}
} |
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
use walkdir::WalkDir;
/// Recursively collect every `*.rs` file under `rust_path`.
///
/// Unreadable directory entries are silently skipped (`filter_map(ok)`).
pub fn find_rust_files(rust_path: &str) -> Vec<PathBuf> {
    WalkDir::new(rust_path)
        .into_iter()
        .filter_map(|entry| entry.ok())
        .filter(|entry| {
            // `map_or` replaces the previous `is_some()` + `unwrap()` pair.
            entry.file_type().is_file()
                && entry.path().extension().map_or(false, |ext| ext == "rs")
        })
        .map(|entry| entry.into_path())
        .collect()
}
/// Parse a rust file into a TokenTree
pub fn parse_file(path: PathBuf) -> syn::File {
let mut file = File::open(&path).expect("Unable to open file");
let mut src = String::new();
file.read_to_string(&mut src).expect("Unable to read file");
syn::parse_file(&src).expect("Unable to parse file")
}
|
use crate::common::{ Opcode, Opmode, OPMODES, Closure, Value };
/// Decode the 6-bit opcode field (bits 26..=31) of an instruction word.
pub fn get_op(i: u32) -> Opcode {
    Opcode::from(((i >> 26) & 0x3F) as u8)
}
/// Addressing-mode flag for operand A (bit 25): 0 or 1.
pub fn get_a_mode(i: u32) -> u8 {
    ((i >> 25) & 0x1) as u8
}
/// Operand A: the 8 bits at positions 17..=24.
pub fn get_a(i: u32) -> u8 {
    ((i >> 17) & 0xFF) as u8
}
/// Addressing-mode flag for operand B (bit 16): 0 or 1.
pub fn get_b_mode(i: u32) -> u8 {
    ((i >> 16) & 0x1) as u8
}
/// Operand B: the 8 bits at positions 8..=15.
pub fn get_b(i: u32) -> u8 {
    ((i >> 8) & 0xFF) as u8
}
/// Operand C: the low 8 bits of the instruction word.
pub fn get_c(i: u32) -> u8 {
    (i & 0xFF) as u8
}
/// Combined 16-bit operand Bx: operands B and C together, i.e. the low
/// 16 bits of the word (B occupies the high byte, C the low byte).
pub fn get_bx(i: u32) -> u16 {
    (i & 0xFFFF) as u16
}
/// Disassemble one instruction word into a human-readable line.
pub fn format_instruction(i: u32) -> String {
    let (name, mode) = OPMODES[get_op(i) as usize];
    // A leading '-' marks an operand whose mode flag is set.
    let a_sign = if get_a_mode(i) == 1 { "-" } else { "" };
    match mode {
        Opmode::Abc => {
            let b_sign = if get_b_mode(i) == 1 { "-" } else { "" };
            format!(
                "{} {}{} {}{} {}",
                name,
                a_sign,
                get_a(i),
                b_sign,
                get_b(i),
                get_c(i)
            )
        }
        Opmode::Abx => format!("{} {}{} {}", name, a_sign, get_a(i), get_bx(i)),
    }
}
/// One-paragraph summary of a closure: name, source file, and sizes.
fn get_fn_info(closure: Closure) -> String {
    // Nested closures are stored among the constants; count them so they can
    // be reported separately from plain constants.
    let nfn = closure
        .consts
        .iter()
        .filter(|val| matches!(val, Value::Closure(..)))
        .count();
    format!("{} <{}> ({} instructions)\n{} params, {} constants, {} functions", closure.name, closure.file_name, closure.code.len(), closure.nparams, closure.consts.len() - nfn, nfn)
}
pub fn pretty_print_closure(closure: Closure, recursive: bool) {
println!("{}", get_fn_info(closure.clone()));
for (idx, instruction) in closure.code.iter().enumerate() {
let s = format_instruction(*instruction);
println!("\t{}\t[{}]\t{}", idx + 1, closure.lines[idx], s);
}
let mut funcs = Vec::new();
let consts = closure.consts.iter().filter(| v | {
if let Value::Closure(c) = v { funcs.push(c); false } else { true }
}).collect::<Vec<&Value>>();
println!("constants ({})", consts.len());
for (idx, konst) in consts.iter().enumerate() {
println!("\t{}\t{:?}", idx + 1, konst)
}
if recursive {
for func in funcs {
println!();
pretty_print_closure(func.clone(), true)
}
}
} |
extern crate sdl;
use std::num::Wrapping;
use modulo::Mod;
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};
use sdl::event::Event;
use retrofw2_rust::controls::*;
use retrofw2_rust::geom::Painter;
/// Complete game state for the asteroids clone.
struct Asteroids {
    screen: sdl::video::Surface,
    video_info: sdl::video::VideoInfo,
    // Screen height in pixels; used as the common drawing scale factor.
    height: isize,
    // Width-to-height ratio; x coordinates range over 0..prop_width, y over 0..1.
    prop_width: f32,
    pressed_keys: PressedKeys,
    ship: Ship,
    // 256-entry lookup table indexed by an 8-bit angle.
    sin_cos_lut: [(f32, f32); 256],
    // Total frames rendered; used for the FPS report at exit.
    frames: usize,
    start: std::time::Instant,
    last_frame: std::time::Instant,
    stars: [Star; 100],
    asteroids: std::vec::Vec<Asteroid>,
}
impl Asteroids {
    /// Initialize SDL, place the ship at screen centre, and populate the
    /// star field and asteroid field.
    fn new() -> Asteroids {
        let (screen, video_info) = retrofw2_rust::gfx::init();
        let width = video_info.width;
        let height = video_info.height;
        // Aspect ratio: x coordinates live in 0..prop_width, y in 0..1.
        let prop_width = width as f32 / height as f32;
        Asteroids {
            screen,
            video_info,
            height,
            prop_width,
            pressed_keys: PressedKeys::default(),
            ship: Ship {
                x: prop_width / 2.0,
                y: 0.5,
                rot: Wrapping(0u8),
                dx: 0.0,
                dy: 0.0,
            },
            sin_cos_lut: retrofw2_rust::geom::get_sin_cos_lut(),
            frames: 0,
            start: std::time::Instant::now(),
            last_frame: std::time::Instant::now(),
            stars: make_stars(prop_width),
            asteroids: make_asteroids(prop_width),
        }
    }
    /// Main loop: drain events, update simulation, draw, flip, throttle.
    fn main(mut self) {
        self.start = std::time::Instant::now();
        'main: loop {
            self.last_frame = std::time::Instant::now();
            // Drain the SDL event queue completely each frame.
            'event: loop {
                let event = sdl::event::poll_event();
                self.pressed_keys.process_key(&event);
                match event {
                    Event::Quit => break 'main,
                    Event::None => break 'event,
                    _ => {}
                }
            }
            if self.pressed_keys.is_pressed(CONTROL_SELECT) {
                break;
            }
            self.handle_ship();
            self.handle_stars();
            self.handle_asteroids();
            self.screen.fill(sdl::video::Color::RGB(0, 0, 0));
            self.frames += 1;
            // Drawing happens inside `with_lock`, which hands us the raw pixels.
            let _draw = |pixels: &mut [u8]| -> bool {
                let mut painter = Painter {
                    video_info: &self.video_info,
                    pixels: std::boxed::Box::new(pixels),
                };
                self.draw_ship(&mut painter);
                self.draw_stars(&mut painter);
                self.draw_asteroids(&mut painter);
                true
            };
            self.screen.with_lock(_draw);
            self.screen.flip();
            // Optional ~60 FPS cap behind the "throttle" feature flag.
            if cfg!(feature = "throttle") {
                let now = std::time::Instant::now();
                let frame_diff = now.duration_since(self.last_frame);
                if frame_diff < std::time::Duration::from_millis(16) {
                    std::thread::sleep(std::time::Duration::from_millis(16) - frame_diff);
                }
            }
        }
        sdl::quit();
        // Report average FPS over the whole run.
        println!(
            "{}",
            1000.0 * self.frames as f32 / self.start.elapsed().as_millis() as f32
        );
    }
    /// Apply rotation/thrust from input; velocity decays 10% per frame.
    fn handle_ship(&mut self) {
        if self.pressed_keys.is_pressed(CONTROL_LEFT) {
            self.ship.rot += std::num::Wrapping(4);
        }
        if self.pressed_keys.is_pressed(CONTROL_RIGHT) {
            self.ship.rot -= std::num::Wrapping(4);
        }
        if self.pressed_keys.is_pressed(CONTROL_B) {
            // Thrust along the current heading via the sin/cos table.
            let d = self.sin_cos_lut[self.ship.rot.0 as usize];
            self.ship.dx += d.0;
            self.ship.dy += d.1;
        }
        self.ship.dx *= 0.9;
        self.ship.dy *= 0.9;
    }
    /// Draw the ship as four line segments fanned around its heading.
    fn draw_ship(&self, painter: &mut Painter) {
        self.draw_polar_line(
            painter,
            self.ship.x,
            self.ship.y,
            self.ship.rot,
            0.03,
            self.ship.rot + Wrapping(40u8),
            -0.03,
        );
        self.draw_polar_line(
            painter,
            self.ship.x,
            self.ship.y,
            self.ship.rot,
            0.03,
            self.ship.rot - Wrapping(40u8),
            -0.03,
        );
        self.draw_polar_line(
            painter,
            self.ship.x,
            self.ship.y,
            self.ship.rot,
            0.0,
            self.ship.rot + Wrapping(40u8),
            -0.03,
        );
        self.draw_polar_line(
            painter,
            self.ship.x,
            self.ship.y,
            self.ship.rot,
            0.0,
            self.ship.rot - Wrapping(40u8),
            -0.03,
        );
    }
    /// Parallax-scroll the star field opposite the ship's velocity, wrapping
    /// at the edges; deeper (brighter) stars move faster.
    fn handle_stars(&mut self) {
        for mut star in self.stars.iter_mut() {
            star.x -= self.ship.dx * (star.depth as f32) / 500_000.0;
            star.y -= self.ship.dy * (star.depth as f32) / 500_000.0;
            star.x = star.x.modulo(self.prop_width);
            star.y = star.y.modulo(1.0);
        }
    }
    /// Advance asteroid spin and position (relative to the ship's motion)
    /// and wrap them around the screen with a 0.1–0.2 margin.
    fn handle_asteroids(&mut self) {
        for mut asteroid in self.asteroids.iter_mut() {
            // Widen to i16 for the signed spin, then wrap back into u8.
            asteroid.rot =
                Wrapping(Wrapping(asteroid.rot.0 as i16 + asteroid.drot.0 as i16).0 as u8);
            asteroid.x -= self.ship.dx / 1000.0;
            asteroid.y -= self.ship.dy / 1000.0;
            asteroid.x -= asteroid.dx / 1000.0;
            asteroid.y -= asteroid.dy / 1000.0;
            if asteroid.x < -0.2 {
                asteroid.x = self.prop_width + 0.1;
            }
            if asteroid.x > self.prop_width + 0.2 {
                asteroid.x = -0.1;
            }
            if asteroid.y < -0.2 {
                asteroid.y = 1.1;
            }
            if asteroid.y > 1.2 {
                asteroid.y = -0.1;
            }
        }
    }
    /// Plot each star as a single grey pixel whose brightness is its depth.
    fn draw_stars(&self, painter: &mut Painter) {
        for star in self.stars.iter() {
            painter.put_pixel(
                (star.x * self.height as f32) as isize,
                (star.y * self.height as f32) as isize,
                star.depth,
                star.depth,
                star.depth,
            );
        }
    }
    /// Draw each asteroid outline by connecting its 16 radii; zipping the
    /// radius array against itself rotated by one yields consecutive vertex
    /// pairs (including the closing edge).
    fn draw_asteroids(&self, painter: &mut Painter) {
        for asteroid in self.asteroids.iter() {
            let mut rotated = asteroid.rads;
            rotated.rotate_left(1);
            for (i, (rad1, rad2)) in asteroid.rads.iter().zip(rotated.iter()).enumerate() {
                self.draw_polar_line(
                    painter,
                    asteroid.x,
                    asteroid.y,
                    // 16 vertices spaced 16 angle-steps apart (256/16).
                    asteroid.rot + Wrapping((i as u8) << 4),
                    *rad1,
                    asteroid.rot + Wrapping(((i + 1) as u8) << 4),
                    *rad2,
                );
            }
        }
    }
    /// Draw a white line between two points given in polar form around
    /// (`cx`, `cy`): an 8-bit angle into the sin/cos LUT plus a radius.
    #[allow(clippy::too_many_arguments)]
    fn draw_polar_line(
        &self,
        painter: &mut Painter,
        cx: f32,
        cy: f32,
        rot1: Wrapping<u8>,
        rad1: f32,
        rot2: Wrapping<u8>,
        rad2: f32,
    ) {
        let (sx1, sy1) = self.sin_cos_lut[rot1.0 as usize];
        let (sx2, sy2) = self.sin_cos_lut[rot2.0 as usize];
        painter.draw_line(
            ((cx + rad1 * sx1) * self.height as f32) as isize,
            ((cy + rad1 * sy1) * self.height as f32) as isize,
            ((cx + rad2 * sx2) * self.height as f32) as isize,
            ((cy + rad2 * sy2) * self.height as f32) as isize,
            255,
            255,
            255,
        );
    }
}
/// The player's ship: position, heading, and velocity.
struct Ship {
    // Position in screen-proportional units (y in 0..1, x in 0..prop_width).
    x: f32,
    y: f32,
    // Heading as a wrapping 8-bit angle (256 steps per full turn).
    rot: Wrapping<u8>,
    // Velocity components.
    dx: f32,
    dy: f32,
}
/// A background star; depth drives both parallax speed and brightness.
#[derive(Default, Copy, Clone)]
struct Star {
    x: f32,
    y: f32,
    // 0..=255 grey level, also scales the parallax factor.
    depth: u8,
}
/// Generate 100 stars at random positions with random depths.
fn make_stars(prop_width: f32) -> [Star; 100] {
    let mut stars = [Star::default(); 100];
    let mut rng = SmallRng::from_entropy();
    for mut star in stars.iter_mut() {
        star.x = rng.gen_range(0.0, prop_width);
        star.y = rng.gen_range(0.0, 1.0);
        // NOTE(review): the upper bound of the old rand `gen_range(lo, hi)`
        // API is exclusive, so depth is 0..=254 — 255 never occurs; confirm
        // whether full brightness was intended.
        star.depth = rng.gen_range(0, 255);
    }
    stars
}
/// An asteroid: position, velocity, spin, and its 16-point outline radii.
struct Asteroid {
    x: f32,
    y: f32,
    dx: f32,
    dy: f32,
    // Current rotation as a wrapping 8-bit angle.
    rot: Wrapping<u8>,
    // Per-frame signed rotation delta.
    drot: Wrapping<i8>,
    rads: [f32; 16],
}
impl Asteroid {
    /// Random asteroid: position anywhere on screen, small random velocity
    /// and spin, and a randomized 16-point outline.
    fn new(prop_width: f32) -> Asteroid {
        let mut rng = SmallRng::from_entropy();
        Asteroid {
            x: rng.gen_range(0.0, prop_width),
            y: rng.gen_range(0.0, 1.0),
            dx: rng.gen_range(-3.0, 3.0),
            dy: rng.gen_range(-3.0, 3.0),
            rot: Wrapping(0),
            drot: Wrapping(rng.gen_range(-4, 4)),
            rads: Asteroid::make_rads(),
        }
    }
    /// Build the 16 outline radii: one random base radius plus per-vertex
    /// jitter of up to ±0.01.
    fn make_rads() -> [f32; 16] {
        let mut rads = [0.0; 16];
        let mut rng = SmallRng::from_entropy();
        let rad = rng.gen_range(0.02, 0.1);
        // Fill the array directly: replaces the unidiomatic
        // `std::ops::Range { start: 0, end: 16 }` literal plus map/zip
        // shuffle. The rng draw count and order are unchanged (16 draws).
        for r in rads.iter_mut() {
            *r = rad + rng.gen_range(-0.01, 0.01);
        }
        rads
    }
}
/// Spawn the initial field of ten asteroids.
fn make_asteroids(prop_width: f32) -> std::vec::Vec<Asteroid> {
    let mut field = std::vec::Vec::with_capacity(10);
    for _ in 0..10 {
        field.push(Asteroid::new(prop_width));
    }
    field
}
/// Entry point: construct the game state and run the main loop.
fn main() {
    Asteroids::new().main();
}
|
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Contains the same API as the `http` module, except that everything returns an error.
use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp};
use std::{future::Future, pin::Pin, task::Context, task::Poll};
/// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client
/// running.
#[derive(Clone)]
pub struct SharedClient;
impl SharedClient {
    /// Create the (empty) dummy client handle.
    pub fn new() -> Self {
        Self
    }
}
// A no-argument `new()` should be mirrored by `Default`
// (clippy::new_without_default); existing callers are unaffected.
impl Default for SharedClient {
    fn default() -> Self {
        Self::new()
    }
}
/// Creates a pair of [`HttpApi`] and [`HttpWorker`].
// The shared client is unused in the dummy implementation.
pub fn http(_: SharedClient) -> (HttpApi, HttpWorker) {
    (HttpApi, HttpWorker)
}
/// Dummy implementation of HTTP capabilities.
///
/// Every request fails at `request_start`, so no other method is reachable.
#[derive(Debug)]
pub struct HttpApi;
/// Dummy implementation of HTTP capabilities.
#[derive(Debug)]
pub struct HttpWorker;
impl HttpApi {
/// Mimics the corresponding method in the offchain API.
pub fn request_start(&mut self, _: &str, _: &str) -> Result<HttpRequestId, ()> {
/// Because this always returns an error, none of the other methods should ever be called.
Err(())
}
/// Mimics the corresponding method in the offchain API.
pub fn request_add_header(&mut self, _: HttpRequestId, _: &str, _: &str) -> Result<(), ()> {
unreachable!(
"Creating a request always fails, thus this function will \
never be called; qed"
)
}
/// Mimics the corresponding method in the offchain API.
pub fn request_write_body(
&mut self,
_: HttpRequestId,
_: &[u8],
_: Option<Timestamp>,
) -> Result<(), HttpError> {
unreachable!(
"Creating a request always fails, thus this function will \
never be called; qed"
)
}
/// Mimics the corresponding method in the offchain API.
pub fn response_wait(
&mut self,
requests: &[HttpRequestId],
_: Option<Timestamp>,
) -> Vec<HttpRequestStatus> {
if requests.is_empty() {
Vec::new()
} else {
unreachable!(
"Creating a request always fails, thus the list of requests should \
always be empty; qed"
)
}
}
/// Mimics the corresponding method in the offchain API.
pub fn response_headers(&mut self, _: HttpRequestId) -> Vec<(Vec<u8>, Vec<u8>)> {
unreachable!(
"Creating a request always fails, thus this function will \
never be called; qed"
)
}
/// Mimics the corresponding method in the offchain API.
pub fn response_read_body(
&mut self,
_: HttpRequestId,
_: &mut [u8],
_: Option<Timestamp>,
) -> Result<usize, HttpError> {
unreachable!(
"Creating a request always fails, thus this function will \
never be called; qed"
)
}
}
/// The dummy worker has nothing to drive; it completes immediately.
impl Future for HttpWorker {
    type Output = ();
    fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll<Self::Output> {
        Poll::Ready(())
    }
}
|
use std::convert::From;
use std::fmt::{self, Display};
use std::sync::Arc;
use async_trait::async_trait;
use failure::Fallible;
use http::header::{self, AUTHORIZATION};
use reqwest::Client;
use serde::Serialize;
use slog_scope::{error, info};
use crate::components::access_token_provider::{self, AccessTokenProvider};
use crate::config::Config;
use crate::player::{PauseState, PlaybackHandle};
use super::connect::{self, SpotifyConnector};
use super::util::is_currently_playing;
pub use err::*;
/// High-level Spotify client: owns the HTTP client, the token provider, and
/// the external Spotify Connect process.
pub struct SpotifyPlayer {
    http_client: Arc<Client>,
    access_token_provider: Arc<AccessTokenProvider>,
    // Boxed trait object so the connector implementation is pluggable.
    spotify_connector: Arc<Box<dyn SpotifyConnector + 'static + Sync + Send>>,
    device_name: Arc<String>,
}
/// JSON body for Spotify's "start/resume playback" endpoint; `None` fields
/// are omitted from the serialized payload entirely.
#[derive(Debug, Clone, Serialize)]
struct StartPlayback {
    // Context (e.g. album) URI — used instead of `uris` for album playback.
    #[serde(skip_serializing_if = "Option::is_none")]
    context_uri: Option<String>,
    // Explicit track URIs — used when the URI is not an album.
    #[serde(skip_serializing_if = "Option::is_none")]
    uris: Option<Vec<String>>,
    // Resume position in milliseconds, carried over from a pause.
    #[serde(skip_serializing_if = "Option::is_none")]
    position_ms: Option<u128>,
}
/// Handle for controlling one playback session; shares the player's HTTP
/// client, token provider, and connector via `Arc`s.
pub struct SpotifyPlaybackHandle {
    device_name: Arc<String>,
    http_client: Arc<Client>,
    access_token_provider: Arc<AccessTokenProvider>,
    // The Spotify URI this handle plays/replays.
    uri: String,
    spotify_connector: Arc<Box<dyn SpotifyConnector + 'static + Sync + Send>>,
}
/// Playback control backed by the Spotify Web API. Every request targets the
/// device managed by the connector; a missing device short-circuits with
/// [`Error::NoSpotifyDevice`].
#[async_trait]
impl PlaybackHandle for SpotifyPlaybackHandle {
    /// Stop playback. The Web API has no separate "stop" operation, so this
    /// is exactly a pause; delegating removes what was a verbatim copy of
    /// `pause`'s body (same endpoint, same error message).
    async fn stop(&self) -> Fallible<()> {
        self.pause().await
    }
    /// "Complete" is defined as "not currently playing on our device".
    async fn is_complete(&self) -> Fallible<bool> {
        is_currently_playing(
            &*self.http_client,
            &*self.access_token_provider,
            &*self.device_name,
        )
        .await
        .map(|x| !x)
    }
    /// Pause playback via `PUT /me/player/pause`.
    async fn pause(&self) -> Fallible<()> {
        let msg = "Failed to stop Spotify playback";
        let access_token = self.access_token_provider.get_token()?;
        let device_id = match self.spotify_connector.device_id() {
            Some(device_id) => device_id,
            None => return Err(Error::NoSpotifyDevice.into()),
        };
        self.http_client
            .put("https://api.spotify.com/v1/me/player/pause")
            .query(&[("device_id", &device_id)])
            .body("")
            .header(header::CONTENT_LENGTH, 0)
            .header(AUTHORIZATION, format!("Bearer {}", access_token))
            .send()
            .await
            .map_err(|err| {
                error!("{}: Executing HTTP request failed: {}", msg, err);
                err
            })
            .map(|rsp| {
                // Log non-2xx before `error_for_status` turns it into an Err.
                if !rsp.status().is_success() {
                    error!("{}: HTTP Failure {}", msg, rsp.status());
                }
                rsp
            })?
            .error_for_status()
            .map(|_| ())
            .map_err(|err| Error::HTTP(err).into())
    }
    /// Resume playback at the position recorded in `pause_state`.
    async fn cont(&self, pause_state: PauseState) -> Fallible<()> {
        let msg = "Failed to start Spotify playback";
        let access_token = self.access_token_provider.get_token()?;
        let device_id = match self.spotify_connector.device_id() {
            Some(device_id) => device_id,
            None => return Err(Error::NoSpotifyDevice.into()),
        };
        let req =
            Self::derive_start_playback_payload_from_spotify_uri(&self.uri, &Some(pause_state));
        self.http_client
            .put("https://api.spotify.com/v1/me/player/play")
            .query(&[("device_id", &device_id)])
            .header(AUTHORIZATION, format!("Bearer {}", access_token))
            .json(&req)
            .send()
            .await
            .map_err(|err| {
                error!("{}: Executing HTTP request failed: {}", msg, err);
                err
            })
            .map(|rsp| {
                if !rsp.status().is_success() {
                    error!("{}: HTTP Failure {}", msg, rsp.status());
                }
                rsp
            })?
            .error_for_status()
            .map(|_| ())
            .map_err(|err| Error::HTTP(err).into())
    }
    /// Start playback of the handle's URI from the beginning.
    async fn replay(&self) -> Fallible<()> {
        let msg = "Failed to start Spotify playback";
        let access_token = self.access_token_provider.get_token()?;
        let device_id = match self.spotify_connector.device_id() {
            Some(device_id) => device_id,
            None => return Err(Error::NoSpotifyDevice.into()),
        };
        let req = Self::derive_start_playback_payload_from_spotify_uri(&self.uri, &None);
        self.http_client
            .put("https://api.spotify.com/v1/me/player/play")
            .query(&[("device_id", &device_id)])
            .header(AUTHORIZATION, format!("Bearer {}", access_token))
            .json(&req)
            .send()
            .await
            .map_err(|err| {
                error!("{}: Executing HTTP request failed: {}", msg, err);
                err
            })
            .map(|rsp| {
                if !rsp.status().is_success() {
                    error!("{}: HTTP Failure {}", msg, rsp.status());
                }
                rsp
            })?
            .error_for_status()
            .map(|_| ())
            .map_err(|err| Error::HTTP(err).into())
    }
}
impl SpotifyPlaybackHandle {
    /// Builds the JSON payload for Spotify's "start playback" endpoint.
    ///
    /// Album URIs are sent as `context_uri` (so the whole album plays),
    /// while any other URI is sent as a single-element `uris` list. When a
    /// `pause_state` is given, its position is forwarded so playback
    /// resumes where it was paused.
    fn derive_start_playback_payload_from_spotify_uri(
        spotify_uri: &str,
        pause_state: &Option<PauseState>,
    ) -> StartPlayback {
        let position_ms = pause_state.as_ref().map(|x| x.pos.as_millis());
        // `starts_with` replaces the old `&spotify_uri[0..14]` comparison,
        // which panicked on URIs shorter than 14 bytes (or when byte 14
        // was not a char boundary).
        if spotify_uri.starts_with("spotify:album:") {
            StartPlayback {
                uris: None,
                context_uri: Some(spotify_uri.to_string()),
                position_ms,
            }
        } else {
            StartPlayback {
                uris: Some(vec![spotify_uri.to_string()]),
                context_uri: None,
                position_ms,
            }
        }
    }
}
impl SpotifyPlayer {
    /// Creates a player from `config`: an HTTP client, an access-token
    /// provider refreshing with the configured OAuth credentials, and an
    /// external-command Spotify Connect connector for the device name.
    pub fn new(config: &Config) -> Fallible<Self> {
        let http_client = Arc::new(Client::new());
        // Create Access Token Provider
        let access_token_provider = Arc::new(access_token_provider::AccessTokenProvider::new(
            &config.client_id,
            &config.client_secret,
            &config.refresh_token,
        )?);
        // NOTE(review): `.unwrap()` aborts the process if the external
        // connector fails to start instead of returning `Err` — confirm
        // this is intended.
        let spotify_connector = Arc::new(Box::new(
            connect::external_command::ExternalCommand::new_from_env(
                &access_token_provider.clone(),
                config.device_name.clone(),
            )
            .unwrap(),
        )
            as Box<dyn SpotifyConnector + 'static + Sync + Send>);
        info!("Creating new SpotifyPlayer...");
        Ok(SpotifyPlayer {
            http_client,
            access_token_provider,
            spotify_connector,
            device_name: Arc::new(config.device_name.clone()),
        })
    }
    /// Blocks until both the Connect device and an access token are
    /// available, mapping each failure to its dedicated error variant.
    pub fn wait_until_ready(&self) -> Result<(), Error> {
        self.spotify_connector
            .wait_until_ready()
            .map_err(|_err| Error::NoSpotifyDevice)?;
        self.access_token_provider
            .wait_for_token()
            .map_err(|_err| Error::NoToken)?;
        Ok(())
    }
    /// Starts playback of `spotify_uri` and returns a handle for further
    /// control.
    ///
    /// NOTE(review): `pause_state` is currently ignored — `replay()`
    /// always starts from the beginning. Confirm whether a saved position
    /// should instead resume via `cont(pause_state)`.
    pub async fn start_playback(
        &self,
        spotify_uri: &str,
        pause_state: Option<PauseState>,
    ) -> Result<SpotifyPlaybackHandle, failure::Error> {
        let handle = SpotifyPlaybackHandle {
            http_client: self.http_client.clone(),
            access_token_provider: self.access_token_provider.clone(),
            uri: spotify_uri.to_string().clone(),
            spotify_connector: self.spotify_connector.clone(),
            device_name: self.device_name.clone(),
        };
        // Kick off playback immediately; errors propagate to the caller.
        let _ = handle.replay().await?;
        Ok(handle)
    }
}
pub mod err {
    use super::*;
    /// Errors surfaced by the Spotify player.
    #[derive(Debug)]
    pub enum Error {
        /// A request to the Spotify Web API failed.
        HTTP(reqwest::Error),
        /// No Spotify Connect device is currently available.
        NoSpotifyDevice,
        /// No access token could be obtained from the token provider.
        NoToken,
    }
    impl Display for Error {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                Error::HTTP(err) => write!(f, "Spotify HTTP Error {}", err),
                Error::NoSpotifyDevice => write!(f, "No Spotify Connect Device found"),
                Error::NoToken => write!(
                    f,
                    "Failed to obtain access token from Access Token Provider"
                ),
            }
        }
    }
    impl From<reqwest::Error> for Error {
        fn from(err: reqwest::Error) -> Self {
            Error::HTTP(err)
        }
    }
    impl From<access_token_provider::AtpError> for Error {
        // NOTE(review): the impl header names `access_token_provider::AtpError`
        // while the parameter is `access_token_provider::err::AtpError` —
        // presumably the same re-exported type; confirm.
        fn from(err: access_token_provider::err::AtpError) -> Self {
            match err {
                access_token_provider::AtpError::NoTokenReceivedYet => Error::NoToken,
            }
        }
    }
    impl std::error::Error for Error {}
}
|
pub mod http;
pub mod tpl;
|
use crate::{AccountName, NumBytes, Read, ScopeName, TableName, Write};
use std::marker::PhantomData;
/// Metadata describing one table: its name constant, its row type, and
/// how primary/secondary keys are derived from a row.
pub trait Table: Sized {
    /// The table's name, encoded as a `u64`.
    const NAME: u64;
    /// The type stored in each row of the table.
    type Row: Read + Write + NumBytes;
    /// Extracts the primary key from a row.
    fn primary_key(row: &Self::Row) -> u64;
    /// Extracts the secondary keys from a row; by default a table has no
    /// secondary keys (all slots `None`).
    fn secondary_keys(_row: &Self::Row) -> SecondaryKeys {
        SecondaryKeys::default()
    }
    /// Builds a handle to this table's primary index for the given code
    /// account and scope.
    #[inline]
    fn table<C, S>(code: C, scope: S) -> PrimaryTableIndex<Self>
    where
        C: Into<AccountName>,
        S: Into<ScopeName>,
    {
        PrimaryTableIndex::new(code, scope)
    }
}
/// Identifies a secondary index: the primary table's name plus the
/// position of the index within that table's secondary keys.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default, Hash, PartialOrd, Ord)]
pub struct SecondaryTableName(TableName, usize);
impl SecondaryTableName {
    /// Creates a secondary table name from a primary table name and an
    /// index position.
    #[inline]
    pub const fn new(primary: TableName, index: usize) -> Self {
        Self(primary, index)
    }
    /// The primary table this index belongs to.
    #[inline]
    pub const fn primary(&self) -> TableName {
        self.0
    }
    /// The position of this index within the table's secondary keys.
    #[inline]
    pub const fn index(&self) -> usize {
        self.1
    }
}
impl From<SecondaryTableName> for u64 {
    // Packs both values into one u64: the primary table name supplies the
    // upper 60 bits (its low 4 bits are discarded) and the index occupies
    // the low 4 bits, so only index positions 0-15 are representable.
    #[inline]
    fn from(t: SecondaryTableName) -> Self {
        let index = t.1 as Self;
        let table: Self = t.0.into();
        (table & 0xFFFF_FFFF_FFFF_FFF0_u64)
            | (index & 0x0000_0000_0000_000F_u64)
    }
}
/// A single secondary-key value: either integral (`U64`) or floating
/// point (`F64`).
#[derive(Clone, Copy, Debug)]
pub enum SecondaryKey {
    /// An integral key.
    U64(u64),
    /// A floating-point key.
    F64(f64),
}
impl From<u64> for SecondaryKey {
    // `#[inline]` added for consistency with the other trivial
    // conversions in this module.
    #[inline]
    fn from(v: u64) -> Self {
        Self::U64(v)
    }
}
impl From<u32> for SecondaryKey {
    #[inline]
    fn from(v: u32) -> Self {
        Self::U64(v.into())
    }
}
impl From<f64> for SecondaryKey {
    #[inline]
    fn from(v: f64) -> Self {
        Self::F64(v)
    }
}
/// The set of secondary-key slots for one row; up to 16 secondary
/// indexes are supported, unused slots are `None`.
#[derive(Default, Clone, Copy)]
pub struct SecondaryKeys([Option<SecondaryKey>; 16]);
impl From<[Option<SecondaryKey>; 16]> for SecondaryKeys {
    fn from(v: [Option<SecondaryKey>; 16]) -> Self {
        Self(v)
    }
}
impl SecondaryKeys {
    /// Iterates over all 16 key slots.
    pub fn iter(&self) -> impl Iterator<Item = &Option<SecondaryKey>> {
        self.0.iter()
    }
    /// Iterates mutably over all 16 key slots.
    pub fn iter_mut(
        &mut self,
    ) -> impl Iterator<Item = &mut Option<SecondaryKey>> {
        self.0.iter_mut()
    }
}
/// A handle to a table's primary index, addressed by the code account
/// that owns the table and a scope within it.
#[derive(Copy, Clone, Debug)]
pub struct PrimaryTableIndex<T>
where
    T: Table,
{
    /// The account that owns the table.
    pub code: AccountName,
    /// The scope within the code account.
    pub scope: ScopeName,
    /// Marker tying the handle to its table type; no data is stored.
    _data: PhantomData<T>,
}
impl<T> PrimaryTableIndex<T>
where
    T: Table,
{
    /// Creates a primary index handle for the given code account and scope.
    #[inline]
    pub fn new<C, S>(code: C, scope: S) -> Self
    where
        C: Into<AccountName>,
        S: Into<ScopeName>,
    {
        Self {
            code: code.into(),
            scope: scope.into(),
            _data: PhantomData,
        }
    }
}
/// A handle to one of a table's secondary indexes; `K` is the key type
/// used by that index.
#[derive(Copy, Clone, Debug)]
pub struct SecondaryTableIndex<K, T>
where
    T: Table,
{
    /// The account that owns the table.
    pub code: AccountName,
    /// The scope within the code account.
    pub scope: ScopeName,
    /// The secondary index's name (primary table name + index position).
    pub table: SecondaryTableName,
    /// Marker tying the handle to its key and table types; no data stored.
    _data: PhantomData<(K, T)>,
}
impl<K, T> SecondaryTableIndex<K, T>
where
    T: Table,
{
    /// Creates a secondary index handle for the given code account,
    /// scope, primary table name, and index position.
    #[inline]
    pub fn new<C, S, N>(code: C, scope: S, name: N, index: usize) -> Self
    where
        C: Into<AccountName>,
        S: Into<ScopeName>,
        N: Into<TableName>,
    {
        Self {
            code: code.into(),
            scope: scope.into(),
            table: SecondaryTableName::new(name.into(), index),
            _data: PhantomData,
        }
    }
    /// The primary index of the table this secondary index belongs to.
    pub fn primary_index(&self) -> PrimaryTableIndex<T> {
        PrimaryTableIndex::new(self.code, self.scope)
    }
}
|
#[doc = "Register `SDMMC_ICR` reader"]
pub type R = crate::R<SDMMC_ICR_SPEC>;
#[doc = "Register `SDMMC_ICR` writer"]
pub type W = crate::W<SDMMC_ICR_SPEC>;
#[doc = "Field `CCRCFAILC` reader - CCRCFAIL flag clear bit Set by software to clear the CCRCFAIL flag."]
pub type CCRCFAILC_R = crate::BitReader;
#[doc = "Field `CCRCFAILC` writer - CCRCFAIL flag clear bit Set by software to clear the CCRCFAIL flag."]
pub type CCRCFAILC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DCRCFAILC` reader - DCRCFAIL flag clear bit Set by software to clear the DCRCFAIL flag."]
pub type DCRCFAILC_R = crate::BitReader;
#[doc = "Field `DCRCFAILC` writer - DCRCFAIL flag clear bit Set by software to clear the DCRCFAIL flag."]
pub type DCRCFAILC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CTIMEOUTC` reader - CTIMEOUT flag clear bit Set by software to clear the CTIMEOUT flag."]
pub type CTIMEOUTC_R = crate::BitReader;
#[doc = "Field `CTIMEOUTC` writer - CTIMEOUT flag clear bit Set by software to clear the CTIMEOUT flag."]
pub type CTIMEOUTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DTIMEOUTC` reader - DTIMEOUT flag clear bit Set by software to clear the DTIMEOUT flag."]
pub type DTIMEOUTC_R = crate::BitReader;
#[doc = "Field `DTIMEOUTC` writer - DTIMEOUT flag clear bit Set by software to clear the DTIMEOUT flag."]
pub type DTIMEOUTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXUNDERRC` reader - TXUNDERR flag clear bit Set by software to clear TXUNDERR flag."]
pub type TXUNDERRC_R = crate::BitReader;
#[doc = "Field `TXUNDERRC` writer - TXUNDERR flag clear bit Set by software to clear TXUNDERR flag."]
pub type TXUNDERRC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXOVERRC` reader - RXOVERR flag clear bit Set by software to clear the RXOVERR flag."]
pub type RXOVERRC_R = crate::BitReader;
#[doc = "Field `RXOVERRC` writer - RXOVERR flag clear bit Set by software to clear the RXOVERR flag."]
pub type RXOVERRC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CMDRENDC` reader - CMDREND flag clear bit Set by software to clear the CMDREND flag."]
pub type CMDRENDC_R = crate::BitReader;
#[doc = "Field `CMDRENDC` writer - CMDREND flag clear bit Set by software to clear the CMDREND flag."]
pub type CMDRENDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CMDSENTC` reader - CMDSENT flag clear bit Set by software to clear the CMDSENT flag."]
pub type CMDSENTC_R = crate::BitReader;
#[doc = "Field `CMDSENTC` writer - CMDSENT flag clear bit Set by software to clear the CMDSENT flag."]
pub type CMDSENTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DATAENDC` reader - DATAEND flag clear bit Set by software to clear the DATAEND flag."]
pub type DATAENDC_R = crate::BitReader;
#[doc = "Field `DATAENDC` writer - DATAEND flag clear bit Set by software to clear the DATAEND flag."]
pub type DATAENDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DHOLDC` reader - DHOLD flag clear bit Set by software to clear the DHOLD flag."]
pub type DHOLDC_R = crate::BitReader;
#[doc = "Field `DHOLDC` writer - DHOLD flag clear bit Set by software to clear the DHOLD flag."]
pub type DHOLDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBCKENDC` reader - DBCKEND flag clear bit Set by software to clear the DBCKEND flag."]
pub type DBCKENDC_R = crate::BitReader;
#[doc = "Field `DBCKENDC` writer - DBCKEND flag clear bit Set by software to clear the DBCKEND flag."]
pub type DBCKENDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DABORTC` reader - DABORT flag clear bit Set by software to clear the DABORT flag."]
pub type DABORTC_R = crate::BitReader;
#[doc = "Field `DABORTC` writer - DABORT flag clear bit Set by software to clear the DABORT flag."]
pub type DABORTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `BUSYD0ENDC` reader - BUSYD0END flag clear bit Set by software to clear the BUSYD0END flag."]
pub type BUSYD0ENDC_R = crate::BitReader;
#[doc = "Field `BUSYD0ENDC` writer - BUSYD0END flag clear bit Set by software to clear the BUSYD0END flag."]
pub type BUSYD0ENDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SDIOITC` reader - SDIOIT flag clear bit Set by software to clear the SDIOIT flag."]
pub type SDIOITC_R = crate::BitReader;
#[doc = "Field `SDIOITC` writer - SDIOIT flag clear bit Set by software to clear the SDIOIT flag."]
pub type SDIOITC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ACKFAILC` reader - ACKFAIL flag clear bit Set by software to clear the ACKFAIL flag."]
pub type ACKFAILC_R = crate::BitReader;
#[doc = "Field `ACKFAILC` writer - ACKFAIL flag clear bit Set by software to clear the ACKFAIL flag."]
pub type ACKFAILC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ACKTIMEOUTC` reader - ACKTIMEOUT flag clear bit Set by software to clear the ACKTIMEOUT flag."]
pub type ACKTIMEOUTC_R = crate::BitReader;
#[doc = "Field `ACKTIMEOUTC` writer - ACKTIMEOUT flag clear bit Set by software to clear the ACKTIMEOUT flag."]
pub type ACKTIMEOUTC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VSWENDC` reader - VSWEND flag clear bit Set by software to clear the VSWEND flag."]
pub type VSWENDC_R = crate::BitReader;
#[doc = "Field `VSWENDC` writer - VSWEND flag clear bit Set by software to clear the VSWEND flag."]
pub type VSWENDC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CKSTOPC` reader - CKSTOP flag clear bit Set by software to clear the CKSTOP flag."]
pub type CKSTOPC_R = crate::BitReader;
#[doc = "Field `CKSTOPC` writer - CKSTOP flag clear bit Set by software to clear the CKSTOP flag."]
pub type CKSTOPC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IDMATEC` reader - IDMA transfer error clear bit Set by software to clear the IDMATE flag."]
pub type IDMATEC_R = crate::BitReader;
#[doc = "Field `IDMATEC` writer - IDMA transfer error clear bit Set by software to clear the IDMATE flag."]
pub type IDMATEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IDMABTCC` reader - IDMA buffer transfer complete clear bit Set by software to clear the IDMABTC flag."]
pub type IDMABTCC_R = crate::BitReader;
#[doc = "Field `IDMABTCC` writer - IDMA buffer transfer complete clear bit Set by software to clear the IDMABTC flag."]
pub type IDMABTCC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    // svd2rust-generated read accessors: each returns the corresponding
    // bit of the register value captured by this reader.
    #[doc = "Bit 0 - CCRCFAIL flag clear bit Set by software to clear the CCRCFAIL flag."]
    #[inline(always)]
    pub fn ccrcfailc(&self) -> CCRCFAILC_R {
        CCRCFAILC_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - DCRCFAIL flag clear bit Set by software to clear the DCRCFAIL flag."]
    #[inline(always)]
    pub fn dcrcfailc(&self) -> DCRCFAILC_R {
        DCRCFAILC_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - CTIMEOUT flag clear bit Set by software to clear the CTIMEOUT flag."]
    #[inline(always)]
    pub fn ctimeoutc(&self) -> CTIMEOUTC_R {
        CTIMEOUTC_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - DTIMEOUT flag clear bit Set by software to clear the DTIMEOUT flag."]
    #[inline(always)]
    pub fn dtimeoutc(&self) -> DTIMEOUTC_R {
        DTIMEOUTC_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - TXUNDERR flag clear bit Set by software to clear TXUNDERR flag."]
    #[inline(always)]
    pub fn txunderrc(&self) -> TXUNDERRC_R {
        TXUNDERRC_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - RXOVERR flag clear bit Set by software to clear the RXOVERR flag."]
    #[inline(always)]
    pub fn rxoverrc(&self) -> RXOVERRC_R {
        RXOVERRC_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - CMDREND flag clear bit Set by software to clear the CMDREND flag."]
    #[inline(always)]
    pub fn cmdrendc(&self) -> CMDRENDC_R {
        CMDRENDC_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - CMDSENT flag clear bit Set by software to clear the CMDSENT flag."]
    #[inline(always)]
    pub fn cmdsentc(&self) -> CMDSENTC_R {
        CMDSENTC_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - DATAEND flag clear bit Set by software to clear the DATAEND flag."]
    #[inline(always)]
    pub fn dataendc(&self) -> DATAENDC_R {
        DATAENDC_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - DHOLD flag clear bit Set by software to clear the DHOLD flag."]
    #[inline(always)]
    pub fn dholdc(&self) -> DHOLDC_R {
        DHOLDC_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - DBCKEND flag clear bit Set by software to clear the DBCKEND flag."]
    #[inline(always)]
    pub fn dbckendc(&self) -> DBCKENDC_R {
        DBCKENDC_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - DABORT flag clear bit Set by software to clear the DABORT flag."]
    #[inline(always)]
    pub fn dabortc(&self) -> DABORTC_R {
        DABORTC_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 21 - BUSYD0END flag clear bit Set by software to clear the BUSYD0END flag."]
    #[inline(always)]
    pub fn busyd0endc(&self) -> BUSYD0ENDC_R {
        BUSYD0ENDC_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - SDIOIT flag clear bit Set by software to clear the SDIOIT flag."]
    #[inline(always)]
    pub fn sdioitc(&self) -> SDIOITC_R {
        SDIOITC_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 23 - ACKFAIL flag clear bit Set by software to clear the ACKFAIL flag."]
    #[inline(always)]
    pub fn ackfailc(&self) -> ACKFAILC_R {
        ACKFAILC_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - ACKTIMEOUT flag clear bit Set by software to clear the ACKTIMEOUT flag."]
    #[inline(always)]
    pub fn acktimeoutc(&self) -> ACKTIMEOUTC_R {
        ACKTIMEOUTC_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 25 - VSWEND flag clear bit Set by software to clear the VSWEND flag."]
    #[inline(always)]
    pub fn vswendc(&self) -> VSWENDC_R {
        VSWENDC_R::new(((self.bits >> 25) & 1) != 0)
    }
    #[doc = "Bit 26 - CKSTOP flag clear bit Set by software to clear the CKSTOP flag."]
    #[inline(always)]
    pub fn ckstopc(&self) -> CKSTOPC_R {
        CKSTOPC_R::new(((self.bits >> 26) & 1) != 0)
    }
    #[doc = "Bit 27 - IDMA transfer error clear bit Set by software to clear the IDMATE flag."]
    #[inline(always)]
    pub fn idmatec(&self) -> IDMATEC_R {
        IDMATEC_R::new(((self.bits >> 27) & 1) != 0)
    }
    #[doc = "Bit 28 - IDMA buffer transfer complete clear bit Set by software to clear the IDMABTC flag."]
    #[inline(always)]
    pub fn idmabtcc(&self) -> IDMABTCC_R {
        IDMABTCC_R::new(((self.bits >> 28) & 1) != 0)
    }
}
impl W {
    // svd2rust-generated write accessors: each returns a bit-writer proxy
    // for the bit offset given by the second type parameter.
    #[doc = "Bit 0 - CCRCFAIL flag clear bit Set by software to clear the CCRCFAIL flag."]
    #[inline(always)]
    #[must_use]
    pub fn ccrcfailc(&mut self) -> CCRCFAILC_W<SDMMC_ICR_SPEC, 0> {
        CCRCFAILC_W::new(self)
    }
    #[doc = "Bit 1 - DCRCFAIL flag clear bit Set by software to clear the DCRCFAIL flag."]
    #[inline(always)]
    #[must_use]
    pub fn dcrcfailc(&mut self) -> DCRCFAILC_W<SDMMC_ICR_SPEC, 1> {
        DCRCFAILC_W::new(self)
    }
    #[doc = "Bit 2 - CTIMEOUT flag clear bit Set by software to clear the CTIMEOUT flag."]
    #[inline(always)]
    #[must_use]
    pub fn ctimeoutc(&mut self) -> CTIMEOUTC_W<SDMMC_ICR_SPEC, 2> {
        CTIMEOUTC_W::new(self)
    }
    #[doc = "Bit 3 - DTIMEOUT flag clear bit Set by software to clear the DTIMEOUT flag."]
    #[inline(always)]
    #[must_use]
    pub fn dtimeoutc(&mut self) -> DTIMEOUTC_W<SDMMC_ICR_SPEC, 3> {
        DTIMEOUTC_W::new(self)
    }
    #[doc = "Bit 4 - TXUNDERR flag clear bit Set by software to clear TXUNDERR flag."]
    #[inline(always)]
    #[must_use]
    pub fn txunderrc(&mut self) -> TXUNDERRC_W<SDMMC_ICR_SPEC, 4> {
        TXUNDERRC_W::new(self)
    }
    #[doc = "Bit 5 - RXOVERR flag clear bit Set by software to clear the RXOVERR flag."]
    #[inline(always)]
    #[must_use]
    pub fn rxoverrc(&mut self) -> RXOVERRC_W<SDMMC_ICR_SPEC, 5> {
        RXOVERRC_W::new(self)
    }
    #[doc = "Bit 6 - CMDREND flag clear bit Set by software to clear the CMDREND flag."]
    #[inline(always)]
    #[must_use]
    pub fn cmdrendc(&mut self) -> CMDRENDC_W<SDMMC_ICR_SPEC, 6> {
        CMDRENDC_W::new(self)
    }
    #[doc = "Bit 7 - CMDSENT flag clear bit Set by software to clear the CMDSENT flag."]
    #[inline(always)]
    #[must_use]
    pub fn cmdsentc(&mut self) -> CMDSENTC_W<SDMMC_ICR_SPEC, 7> {
        CMDSENTC_W::new(self)
    }
    #[doc = "Bit 8 - DATAEND flag clear bit Set by software to clear the DATAEND flag."]
    #[inline(always)]
    #[must_use]
    pub fn dataendc(&mut self) -> DATAENDC_W<SDMMC_ICR_SPEC, 8> {
        DATAENDC_W::new(self)
    }
    #[doc = "Bit 9 - DHOLD flag clear bit Set by software to clear the DHOLD flag."]
    #[inline(always)]
    #[must_use]
    pub fn dholdc(&mut self) -> DHOLDC_W<SDMMC_ICR_SPEC, 9> {
        DHOLDC_W::new(self)
    }
    #[doc = "Bit 10 - DBCKEND flag clear bit Set by software to clear the DBCKEND flag."]
    #[inline(always)]
    #[must_use]
    pub fn dbckendc(&mut self) -> DBCKENDC_W<SDMMC_ICR_SPEC, 10> {
        DBCKENDC_W::new(self)
    }
    #[doc = "Bit 11 - DABORT flag clear bit Set by software to clear the DABORT flag."]
    #[inline(always)]
    #[must_use]
    pub fn dabortc(&mut self) -> DABORTC_W<SDMMC_ICR_SPEC, 11> {
        DABORTC_W::new(self)
    }
    #[doc = "Bit 21 - BUSYD0END flag clear bit Set by software to clear the BUSYD0END flag."]
    #[inline(always)]
    #[must_use]
    pub fn busyd0endc(&mut self) -> BUSYD0ENDC_W<SDMMC_ICR_SPEC, 21> {
        BUSYD0ENDC_W::new(self)
    }
    #[doc = "Bit 22 - SDIOIT flag clear bit Set by software to clear the SDIOIT flag."]
    #[inline(always)]
    #[must_use]
    pub fn sdioitc(&mut self) -> SDIOITC_W<SDMMC_ICR_SPEC, 22> {
        SDIOITC_W::new(self)
    }
    #[doc = "Bit 23 - ACKFAIL flag clear bit Set by software to clear the ACKFAIL flag."]
    #[inline(always)]
    #[must_use]
    pub fn ackfailc(&mut self) -> ACKFAILC_W<SDMMC_ICR_SPEC, 23> {
        ACKFAILC_W::new(self)
    }
    #[doc = "Bit 24 - ACKTIMEOUT flag clear bit Set by software to clear the ACKTIMEOUT flag."]
    #[inline(always)]
    #[must_use]
    pub fn acktimeoutc(&mut self) -> ACKTIMEOUTC_W<SDMMC_ICR_SPEC, 24> {
        ACKTIMEOUTC_W::new(self)
    }
    #[doc = "Bit 25 - VSWEND flag clear bit Set by software to clear the VSWEND flag."]
    #[inline(always)]
    #[must_use]
    pub fn vswendc(&mut self) -> VSWENDC_W<SDMMC_ICR_SPEC, 25> {
        VSWENDC_W::new(self)
    }
    #[doc = "Bit 26 - CKSTOP flag clear bit Set by software to clear the CKSTOP flag."]
    #[inline(always)]
    #[must_use]
    pub fn ckstopc(&mut self) -> CKSTOPC_W<SDMMC_ICR_SPEC, 26> {
        CKSTOPC_W::new(self)
    }
    #[doc = "Bit 27 - IDMA transfer error clear bit Set by software to clear the IDMATE flag."]
    #[inline(always)]
    #[must_use]
    pub fn idmatec(&mut self) -> IDMATEC_W<SDMMC_ICR_SPEC, 27> {
        IDMATEC_W::new(self)
    }
    #[doc = "Bit 28 - IDMA buffer transfer complete clear bit Set by software to clear the IDMABTC flag."]
    #[inline(always)]
    #[must_use]
    pub fn idmabtcc(&mut self) -> IDMABTCC_W<SDMMC_ICR_SPEC, 28> {
        IDMABTCC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "The SDMMC_ICR register is a write-only register. Writing a bit with 1 clears the corresponding bit in the SDMMC_STAR status register.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sdmmc_icr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`sdmmc_icr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SDMMC_ICR_SPEC;
impl crate::RegisterSpec for SDMMC_ICR_SPEC {
    // The register is accessed as a 32-bit word.
    type Ux = u32;
}
#[doc = "`read()` method returns [`sdmmc_icr::R`](R) reader structure"]
impl crate::Readable for SDMMC_ICR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`sdmmc_icr::W`](W) writer structure"]
impl crate::Writable for SDMMC_ICR_SPEC {
    // Empty bitmaps: no field requires a fixed 0/1 to be written during a
    // read-modify-write (per svd2rust's `Writable` contract).
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SDMMC_ICR to value 0"]
impl crate::Resettable for SDMMC_ICR_SPEC {
    // Reset value is all zeros.
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::collections::BinaryHeap;
use std::error::Error;
use std::io::{self, prelude::*};
/*
- read grid
- fill max-heap "peaks" with:
    [val, i, j] for val > 1
- while heap, pop "me":
    if the value in my cell is larger than me,
      ignore, since I've already been dequeued
    for each of my 4 nbrs,
      if they're less than (me - 1):
        add the delta to total
        re-add them to the heap (if they're > 1)
*/
type Grid = Vec<Vec<u32>>;
/// Raises cells (adding boxes) until every neighbor of a cell of height
/// `v` is at least `v - 1`; returns the total height added.
fn boxes_to_add(grid: &mut Grid) -> u64 {
    // Max-heap of candidate peaks; only heights >= 2 can force a raise.
    let mut pending = BinaryHeap::new();
    for (i, row) in grid.iter().enumerate() {
        for (j, &height) in row.iter().enumerate() {
            if height >= 2 {
                pending.push((height, i, j));
            }
        }
    }
    let mut added: u64 = 0;
    while let Some((height, i, j)) = pending.pop() {
        // Stale entry: this cell was raised after it was queued.
        if grid[i][j] > height {
            continue;
        }
        let floor = height - 1;
        for (x, y) in nbrs(grid, i as isize, j as isize) {
            if grid[x][y] < floor {
                added += u64::from(floor - grid[x][y]);
                grid[x][y] = floor;
                if floor >= 2 {
                    pending.push((floor, x, y));
                }
            }
        }
    }
    added
}
/// The in-bounds orthogonal neighbors of cell `(i, j)`.
fn nbrs(grid: &Grid, i: isize, j: isize) -> impl Iterator<Item = (usize, usize)> {
    let rows = grid.len() as isize;
    let cols = grid[0].len() as isize;
    let mut found = Vec::with_capacity(4);
    for &(x, y) in &[(i, j - 1), (i, j + 1), (i - 1, j), (i + 1, j)] {
        if (0..rows).contains(&x) && (0..cols).contains(&y) {
            found.push((x as usize, y as usize));
        }
    }
    found.into_iter()
}
/// Convenience alias for fallible results with a boxed error.
type Res<T> = Result<T, Box<dyn Error>>;
fn main() -> Res<()> {
    // Lock stdin once and stream its lines through the test runner.
    run_tests(io::stdin().lock().lines())
}
/// Runs every test case from `lines` ("T" header, then T grids),
/// printing one `Case #i: answer` line per case.
/// Panics on malformed input.
fn run_tests(mut lines: impl Iterator<Item = io::Result<String>>) -> Res<()> {
    let header = lines.next().unwrap()?;
    let t = header.parse()?;
    for case in 1..=t {
        let mut grid = read_test_input(&mut lines)?;
        println!("Case #{}: {}", case, boxes_to_add(&mut grid));
    }
    assert!(lines.next().is_none());
    Ok(())
}
/// Reads one test case: an "R C" header followed by R rows of C numbers.
/// Panics on malformed input.
fn read_test_input(lines: &mut impl Iterator<Item = io::Result<String>>) -> Res<Grid> {
    let header = lines.next().unwrap()?;
    let mut header_words = header.split_whitespace();
    let rows: usize = header_words.next().unwrap().parse()?;
    let cols: usize = header_words.next().unwrap().parse()?;
    assert!(header_words.next().is_none());
    let mut grid = Vec::with_capacity(rows);
    for _ in 0..rows {
        let text = lines.next().unwrap()?;
        let row = text
            .split_whitespace()
            .map(|w| w.parse::<u32>().unwrap())
            .collect::<Vec<u32>>();
        assert_eq!(row.len(), cols);
        grid.push(row);
    }
    Ok(grid)
}
|
pub use rustc_serialize::json::{Json, ToJson, decode};
pub use super::{Validator, ValidateResult, ValidateResults};
pub use super::{BaseDataMap, BaseDataMapDecoder};
pub use super::{ConnectionPool, InsertResult};
pub mod pages;
mod pages_test;
|
use std::fmt;
use lval::LVal;
use lenv::LEnv;
/// The arithmetic operations backing the numeric builtins.
#[derive(PartialEq)]
enum ArithmeticOp {
    ADD, SUB, MUL, DIV, MOD,
    MIN, MAX
}
impl fmt::Display for ArithmeticOp {
    /// Formats the operator as the symbol/name it is bound to.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::ArithmeticOp::*;
        // `write!(f, "{}", "+")` routed a string literal through the
        // `Display` machinery; writing the `&str` directly is the
        // idiomatic and cheaper form.
        f.write_str(match *self {
            ADD => "+",
            SUB => "-",
            MUL => "*",
            DIV => "/",
            MOD => "%",
            MIN => "min",
            MAX => "max",
        })
    }
}
/// Folds the arguments of a numeric builtin with operator `op`.
///
/// All arguments must be numbers; `-` with a single argument performs
/// unary negation, every other case requires at least two operands.
/// Division/modulo by zero reports an error via `err!`.
fn builtin_op(op: ArithmeticOp, mut args: Vec<LVal>) -> LVal {
    use self::ArithmeticOp::*;
    // Make sure all arguments are numbers
    builtin_assert!(op; args[*] is number);
    builtin_assert!(op; args.len() >= 1);
    // Seed the fold with the first argument (consumed from `args`).
    let mut x = args.remove(0).into_num();
    // Perform unary minus operation
    if op == SUB && args.len() == 0 {
        return LVal::num(-1.0 * x)
    }
    // After consuming the seed, the remaining operators still need at
    // least one more operand.
    builtin_assert!(op; args.len() >= 1);
    for arg in args {
        let y = arg.into_num();
        x = match op {
            ADD => x + y,
            SUB => x - y,
            MUL => x * y,
            DIV => {
                if y == 0.0 { err!("division by zero!") }
                x / y
            },
            MOD => {
                if y == 0.0 { err!("division by zero!") }
                x % y
            },
            MIN => if x > y { y } else { x },
            MAX => if x > y { x } else { y }
        };
    }
    LVal::num(x)
}
/// Builtin `+`: folds its arguments with addition.
pub fn builtin_add(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::ADD, args)
}
/// Builtin `-`: subtraction; with a single argument, unary negation.
pub fn builtin_sub(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::SUB, args)
}
/// Builtin `*`: folds its arguments with multiplication.
pub fn builtin_mul(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::MUL, args)
}
/// Builtin `/`: folds its arguments with division (errors on zero).
pub fn builtin_div(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::DIV, args)
}
/// Builtin `%`: folds its arguments with remainder (errors on zero).
pub fn builtin_mod(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::MOD, args)
}
/// Builtin `min`: smallest of the arguments.
pub fn builtin_min(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::MIN, args)
}
/// Builtin `max`: largest of the arguments.
pub fn builtin_max(_: &mut LEnv, args: Vec<LVal>) -> LVal {
    builtin_op(ArithmeticOp::MAX, args)
}
// --- Tests --------------------------------------------------------------------
#[cfg(test)]
mod test {
    use lval::LVal;
    use super::{builtin_op, ArithmeticOp};
    #[test]
    fn builtin_op_few_arguments() {
        // One argument is too few for ADD: the seed is consumed first, so
        // the error reports the number of *remaining* operands (0).
        assert_eq!(
            builtin_op(ArithmeticOp::ADD, vec![
                LVal::num(2.0)
            ]),
            LVal::err("`+` called with too few arguments: expected at least 1, got 0".to_string())
        )
    }
    #[test]
    fn builtin_op_plus() {
        assert_eq!(
            builtin_op(ArithmeticOp::ADD, vec![
                LVal::num(2.0),
                LVal::num(3.0),
                LVal::num(4.0),
                LVal::num(5.0)
            ]),
            LVal::num(14.0)
        )
    }
    #[test]
    fn builtin_op_minus() {
        assert_eq!(
            builtin_op(ArithmeticOp::SUB, vec![
                LVal::num(2.0),
                LVal::num(3.0)
            ]),
            LVal::num(-1.0)
        )
    }
    #[test]
    fn builtin_op_minus_unary() {
        // A single argument to SUB negates instead of erroring.
        assert_eq!(
            builtin_op(ArithmeticOp::SUB, vec![
                LVal::num(2.0)
            ]),
            LVal::num(-2.0)
        )
    }
    #[test]
    fn builtin_op_mul() {
        assert_eq!(
            builtin_op(ArithmeticOp::MUL, vec![
                LVal::num(2.0),
                LVal::num(3.0)
            ]),
            LVal::num(6.0)
        )
    }
    #[test]
    fn builtin_op_div() {
        assert_eq!(
            builtin_op(ArithmeticOp::DIV, vec![
                LVal::num(2.0),
                LVal::num(3.0)
            ]),
            LVal::num(2.0 / 3.0)
        )
    }
    #[test]
    fn builtin_op_modulo() {
        assert_eq!(
            builtin_op(ArithmeticOp::MOD, vec![
                LVal::num(15.0),
                LVal::num(12.0)
            ]),
            LVal::num(3.0)
        )
    }
    #[test]
    fn builtin_op_min() {
        assert_eq!(
            builtin_op(ArithmeticOp::MIN, vec![
                LVal::num(2.0),
                LVal::num(3.0)
            ]),
            LVal::num(2.0)
        )
    }
    #[test]
    fn builtin_op_max() {
        assert_eq!(
            builtin_op(ArithmeticOp::MAX, vec![
                LVal::num(2.0),
                LVal::num(3.0)
            ]),
            LVal::num(3.0)
        )
    }
}
use core::time::Duration;
use fastping_rs::PingResult::{Idle, Receive};
use influent::measurement::{Measurement, Value};
/// Aggregated results of one batch of pings.
#[derive(Debug)]
pub struct PingResult {
    // Sum of round-trip times until `update` averages it over the replies.
    rtt: Duration,
    // Count of lost pings until `update` converts it to a percentage.
    loss: f32
}
impl PingResult {
    /// Creates an empty accumulator (zero RTT, zero losses).
    pub fn new() -> PingResult {
        PingResult { rtt: Duration::from_millis(0), loss: 0.0 }
    }
    /// Folds one ping outcome in: a timeout counts as a loss, a reply
    /// adds its round-trip time to the running sum.
    pub fn handle(&mut self, res: fastping_rs::PingResult) {
        match res {
            Idle{addr: _} => self.loss += 1.0,
            Receive{addr: _, rtt} => self.rtt += rtt
        }
    }
    /// The summed (or, after `update`, averaged) round-trip time.
    pub fn rtt(&self) -> Duration { self.rtt }
    /// The loss count (or, after `update`, percentage).
    pub fn loss(&self) -> f32 { self.loss }
    /// Finalizes the accumulator for `nping` sent pings: averages the RTT
    /// over the number of replies and converts `loss` to a percentage.
    pub fn update(&mut self, nping: u32) -> &mut PingResult {
        let fping = nping as f32;
        let loss = self.loss as u32;
        // Guard against dividing by zero when every ping was lost.
        let div = if loss >= nping { 1 } else { nping - loss };
        self.rtt /= div;
        self.loss *= 100.0 / fping;
        self
    }
    /// Converts the (finalized) results into InfluxDB measurements tagged
    /// with `host`.
    /// NOTE(review): despite the `into_` prefix this borrows `self`; the
    /// RTT measurement is named "ttl" — confirm that name is intentional.
    pub fn into_measurements(&self, host: String) -> Vec<Measurement> {
        let mut rtt = Measurement::new("ttl");
        rtt.add_field("value",
            Value::Float(self.rtt.as_micros() as f64 / 1000.0));
        rtt.add_tag("host", host.clone());
        rtt.add_tag("srchost", "mac");
        let mut loss = Measurement::new("loss");
        loss.add_field("value", Value::Float(self.loss as f64));
        loss.add_tag("host", host);
        loss.add_tag("srchost", "mac");
        vec![rtt, loss]
    }
}
|
use crate::components;
use anyhow::Result;
use maud::{html, Markup, PreEscaped};
use rustimate_service::{RequestContext, Router};
/// Renders a full HTML page: doctype, the shared header, and `content`
/// inside a body styled by the requesting user's theme.
pub(crate) fn page(ctx: &RequestContext, router: &dyn Router, title: &str, content: &Markup) -> Result<Markup> {
    Ok(html! {
        (PreEscaped("<!DOCTYPE html>"))
        html lang="en" {
            // Header receives "<page title> - <app name>" as the title.
            (components::header::header(ctx, router, &format!("{} - {}", title, rustimate_core::APPNAME))?)
            body.(ctx.user_profile().theme().body_class()) {
                (content)
            }
        }
    })
}
|
use std::cmp::min;
use std::vec::Vec;
/// A 2D matrix stored in row-major layout: element (row, col) lives at
/// index `row * cols + col` of the backing vector.
pub struct Matrix2d<TNode> {
    rows: usize,
    cols: usize,
    // Cached `rows * cols`, the length of `nodes`.
    length: usize,
    nodes: Vec<TNode>,
}
impl<TNode> Matrix2d<TNode> {
    /// Creates a matrix of the specified size.
    /// # Arguments
    /// * `rows` - The number of rows in the matrix.
    /// * `cols` - The number of columns in the matrix.
    /// * `create_fn` - Called with (row, col) to create each node.
    pub fn new(rows: usize, cols: usize, create_fn: fn(usize, usize) -> TNode) -> Matrix2d<TNode> {
        let length = rows * cols;
        let mut nodes = Vec::with_capacity(length);
        for row in 0..rows {
            for col in 0..cols {
                nodes.push(create_fn(row, col))
            }
        }
        Matrix2d {
            rows,
            cols,
            length,
            nodes,
        }
    }
    /// Maps (row, col) to an index in the row-major backing vector.
    fn calculate_pos(&self, row: usize, col: usize) -> usize {
        // BUG FIX: was `self.rows * row + col`, which is only correct for
        // square matrices; row-major layout strides by `cols` per row.
        self.cols * row + col
    }
    /// Returns a mutable reference to the node at (row, col), or `None`
    /// when either coordinate is out of bounds.
    pub fn get_node(&mut self, row: usize, col: usize) -> Option<&mut TNode> {
        // Check both coordinates: the old flat-index check (`pos < length`)
        // accepted e.g. `col >= cols` and silently returned a node from
        // the next row.
        if row < self.rows && col < self.cols {
            let pos = self.calculate_pos(row, col);
            Some(&mut self.nodes[pos])
        } else {
            None
        }
    }
    /// Runs `act` on every node in the inclusive rectangle
    /// [row_start, row_end] x [col_start, col_end], clamped to the matrix.
    /// # Arguments
    /// `row_start` - The starting row.
    /// `row_end` - The ending row (inclusive).
    /// `col_start` - The starting column.
    /// `col_end` - The ending column (inclusive).
    /// `act` - The action to run on the nodes.
    pub fn walk_nodes(
        &mut self,
        row_start: usize,
        row_end: usize,
        col_start: usize,
        col_end: usize,
        act: fn(row: usize, col: usize, node: &mut TNode),
    ) {
        // `+ 1` makes the caller-supplied bounds inclusive; `min` clamps
        // requests that run past the matrix edge.
        let row_end_c = min(self.rows, row_end + 1);
        let col_end_c = min(self.cols, col_end + 1);
        for row in row_start..row_end_c {
            for col in col_start..col_end_c {
                if let Some(node) = self.get_node(row, col) {
                    act(row, col, node);
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::graphic::matrix_2d::Matrix2d;
    // Simple payload type for exercising the matrix; `row`/`col` are set
    // by the factory but only `message` is read back in these tests.
    struct Node {
        row: usize,
        col: usize,
        message: String,
    }
    #[test]
    fn create_test() {
        let mut m = Matrix2d::new(10, 10, |row, col| Node {
            row,
            col,
            message: String::from("Hello world!"),
        });
        // A mutation through `get_node` must be visible on re-fetch.
        let mut n = m.get_node(1, 1);
        match n {
            Some(node) => node.message = String::from("hello"),
            _ => {}
        }
        let other = m.get_node(1, 1);
        match other {
            Some(node) => assert_eq!(&node.message[0..], "hello"),
            _ => {}
        }
    }
    #[test]
    fn walk_nodes_test() {
        let mut m = Matrix2d::new(10, 10, |row, col| Node {
            row,
            col,
            message: String::from("Hello world!"),
        });
        // The callback must only see coordinates inside the requested
        // inclusive rectangle.
        m.walk_nodes(3, 5, 2, 4, |x, y, node| {
            assert!(x >= 3);
            assert!(x <= 5);
            assert!(y >= 2);
            assert!(y <= 4);
        });
    }
    #[test]
    fn walk_nodes_out_of_bounds() {
        let mut m = Matrix2d::new(10, 10, |row, col| Node {
            row,
            col,
            message: String::from("Hello world!"),
        });
        // Ranges past the matrix edge are clamped, never panic.
        m.walk_nodes(8, 11, 9, 11, |x, y, node| {
            assert!(x >= 8);
            assert!(x <= 9);
            assert!(y >= 9);
            assert!(y <= 9);
        });
    }
}
|
//! Serializing Rust data types into CDR.
use std::{self, io::Write, marker::PhantomData};
use byteorder::{ByteOrder, WriteBytesExt};
use serde::ser;
use crate::{
error::{Error, Result},
size::{calc_serialized_data_size, calc_serialized_data_size_bounded, Infinite, SizeLimit},
};
/// A serializer that writes values into a buffer.
pub struct Serializer<W, E> {
    writer: W,               // destination for the encoded bytes
    pos: u64,                // bytes written so far; drives alignment padding
    phantom: PhantomData<E>, // pins the `ByteOrder` choice without storing it
}
impl<W, E> Serializer<W, E>
where
    W: Write,
    E: ByteOrder,
{
    /// Creates a serializer writing into `writer`, with the stream position
    /// starting at 0.
    pub fn new(writer: W) -> Self {
        Self {
            writer,
            pos: 0,
            phantom: PhantomData,
        }
    }
    /// Advances the tracked stream position by `size` bytes.
    fn add_pos(&mut self, size: u64) {
        self.pos += size;
    }
    /// Restarts position tracking; alignment is computed relative to this
    /// zero point.
    pub(crate) fn reset_pos(&mut self) {
        self.pos = 0;
    }
    /// Emits the padding needed to align a `T`, then accounts for the
    /// `size_of::<T>()` bytes the caller is about to write.
    fn set_pos_of<T>(&mut self) -> Result<()> {
        self.write_padding_of::<T>()?;
        self.add_pos(std::mem::size_of::<T>() as u64);
        Ok(())
    }
    /// Writes zero bytes until `pos` is aligned to `size_of::<T>()`
    /// (also bumping `pos` by the amount written).
    fn write_padding_of<T>(&mut self) -> Result<()> {
        // Calculate the padding required to reach a 1-, 2-, 4-, or 8-byte
        // boundary. Bit-masking replaces the slower modulo operator:
        // `pos & (align - 1) == pos % align` for power-of-two alignments.
        const PADDING: [u8; 8] = [0; 8];
        let alignment = std::mem::size_of::<T>();
        let rem_mask = alignment - 1; // mask like 0x0, 0x1, 0x3, 0x7
        match (self.pos as usize) & rem_mask {
            0 => Ok(()),
            n @ 1..=7 => {
                let amt = alignment - n;
                // Bump `pos` first so nested calls see the padded position.
                self.pos += amt as u64;
                self.writer.write_all(&PADDING[..amt]).map_err(Into::into)
            }
            // Primitive sizes never exceed 8, so the remainder is always 0..=7.
            _ => unreachable!(),
        }
    }
    /// Encodes a host-sized length as the u32 CDR wire format, failing if it
    /// does not fit.
    fn write_usize_as_u32(&mut self, v: usize) -> Result<()> {
        if v > std::u32::MAX as usize {
            return Err(Error::NumberOutOfRange);
        }
        ser::Serializer::serialize_u32(self, v as u32)
    }
}
// Generates a `serialize_*` method for a fixed-size numeric type: pads the
// stream to the type's natural alignment (updating `pos`), then writes the
// value using the byte order selected by the `E` type parameter.
macro_rules! impl_serialize_value {
    ($ser_method:ident($ty:ty) = $writer_method:ident()) => {
        fn $ser_method(self, v: $ty) -> Result<Self::Ok> {
            self.set_pos_of::<$ty>()?;
            self.writer.$writer_method::<E>(v).map_err(Into::into)
        }
    };
}
impl<'a, W, E> ser::Serializer for &'a mut Serializer<W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    // Every compound shape (seq, tuple, struct, variant, map) serializes via
    // the same `Compound` helper, which forwards elements back into `self`.
    type SerializeMap = Compound<'a, W, E>;
    type SerializeSeq = Compound<'a, W, E>;
    type SerializeStruct = Compound<'a, W, E>;
    type SerializeStructVariant = Compound<'a, W, E>;
    type SerializeTuple = Compound<'a, W, E>;
    type SerializeTupleStruct = Compound<'a, W, E>;
    type SerializeTupleVariant = Compound<'a, W, E>;
    // Multi-byte primitives: align, track position, then write with order `E`.
    impl_serialize_value! { serialize_i16(i16) = write_i16() }
    impl_serialize_value! { serialize_i32(i32) = write_i32() }
    impl_serialize_value! { serialize_i64(i64) = write_i64() }
    impl_serialize_value! { serialize_u16(u16) = write_u16() }
    impl_serialize_value! { serialize_u32(u32) = write_u32() }
    impl_serialize_value! { serialize_u64(u64) = write_u64() }
    impl_serialize_value! { serialize_f32(f32) = write_f32() }
    impl_serialize_value! { serialize_f64(f64) = write_f64() }
    // A bool is encoded as a single octet (0 or 1).
    fn serialize_bool(self, v: bool) -> Result<Self::Ok> {
        self.set_pos_of::<bool>()?;
        self.writer.write_u8(v as u8).map_err(Into::into)
    }
    fn serialize_i8(self, v: i8) -> Result<Self::Ok> {
        self.set_pos_of::<i8>()?;
        self.writer.write_i8(v).map_err(Into::into)
    }
    fn serialize_u8(self, v: u8) -> Result<Self::Ok> {
        self.set_pos_of::<u8>()?;
        self.writer.write_u8(v).map_err(Into::into)
    }
    // A char is encoded as one octet; non-ASCII characters are rejected
    // because they would not fit in a single byte.
    fn serialize_char(self, v: char) -> Result<Self::Ok> {
        if !v.is_ascii() {
            Err(Error::InvalidChar(v))
        } else {
            let mut buf = [0u8; 1];
            v.encode_utf8(&mut buf);
            // 1-byte alignment never needs padding, so only `pos` is bumped.
            self.add_pos(1);
            self.writer.write_all(&buf).map_err(Into::into)
        }
    }
    // Strings are ASCII-only: a u32 length (which counts the trailing NUL),
    // the bytes, then the NUL terminator.
    fn serialize_str(self, v: &str) -> Result<Self::Ok> {
        if !v.is_ascii() {
            Err(Error::InvalidString(v.into()))
        } else {
            let terminating_char = [0u8];
            let l = v.len() + terminating_char.len();
            self.write_usize_as_u32(l)?;
            self.add_pos(l as u64);
            self.writer.write_all(v.as_bytes())?;
            self.writer.write_all(&terminating_char).map_err(Into::into)
        }
    }
    // Raw bytes: u32 length prefix followed by the bytes, no terminator.
    fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok> {
        let l = v.len();
        self.write_usize_as_u32(l)?;
        self.add_pos(l as u64);
        self.writer.write_all(v).map_err(Into::into)
    }
    // CDR has no representation for optional values.
    fn serialize_none(self) -> Result<Self::Ok> {
        Err(Error::TypeNotSupported)
    }
    fn serialize_some<T>(self, _v: &T) -> Result<Self::Ok>
    where
        T: ser::Serialize + ?Sized,
    {
        Err(Error::TypeNotSupported)
    }
    // Unit types contribute no bytes.
    fn serialize_unit(self) -> Result<Self::Ok> {
        Ok(())
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok> {
        Ok(())
    }
    // Enum variants are encoded as their u32 index (the discriminant).
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        variant_index: u32,
        _variant: &'static str,
    ) -> Result<Self::Ok> {
        self.serialize_u32(variant_index)
    }
    // Newtype structs are transparent wrappers.
    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<Self::Ok>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(self)
    }
    // Newtype variants: discriminant first, then the payload.
    fn serialize_newtype_variant<T>(
        self,
        _name: &'static str,
        variant_index: u32,
        _variant: &'static str,
        value: &T,
    ) -> Result<Self::Ok>
    where
        T: ser::Serialize + ?Sized,
    {
        self.serialize_u32(variant_index)?;
        value.serialize(self)
    }
    // Sequences: u32 element count, then the elements back-to-back.
    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> {
        let len = len.ok_or(Error::SequenceMustHaveLength)?;
        self.write_usize_as_u32(len)?;
        Ok(Compound { ser: self })
    }
    // Tuples and arrays have a statically known length, so none is written.
    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> {
        Ok(Compound { ser: self })
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleStruct> {
        Ok(Compound { ser: self })
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant> {
        self.serialize_u32(variant_index)?;
        Ok(Compound { ser: self })
    }
    // Maps have no CDR encoding.
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
        Err(Error::TypeNotSupported)
    }
    // Struct fields are written in declaration order with no field names.
    fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> {
        Ok(Compound { ser: self })
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant> {
        self.serialize_u32(variant_index)?;
        Ok(Compound { ser: self })
    }
    // CDR is a binary format.
    fn is_human_readable(&self) -> bool {
        false
    }
}
// Shared state for all compound serialization shapes: just a borrow of the
// parent serializer that each element/field is forwarded into.
#[doc(hidden)]
pub struct Compound<'a, W: 'a, E: 'a> {
    ser: &'a mut Serializer<W, E>,
}
// Sequence elements are written back-to-back; the u32 length prefix was
// already emitted by `serialize_seq`, so `end` has nothing left to do.
impl<'a, W, E> ser::SerializeSeq for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Tuple elements are concatenated with no length prefix (the arity is
// statically known to the reader).
impl<'a, W, E> ser::SerializeTuple for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_element<T>(&mut self, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Tuple-struct fields are concatenated in declaration order.
impl<'a, W, E> ser::SerializeTupleStruct for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Tuple-variant fields follow the u32 discriminant already written by
// `serialize_tuple_variant`.
impl<'a, W, E> ser::SerializeTupleVariant for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_field<T>(&mut self, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Required by the trait, but unreachable in practice: `serialize_map` always
// returns `Error::TypeNotSupported` before a `Compound` is handed out.
impl<'a, W, E> ser::SerializeMap for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_key<T>(&mut self, key: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        key.serialize(&mut *self.ser)
    }
    #[inline]
    fn serialize_value<T>(&mut self, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Struct fields are written in declaration order; field names are dropped.
impl<'a, W, E> ser::SerializeStruct for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
// Struct-variant fields follow the u32 discriminant already written by
// `serialize_struct_variant`; field names are dropped.
impl<'a, W, E> ser::SerializeStructVariant for Compound<'a, W, E>
where
    W: Write,
    E: ByteOrder,
{
    type Error = Error;
    type Ok = ();
    #[inline]
    fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()>
    where
        T: ser::Serialize + ?Sized,
    {
        value.serialize(&mut *self.ser)
    }
    #[inline]
    fn end(self) -> Result<()> {
        Ok(())
    }
}
/// Serializes a serializable object into a `Vec` of bytes.
///
/// When `size_limit` is bounded, the encoded size is validated against the
/// limit before any byte is produced.
pub fn serialize_data<T, S, E>(value: &T, size_limit: S) -> Result<Vec<u8>>
where
    T: ser::Serialize + ?Sized,
    S: SizeLimit,
    E: ByteOrder,
{
    // Compute the exact output size up front so the buffer never reallocates;
    // the bounded path also enforces the limit.
    let capacity = match size_limit.limit() {
        Some(limit) => calc_serialized_data_size_bounded(value, limit)? as usize,
        None => calc_serialized_data_size(value) as usize,
    };
    let mut buffer = Vec::with_capacity(capacity);
    // Any limit was already checked above, so the write phase runs unbounded.
    serialize_data_into::<_, _, _, E>(&mut buffer, value, Infinite)?;
    Ok(buffer)
}
/// Serializes an object directly into a `Write`.
///
/// With a bounded `size_limit`, the encoded size is validated before anything
/// reaches `writer`.
pub fn serialize_data_into<W, T, S, E>(writer: W, value: &T, size_limit: S) -> Result<()>
where
    W: Write,
    T: ser::Serialize + ?Sized,
    S: SizeLimit,
    E: ByteOrder,
{
    if let Some(limit) = size_limit.limit() {
        calc_serialized_data_size_bounded(value, limit)?;
    }
    value.serialize(&mut Serializer::<_, E>::new(writer))
}
#[cfg(test)]
mod tests {
    //! Golden-byte tests: each CDR primitive, array, and sequence shape is
    //! serialized in both byte orders and compared against hand-written
    //! expected layouts (including CDR's alignment padding). The trailing
    //! `//` markers in the byte tables group one logical value per line.
    use byteorder::{BigEndian, LittleEndian};
    use super::*;
    #[test]
    fn serialize_octet() {
        let v = 32u8;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x20]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x20]
        );
    }
    #[test]
    fn serialize_char() {
        let v = 'Z';
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x5a]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x5a]
        );
    }
    // Non-ASCII chars/strings must be rejected (CDR char is a single octet).
    #[test]
    fn serialize_wchar() {
        let v = 'Å';
        assert!(serialize_data::<_, _, BigEndian>(&v, Infinite).is_err());
        assert!(serialize_data::<_, _, LittleEndian>(&v, Infinite).is_err());
    }
    #[test]
    fn serialize_ushort() {
        let v = 65500u16;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0xff, 0xdc]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0xdc, 0xff]
        );
    }
    #[test]
    fn serialize_short() {
        let v = -32700i16;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x80, 0x44]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x44, 0x80]
        );
    }
    #[test]
    fn serialize_ulong() {
        let v = 4294967200u32;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0xff, 0xff, 0xff, 0xa0]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0xa0, 0xff, 0xff, 0xff]
        );
    }
    #[test]
    fn serialize_long() {
        let v = -2147483600i32;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x80, 0x00, 0x00, 0x30]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x30, 0x00, 0x00, 0x80]
        );
    }
    #[test]
    fn serialize_ulonglong() {
        let v = 18446744073709551600u64;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
        );
    }
    #[test]
    fn serialize_longlong() {
        let v = -9223372036800i64;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x40]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x40, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff]
        );
    }
    #[test]
    fn serialize_float() {
        let v = std::f32::MIN_POSITIVE;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x00, 0x80, 0x00, 0x00]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x00, 0x00, 0x80, 0x00]
        );
    }
    #[test]
    fn serialize_double() {
        let v = std::f64::MIN_POSITIVE;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00]
        );
    }
    #[test]
    fn serialize_bool() {
        let v = true;
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x01]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x01]
        );
    }
    // Strings: u32 length (counting the NUL), bytes, NUL terminator.
    #[test]
    fn serialize_string() {
        let v = "Hola a todos, esto es un test";
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x1e, 0x48, 0x6f, 0x6c, 0x61, 0x20, 0x61, 0x20, 0x74, 0x6f, 0x64,
                0x6f, 0x73, 0x2c, 0x20, 0x65, 0x73, 0x74, 0x6f, 0x20, 0x65, 0x73, 0x20, 0x75, 0x6e,
                0x20, 0x74, 0x65, 0x73, 0x74, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x1e, 0x00, 0x00, 0x00, 0x48, 0x6f, 0x6c, 0x61, 0x20, 0x61, 0x20, 0x74, 0x6f, 0x64,
                0x6f, 0x73, 0x2c, 0x20, 0x65, 0x73, 0x74, 0x6f, 0x20, 0x65, 0x73, 0x20, 0x75, 0x6e,
                0x20, 0x74, 0x65, 0x73, 0x74, 0x00,
            ]
        );
    }
    #[test]
    fn serialize_wstring() {
        let v = "みなさんこんにちは。これはテストです。";
        assert!(serialize_data::<_, _, BigEndian>(&v, Infinite).is_err());
        assert!(serialize_data::<_, _, LittleEndian>(&v, Infinite).is_err());
    }
    // Even an empty string carries its NUL terminator (length 1).
    #[test]
    fn serialize_empty_string() {
        let v = "";
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x00, 0x00, 0x00, 0x01, 0x00]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x01, 0x00, 0x00, 0x00, 0x00]
        );
    }
    // Arrays have a statically known length, so no length prefix is written.
    #[test]
    fn serialize_octet_array() {
        let v = [1u8, 2, 3, 4, 5];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x01, 0x02, 0x03, 0x04, 0x05]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x01, 0x02, 0x03, 0x04, 0x05]
        );
    }
    #[test]
    fn serialize_char_array() {
        let v = ['A', 'B', 'C', 'D', 'E'];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x41, 0x42, 0x43, 0x44, 0x45]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x41, 0x42, 0x43, 0x44, 0x45]
        );
    }
    #[test]
    fn serialize_ushort_array() {
        let v = [65500u16, 65501, 65502, 65503, 65504];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0xff, 0xdc, //
                0xff, 0xdd, //
                0xff, 0xde, //
                0xff, 0xdf, //
                0xff, 0xe0
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0xdc, 0xff, //
                0xdd, 0xff, //
                0xde, 0xff, //
                0xdf, 0xff, //
                0xe0, 0xff
            ]
        );
    }
    #[test]
    fn serialize_short_array() {
        let v = [-32700i16, -32701, -32702, -32703, -32704];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x80, 0x44, //
                0x80, 0x43, //
                0x80, 0x42, //
                0x80, 0x41, //
                0x80, 0x40
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x44, 0x80, //
                0x43, 0x80, //
                0x42, 0x80, //
                0x41, 0x80, //
                0x40, 0x80
            ]
        );
    }
    #[test]
    fn serialize_ulong_array() {
        let v = [
            4294967200u32,
            4294967201,
            4294967202,
            4294967203,
            4294967204,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0xff, 0xff, 0xff, 0xa0, //
                0xff, 0xff, 0xff, 0xa1, //
                0xff, 0xff, 0xff, 0xa2, //
                0xff, 0xff, 0xff, 0xa3, //
                0xff, 0xff, 0xff, 0xa4,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0xa0, 0xff, 0xff, 0xff, //
                0xa1, 0xff, 0xff, 0xff, //
                0xa2, 0xff, 0xff, 0xff, //
                0xa3, 0xff, 0xff, 0xff, //
                0xa4, 0xff, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_long_array() {
        let v = [
            -2147483600,
            -2147483601,
            -2147483602,
            -2147483603,
            -2147483604,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x80, 0x00, 0x00, 0x30, //
                0x80, 0x00, 0x00, 0x2f, //
                0x80, 0x00, 0x00, 0x2e, //
                0x80, 0x00, 0x00, 0x2d, //
                0x80, 0x00, 0x00, 0x2c,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x30, 0x00, 0x00, 0x80, //
                0x2f, 0x00, 0x00, 0x80, //
                0x2e, 0x00, 0x00, 0x80, //
                0x2d, 0x00, 0x00, 0x80, //
                0x2c, 0x00, 0x00, 0x80,
            ]
        );
    }
    #[test]
    fn serialize_ulonglong_array() {
        let v = [
            18446744073709551600u64,
            18446744073709551601,
            18446744073709551602,
            18446744073709551603,
            18446744073709551604,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_longlong_array() {
        let v = [
            -9223372036800i64,
            -9223372036801,
            -9223372036802,
            -9223372036803,
            -9223372036804,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x40, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3f, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3e, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3d, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3c,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x40, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3f, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3e, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3d, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3c, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_float_array() {
        let f = std::f32::MIN_POSITIVE;
        let v = [f, f + 1., f + 2., f + 3., f + 4.];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x80, 0x00, 0x00, //
                0x3f, 0x80, 0x00, 0x00, //
                0x40, 0x00, 0x00, 0x00, //
                0x40, 0x40, 0x00, 0x00, //
                0x40, 0x80, 0x00, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x80, 0x00, //
                0x00, 0x00, 0x80, 0x3f, //
                0x00, 0x00, 0x00, 0x40, //
                0x00, 0x00, 0x40, 0x40, //
                0x00, 0x00, 0x80, 0x40,
            ]
        );
    }
    #[test]
    fn serialize_double_array() {
        let f = std::f64::MIN_POSITIVE;
        let v = [f, f + 1., f + 2., f + 3., f + 4.];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40,
            ]
        );
    }
    #[test]
    fn serialize_bool_array() {
        let v = [true, false, true, false, true];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![0x01, 0x00, 0x01, 0x00, 0x01]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![0x01, 0x00, 0x01, 0x00, 0x01]
        );
    }
    // Strings inside compounds: each u32 length prefix must be re-aligned to
    // 4 bytes, hence the interleaved zero padding after odd-length strings.
    #[test]
    fn serialize_string_array() {
        let v = ["HOLA", "ADIOS", "HELLO", "BYE", "GOODBYE"];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x48, 0x4f, 0x4c, 0x41, 0x00, //
                0x00, 0x00, 0x00, //
                0x00, 0x00, 0x00, 0x06, //
                0x41, 0x44, 0x49, 0x4f, 0x53, 0x00, //
                0x00, 0x00, //
                0x00, 0x00, 0x00, 0x06, //
                0x48, 0x45, 0x4c, 0x4c, 0x4f, 0x00, //
                0x00, 0x00, //
                0x00, 0x00, 0x00, 0x04, //
                0x42, 0x59, 0x45, 0x00, //
                0x00, 0x00, 0x00, 0x08, //
                0x47, 0x4f, 0x4f, 0x44, 0x42, 0x59, 0x45, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x48, 0x4f, 0x4c, 0x41, 0x00, //
                0x00, 0x00, 0x00, //
                0x06, 0x00, 0x00, 0x00, //
                0x41, 0x44, 0x49, 0x4f, 0x53, 0x00, //
                0x00, 0x00, //
                0x06, 0x00, 0x00, 0x00, //
                0x48, 0x45, 0x4c, 0x4c, 0x4f, 0x00, //
                0x00, 0x00, //
                0x04, 0x00, 0x00, 0x00, //
                0x42, 0x59, 0x45, 0x00, //
                0x08, 0x00, 0x00, 0x00, //
                0x47, 0x4f, 0x4f, 0x44, 0x42, 0x59, 0x45, 0x00,
            ]
        );
    }
    // Sequences: a u32 element count precedes the elements.
    #[test]
    fn serialize_octet_sequence() {
        let v = vec![1u8, 2, 3, 4, 5];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x01, 0x02, 0x03, 0x04, 0x05
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x01, 0x02, 0x03, 0x04, 0x05
            ]
        );
    }
    #[test]
    fn serialize_char_sequence() {
        let v = vec!['A', 'B', 'C', 'D', 'E'];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x41, 0x42, 0x43, 0x44, 0x45
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x41, 0x42, 0x43, 0x44, 0x45
            ]
        );
    }
    #[test]
    fn serialize_ushort_sequence() {
        let v = vec![65500u16, 65501, 65502, 65503, 65504];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0xff, 0xdc, //
                0xff, 0xdd, //
                0xff, 0xde, //
                0xff, 0xdf, //
                0xff, 0xe0
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0xdc, 0xff, //
                0xdd, 0xff, //
                0xde, 0xff, //
                0xdf, 0xff, //
                0xe0, 0xff
            ]
        );
    }
    #[test]
    fn serialize_short_sequence() {
        let v = vec![-32700i16, -32701, -32702, -32703, -32704];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x80, 0x44, //
                0x80, 0x43, //
                0x80, 0x42, //
                0x80, 0x41, //
                0x80, 0x40
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x44, 0x80, //
                0x43, 0x80, //
                0x42, 0x80, //
                0x41, 0x80, //
                0x40, 0x80
            ]
        );
    }
    #[test]
    fn serialize_ulong_sequence() {
        let v = vec![
            4294967200u32,
            4294967201,
            4294967202,
            4294967203,
            4294967204,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0xff, 0xff, 0xff, 0xa0, //
                0xff, 0xff, 0xff, 0xa1, //
                0xff, 0xff, 0xff, 0xa2, //
                0xff, 0xff, 0xff, 0xa3, //
                0xff, 0xff, 0xff, 0xa4,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0xa0, 0xff, 0xff, 0xff, //
                0xa1, 0xff, 0xff, 0xff, //
                0xa2, 0xff, 0xff, 0xff, //
                0xa3, 0xff, 0xff, 0xff, //
                0xa4, 0xff, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_long_sequence() {
        let v = vec![
            -2147483600,
            -2147483601,
            -2147483602,
            -2147483603,
            -2147483604,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x80, 0x00, 0x00, 0x30, //
                0x80, 0x00, 0x00, 0x2f, //
                0x80, 0x00, 0x00, 0x2e, //
                0x80, 0x00, 0x00, 0x2d, //
                0x80, 0x00, 0x00, 0x2c,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x30, 0x00, 0x00, 0x80, //
                0x2f, 0x00, 0x00, 0x80, //
                0x2e, 0x00, 0x00, 0x80, //
                0x2d, 0x00, 0x00, 0x80, //
                0x2c, 0x00, 0x00, 0x80,
            ]
        );
    }
    #[test]
    fn serialize_ulonglong_sequence() {
        let v = vec![
            18446744073709551600u64,
            18446744073709551601,
            18446744073709551602,
            18446744073709551603,
            18446744073709551604,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first u64 to 8 bytes
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, //
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first u64 to 8 bytes
                0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
                0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_longlong_sequence() {
        let v = vec![
            -9223372036800i64,
            -9223372036801,
            -9223372036802,
            -9223372036803,
            -9223372036804,
        ];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first i64 to 8 bytes
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x40, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3f, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3e, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3d, //
                0xff, 0xff, 0xf7, 0x9c, 0x84, 0x2f, 0xa5, 0x3c,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first i64 to 8 bytes
                0x40, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3f, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3e, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3d, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff, //
                0x3c, 0xa5, 0x2f, 0x84, 0x9c, 0xf7, 0xff, 0xff,
            ]
        );
    }
    #[test]
    fn serialize_float_sequence() {
        let f = std::f32::MIN_POSITIVE;
        let v = vec![f, f + 1., f + 2., f + 3., f + 4.];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x00, 0x80, 0x00, 0x00, //
                0x3f, 0x80, 0x00, 0x00, //
                0x40, 0x00, 0x00, 0x00, //
                0x40, 0x40, 0x00, 0x00, //
                0x40, 0x80, 0x00, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x00, 0x00, 0x80, 0x00, //
                0x00, 0x00, 0x80, 0x3f, //
                0x00, 0x00, 0x00, 0x40, //
                0x00, 0x00, 0x40, 0x40, //
                0x00, 0x00, 0x80, 0x40,
            ]
        );
    }
    #[test]
    fn serialize_double_sequence() {
        let f = std::f64::MIN_POSITIVE;
        let v = vec![f, f + 1., f + 2., f + 3., f + 4.];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first f64 to 8 bytes
                0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
                0x40, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x00, 0x00, 0x00, 0x00, // padding: aligns the first f64 to 8 bytes
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, //
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40,
            ]
        );
    }
    #[test]
    fn serialize_bool_sequence() {
        let v = vec![true, false, true, false, true];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x01, 0x00, 0x01, 0x00, 0x01
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x01, 0x00, 0x01, 0x00, 0x01
            ]
        );
    }
    #[test]
    fn serialize_string_sequence() {
        let v = vec!["HOLA", "ADIOS", "HELLO", "BYE", "GOODBYE"];
        assert_eq!(
            serialize_data::<_, _, BigEndian>(&v, Infinite).unwrap(),
            vec![
                0x00, 0x00, 0x00, 0x05, //
                0x00, 0x00, 0x00, 0x05, //
                0x48, 0x4f, 0x4c, 0x41, 0x00, //
                0x00, 0x00, 0x00, //
                0x00, 0x00, 0x00, 0x06, //
                0x41, 0x44, 0x49, 0x4f, 0x53, 0x00, //
                0x00, 0x00, //
                0x00, 0x00, 0x00, 0x06, //
                0x48, 0x45, 0x4c, 0x4c, 0x4f, 0x00, //
                0x00, 0x00, //
                0x00, 0x00, 0x00, 0x04, //
                0x42, 0x59, 0x45, 0x00, //
                0x00, 0x00, 0x00, 0x08, //
                0x47, 0x4f, 0x4f, 0x44, 0x42, 0x59, 0x45, 0x00,
            ]
        );
        assert_eq!(
            serialize_data::<_, _, LittleEndian>(&v, Infinite).unwrap(),
            vec![
                0x05, 0x00, 0x00, 0x00, //
                0x05, 0x00, 0x00, 0x00, //
                0x48, 0x4f, 0x4c, 0x41, 0x00, //
                0x00, 0x00, 0x00, //
                0x06, 0x00, 0x00, 0x00, //
                0x41, 0x44, 0x49, 0x4f, 0x53, 0x00, //
                0x00, 0x00, //
                0x06, 0x00, 0x00, 0x00, //
                0x48, 0x45, 0x4c, 0x4c, 0x4f, 0x00, //
                0x00, 0x00, //
                0x04, 0x00, 0x00, 0x00, //
                0x42, 0x59, 0x45, 0x00, //
                0x08, 0x00, 0x00, 0x00, //
                0x47, 0x4f, 0x4f, 0x44, 0x42, 0x59, 0x45, 0x00,
            ]
        );
    }
}
|
/// Trailing hint appended to error output, pointing users at the wiki help page.
pub const HELP_MSG: &str =
    "\nYou can check help page: https://github.com/ivan770/ares/wiki/Help";
|
use iced_graphics::Primitive;
use iced_native::{
Color as GraphicsColor, Font, HorizontalAlignment, Point, Rectangle, Size, VerticalAlignment,
};
use super::Rect;
use crate::cssom::Color;
use crate::layout::font as layout_font;
/// Builds an iced `Primitive::Text` that draws `content` inside `rect`,
/// resolving the font bytes and hashed family name through `font_context`.
pub fn create_text(
    content: String,
    color: Color,
    rect: Rect,
    font: layout_font::Font,
    font_context: &mut layout_font::FontContext,
) -> Primitive {
    let origin = Point::new(rect.x, rect.y);
    let dimensions = Size::new(rect.width, rect.height);
    // Resolve the font data first, then its name (same order as before, since
    // the data lookup borrows the context mutably).
    let font_bytes = font.get_static_font_data(font_context);
    let font_name = font.get_static_hashed_family_name();
    Primitive::Text {
        content,
        bounds: Rectangle::new(origin, dimensions),
        color: GraphicsColor::from_rgba8(color.r, color.g, color.b, color.a),
        size: font.size,
        font: Font::External {
            bytes: font_bytes,
            name: font_name,
        },
        horizontal_alignment: HorizontalAlignment::Left,
        vertical_alignment: VerticalAlignment::Top,
    }
}
|
//
// Part of Roadkill Project.
//
// Copyright 2010-2018, Berkus <berkus+github@metta.systems>
//
// Distributed under the Boost Software License, Version 1.0.
// (See file LICENSE_1_0.txt or a copy at http://www.boost.org/LICENSE_1_0.txt)
//
pub mod support;
#[cfg(feature = "convert")]
use crate::support::texture::PixelMap;
use {
crate::support::{camera::CameraState, car::Car, render_manager::RenderManager},
cgmath::Vector3,
glium::{
glutin::{
event::{Event, WindowEvent},
event_loop::ControlFlow,
},
Surface,
},
log::info,
};
/// Installs the global logger: Info+ to stdout, Trace+ to `debug.log`,
/// both sharing one timestamped line format.
fn setup_logging() -> Result<(), fern::InitError> {
    // Everything (Trace and up) goes to debug.log, truncated on each run so
    // the file only holds the latest session.
    let log_file = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open("debug.log")?;
    let file_output = fern::Dispatch::new()
        .level(log::LevelFilter::Trace)
        .chain(log_file);
    // The console only shows Info and up.
    let stdout_output = fern::Dispatch::new()
        .level(log::LevelFilter::Info)
        .chain(std::io::stdout());
    // Common line format: timestamp, target, level, message.
    fern::Dispatch::new()
        .format(|out, message, record| {
            out.finish(format_args!(
                "{}[{}][{}] {}",
                chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
                record.target(),
                record.level(),
                message
            ))
        })
        .chain(stdout_output)
        .chain(file_output)
        .apply()?;
    Ok(())
}
use std::fs::{self, DirEntry};
use std::path::Path;
#[cfg(feature = "convert")]
use std::{fs::File, io::BufWriter, path::PathBuf};
/// Recursively walks `dir`, invoking `cb` once for every non-directory entry.
/// A non-directory `dir` is silently accepted and visits nothing.
fn visit_dirs(dir: &Path, cb: &mut dyn for<'r> FnMut(&'r DirEntry)) -> Result<(), support::Error> {
    if !dir.is_dir() {
        return Ok(());
    }
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();
        if path.is_dir() {
            visit_dirs(&path, cb)?;
        } else {
            cb(&entry);
        }
    }
    Ok(())
}
/// Decodes every pixmap stored in `fname` and writes each one out as a PNG
/// next to the source file, remapping colors through `palette`.
///
/// Previously this built panic messages with `format!` inside `expect`
/// (allocating even on success — clippy `expect_fun_call`) and panicked inside
/// a `Result`-returning function; load and create failures are now propagated
/// to the caller via `?`.
#[cfg(feature = "convert")]
fn convert_pixmap(fname: String, palette: &PixelMap) -> Result<(), support::Error> {
    let pmap = PixelMap::load_from(fname.clone())?;
    for pix in pmap {
        // The PNG lands beside the source .PIX, named after the pixmap entry.
        let mut pngname = PathBuf::from(&fname);
        pngname.set_file_name(&pix.name);
        pngname.set_extension("png");
        info!("Creating file {:?}", pngname);
        let file = File::create(&pngname)?;
        let w = &mut BufWriter::new(file);
        pix.write_png_remapped_via(palette, w)
            .expect("Failed to write PNG");
    }
    Ok(())
}
/// Uses different palette for race-selection part
#[cfg(feature = "convert")]
fn convert_menu_pixmap(fname: String) -> Result<(), support::Error> {
    // The menu screens are remapped through the race-selection palette.
    let palettes = PixelMap::load_from(String::from("DecodedData/DATA/REG/PALETTES/DRACEFLC.PAL"))?;
    convert_pixmap(fname, &palettes[0])
}
/// Converts a single in-game pixmap using the renderer palette.
#[cfg(feature = "convert")]
fn convert_game_pixmap(fname: String) -> Result<(), support::Error> {
    let palettes = PixelMap::load_from(String::from("DecodedData/DATA/REG/PALETTES/DRRENDER.PAL"))?;
    convert_pixmap(fname, &palettes[0])
}
/// Load palette once and then apply to a bunch of pixmap data
#[cfg(feature = "convert")]
fn convert_all_pixmaps() -> Result<(), support::Error> {
    let palette =
        &PixelMap::load_from(String::from("DecodedData/DATA/REG/PALETTES/DRRENDER.PAL"))?[0];
    visit_dirs(Path::new("DecodedData"), &mut |dir_entry| {
        // Skip anything that is not a regular file (or whose type is unknown).
        let is_file = dir_entry
            .file_type()
            .map(|t| t.is_file())
            .unwrap_or(false);
        if !is_file {
            return;
        }
        let fname = String::from(dir_entry.path().to_str().unwrap());
        if fname.ends_with(".PIX") {
            convert_pixmap(fname, palette).unwrap();
        }
    })
}
fn main() {
    setup_logging().expect("failed to initialize logging");
    // Optional asset-conversion pass (enabled by the `convert` feature):
    // decode all .PIX files, plus one specific in-game pixmap.
    #[cfg(feature = "convert")]
    {
        convert_all_pixmaps().expect("Listing failed");
        convert_game_pixmap(String::from("DecodedData/DATA/PIXELMAP/EAGYELE.PIX"))
            .expect("Conversion failed");
    }
    // Load all cars and arrange in a grid 6x7 (40 cars total)
    let mut cars = Vec::new();
    let mut counter = 0;
    visit_dirs(Path::new("DecodedData/DATA/CARS"), &mut |entry| {
        if let Ok(file_type) = entry.file_type() {
            let fname = String::from(entry.path().to_str().unwrap());
            if file_type.is_file() && fname.ends_with(".ENC") {
                let mut car = Car::load_from(fname).unwrap();
                // Row = counter / 7, column = counter % 7. Note that
                // `counter % 7 as u16` parses as `counter % (7 as u16)`.
                let z = 1.0f32 * f32::from(counter / 7);
                let x = 1.0f32 * f32::from(counter % 7 as u16);
                counter += 1;
                info!("Moving car {} to {},0,{}", counter, x, -z);
                car.base_translation = Vector3::from([x, 0f32, -z]);
                cars.push(car);
            }
        }
    })
    .unwrap();
    // Prepare window
    let mut events_loop = glium::glutin::event_loop::EventLoop::new();
    let window = glium::glutin::window::WindowBuilder::new()
        .with_title("carma")
        .with_inner_size(glium::glutin::dpi::LogicalSize::new(800.0, 600.0));
    let windowed_context = glium::glutin::ContextBuilder::new();
    let display = glium::Display::new(window, windowed_context, &events_loop).unwrap();
    // Register every car with the render manager before entering the loop.
    let mut render_manager = RenderManager::new(&display);
    for car in &cars {
        render_manager.prepare_car(car, &display);
    }
    let mut camera = CameraState::new();
    // Hand control to the winit event loop; this closure never returns.
    events_loop.run(move |event, _, control_flow| {
        println!("{:?}", event);
        *control_flow = ControlFlow::Wait;
        camera.update();
        match event {
            Event::LoopDestroyed => return,
            Event::WindowEvent { event, .. } => match event {
                WindowEvent::Resized(physical_size) => display.resize(physical_size),
                WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
                // All other window events feed the camera controller.
                _ => camera.process_input(&event),
            },
            Event::RedrawRequested(_) => {
                let mut frame = display.draw();
                frame.clear_color(0.4, 0.4, 0.4, 0.0);
                frame.clear_depth(1.0);
                for car in &cars {
                    render_manager.draw_car(car, &mut frame, &camera);
                }
                frame.finish().unwrap();
                // windowed_context.swap_buffers().unwrap();
            }
            _ => (),
        }
    });
}
|
mod hm_tools;
use hm_tools::{tools, art};
use std::io;
/// Entry point: clear the terminal, print the banner, and start one round.
fn main() {
    // Spawning `clear` assumes a unix-like terminal with that command.
    std::process::Command::new("clear").status().unwrap();
    println!("Welcome to Hangman Official Terminal Game 1978 Copyright, all Right Reserved");
    start();
}
/// Run one round of hangman: pick a secret word, then loop reading guesses
/// until the player reveals every letter (win) or exhausts the hangman
/// sprites (lose).
fn start() {
    let mut mistakes = 0;
    let secret = tools::random_word();
    // Unique letters of the secret that still need to be guessed.
    let mut temp: Vec<char> = secret.clone().chars().collect();
    tools::remove_duplicates(&mut temp);
    let mut encrypted = tools::encrypt_word(&secret);
    let mut guess: char;
    loop {
        println!("{}\n", art::SPRITES[mistakes]);
        println!("{}\n", encrypted);
        guess = get_input();
        if is_guessed(guess, &mut temp) {
            tools::decrypt_encrypted(guess, &mut encrypted, &secret);
        } else {
            mistakes += 1;
        }
        std::process::Command::new("clear").status().unwrap();
        if temp.is_empty() {
            println!("______________________You Win!________________________");
            break;
        }
        // BUG FIX: previously `art::SPRITES[mistakes]` panicked once the
        // player made more mistakes than there are sprites — there was no
        // lose condition at all. End the round showing the final sprite.
        if mistakes >= art::SPRITES.len() - 1 {
            println!("{}\n", art::SPRITES[art::SPRITES.len() - 1]);
            println!("You lose! The word was: {}", secret);
            break;
        }
    }
}
/// If `guess` occurs in `list`, remove its first occurrence and return true;
/// otherwise leave `list` untouched and return false.
fn is_guessed(guess: char, list: &mut Vec<char>) -> bool {
    match list.iter().position(|&c| c == guess) {
        Some(idx) => {
            list.remove(idx);
            true
        }
        None => false,
    }
}
/// Prompt repeatedly until the user enters exactly one character
/// (after trimming whitespace), then return that character.
fn get_input() -> char {
    let mut input = String::new();
    loop {
        // Reuse the same buffer across attempts.
        input.clear();
        println!("Try guessing the letter!");
        if io::stdin().read_line(&mut input).is_err() {
            println!("Failed reading the input!");
            continue;
        }
        let trimmed = input.trim();
        if trimmed.chars().count() == 1 {
            return trimmed.chars().next().unwrap();
        }
        println!("Enter only one letter!");
    }
}
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// SloWidgetDefinition : Use the SLO and uptime widget to track your SLOs (Service Level Objectives) and uptime on screenboards and timeboards.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SloWidgetDefinition {
    /// Defined global time target.
    #[serde(rename = "global_time_target", skip_serializing_if = "Option::is_none")]
    pub global_time_target: Option<String>,
    /// Defined error budget.
    #[serde(rename = "show_error_budget", skip_serializing_if = "Option::is_none")]
    pub show_error_budget: Option<bool>,
    /// ID of the SLO displayed.
    #[serde(rename = "slo_id", skip_serializing_if = "Option::is_none")]
    pub slo_id: Option<String>,
    /// Times being monitored.
    #[serde(rename = "time_windows", skip_serializing_if = "Option::is_none")]
    pub time_windows: Option<Vec<crate::models::WidgetTimeWindows>>,
    /// Title of the widget.
    #[serde(rename = "title", skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    /// Alignment of the widget title.
    #[serde(rename = "title_align", skip_serializing_if = "Option::is_none")]
    pub title_align: Option<crate::models::WidgetTextAlign>,
    /// Size of the title.
    #[serde(rename = "title_size", skip_serializing_if = "Option::is_none")]
    pub title_size: Option<String>,
    /// Widget type discriminator; required, serialized as `type`.
    #[serde(rename = "type")]
    pub _type: crate::models::SloWidgetDefinitionType,
    /// View mode of the widget.
    #[serde(rename = "view_mode", skip_serializing_if = "Option::is_none")]
    pub view_mode: Option<crate::models::WidgetViewMode>,
    /// Type of view displayed by the widget.
    #[serde(rename = "view_type")]
    pub view_type: String,
}
impl SloWidgetDefinition {
    /// Use the SLO and uptime widget to track your SLOs (Service Level Objectives) and uptime on screenboards and timeboards.
    pub fn new(_type: crate::models::SloWidgetDefinitionType, view_type: String) -> SloWidgetDefinition {
        // Only the two required fields are taken as arguments; every optional
        // field starts out unset.
        SloWidgetDefinition {
            _type,
            view_type,
            global_time_target: None,
            show_error_budget: None,
            slo_id: None,
            time_windows: None,
            title: None,
            title_align: None,
            title_size: None,
            view_mode: None,
        }
    }
}
|
use crate::leet_code::common::chain_table::ListNode;
/// Build the chain [1, 3, 2], reverse it, and print the reversed list.
pub fn main() {
    let head = ListNode::produce_chain(vec![1, 3, 2]);
    println!("{:?}", Solution::reverse_list(head));
}
struct Solution;
impl Solution {
pub fn reverse_list(head: Option<Box<ListNode>>) -> Option<Box<ListNode>> {
let mut origin = head;
let mut reverse = None;
while let Some(mut value) = origin {
origin = value.next.take();
value.next = reverse;
reverse = Some(value);
}
reverse
}
} |
use byteorder::{ByteOrder, LittleEndian};
use libmdbx::*;
use tempfile::tempdir;
type Database = libmdbx::Database<NoWriteMap>;
/// A read-only open requires the database files to already exist; the plain
/// (read-write) open creates them, after which read-only opens succeed.
#[test]
fn test_open() {
    let dir = tempdir().unwrap();
    // opening non-existent database with read-only should fail
    assert!(Database::new()
        .set_flags(Mode::ReadOnly.into())
        .open(dir.path())
        .is_err());
    // opening non-existent database should succeed
    assert!(Database::new().open(dir.path()).is_ok());
    // opening database with read-only should succeed
    assert!(Database::new()
        .set_flags(Mode::ReadOnly.into())
        .open(dir.path())
        .is_ok());
}
/// A writable database hands out both read-write and read-only transactions;
/// a read-only handle refuses read-write transactions.
#[test]
fn test_begin_txn() {
    let dir = tempdir().unwrap();
    {
        // writable database
        let db = Database::new().open(dir.path()).unwrap();
        assert!(db.begin_rw_txn().is_ok());
        assert!(db.begin_ro_txn().is_ok());
    }
    {
        // read-only database
        // (works because the first block already created the files)
        let db = Database::new()
            .set_flags(Mode::ReadOnly.into())
            .open(dir.path())
            .unwrap();
        assert!(db.begin_rw_txn().is_err());
        assert!(db.begin_ro_txn().is_ok());
    }
}
/// The unnamed (default) table always exists, but a named table cannot be
/// opened before something creates it.
#[test]
fn test_open_table() {
    let dir = tempdir().unwrap();
    let db = Database::new().set_max_tables(1).open(dir.path()).unwrap();
    let txn = db.begin_ro_txn().unwrap();
    // default table: always present
    assert!(txn.open_table(None).is_ok());
    // named table: never created, so opening it fails
    assert!(txn.open_table(Some("test")).is_err());
}
/// A named table must be created (in a read-write transaction) before it can
/// be opened; once created it is visible within the same transaction.
// NOTE(review): `set_max_tables(11)` looks arbitrary next to the 1/10 used by
// the neighboring tests — one named table would suffice here.
#[test]
fn test_create_table() {
    let dir = tempdir().unwrap();
    let db = Database::new().set_max_tables(11).open(dir.path()).unwrap();
    let txn = db.begin_rw_txn().unwrap();
    assert!(txn.open_table(Some("test")).is_err());
    assert!(txn.create_table(Some("test"), TableFlags::empty()).is_ok());
    assert!(txn.open_table(Some("test")).is_ok())
}
// NOTE(review): despite its name, this test never closes a table — it only
// creates and re-opens one. Possibly a leftover from an older API that had an
// explicit close; confirm against the libmdbx bindings.
#[test]
fn test_close_table() {
    let dir = tempdir().unwrap();
    let db = Database::new().set_max_tables(10).open(dir.path()).unwrap();
    let txn = db.begin_rw_txn().unwrap();
    txn.create_table(Some("test"), TableFlags::empty()).unwrap();
    txn.open_table(Some("test")).unwrap();
}
/// Forcing a sync succeeds on a writable handle but must fail on a
/// read-only one.
#[test]
fn test_sync() {
    let dir = tempdir().unwrap();
    {
        // writable: force-sync is allowed
        let db = Database::new().open(dir.path()).unwrap();
        db.sync(true).unwrap();
    }
    {
        // read-only: force-sync is rejected
        let db = Database::new()
            .set_flags(Mode::ReadOnly.into())
            .open(dir.path())
            .unwrap();
        db.sync(true).unwrap_err();
    }
}
/// Database statistics start out empty and reflect inserts afterwards.
#[test]
fn test_stat() {
    let dir = tempdir().unwrap();
    let db = Database::new().open(dir.path()).unwrap();
    // Stats should be empty initially.
    let stat = db.stat().unwrap();
    assert_eq!(stat.depth(), 0);
    assert_eq!(stat.branch_pages(), 0);
    assert_eq!(stat.leaf_pages(), 0);
    assert_eq!(stat.overflow_pages(), 0);
    assert_eq!(stat.entries(), 0);
    // Write a few small values.
    // Each 8-byte little-endian u64 doubles as its own key, inserted into
    // the default table, one committed transaction per entry.
    for i in 0..64 {
        let mut value = [0u8; 8];
        LittleEndian::write_u64(&mut value, i);
        let tx = db.begin_rw_txn().unwrap();
        tx.put(
            &tx.open_table(None).unwrap(),
            value,
            value,
            WriteFlags::default(),
        )
        .unwrap();
        tx.commit().unwrap();
    }
    // Stats should now reflect inserted values.
    // The asserts encode that all 64 small entries fit in one leaf page
    // (depth 1, no branch or overflow pages).
    let stat = db.stat().unwrap();
    assert_eq!(stat.depth(), 1);
    assert_eq!(stat.branch_pages(), 0);
    assert_eq!(stat.leaf_pages(), 1);
    assert_eq!(stat.overflow_pages(), 0);
    assert_eq!(stat.entries(), 64);
}
/// The info struct reports the geometry the database was opened with.
#[test]
fn test_info() {
    let map_size = 1024 * 1024;
    let dir = tempdir().unwrap();
    let db = Database::new()
        .set_geometry(Geometry {
            // only the lower size bound is pinned; everything else defaults
            size: Some(map_size..),
            ..Default::default()
        })
        .open(dir.path())
        .unwrap();
    let info = db.info().unwrap();
    // the requested lower bound is reflected back as the geometry minimum
    assert_eq!(info.geometry().min(), map_size as u64);
    // assert_eq!(info.last_pgno(), 1);
    // assert_eq!(info.last_txnid(), 0);
    assert_eq!(info.num_readers(), 0);
}
/// Clearing a populated table releases its pages onto the freelist.
#[test]
fn test_freelist() {
    let dir = tempdir().unwrap();
    let db = Database::new().open(dir.path()).unwrap();
    // a fresh database has no reclaimable pages
    let mut freelist = db.freelist().unwrap();
    assert_eq!(freelist, 0);
    // Write a few small values.
    // (same key == value scheme as test_stat; one committed txn per entry)
    for i in 0..64 {
        let mut value = [0u8; 8];
        LittleEndian::write_u64(&mut value, i);
        let tx = db.begin_rw_txn().unwrap();
        tx.put(
            &tx.open_table(None).unwrap(),
            value,
            value,
            WriteFlags::default(),
        )
        .unwrap();
        tx.commit().unwrap();
    }
    let tx = db.begin_rw_txn().unwrap();
    tx.clear_table(&tx.open_table(None).unwrap()).unwrap();
    tx.commit().unwrap();
    // Freelist should not be empty after clear_table.
    freelist = db.freelist().unwrap();
    assert!(freelist > 0);
}
|
#![allow(unused_variables)]
use crate::scene::Scene;
use crate::spawns::Spawn;
use crate::types::System;
use super::components::*;
/// Advances the position of any object that has both a position and a
/// movement component.
pub struct MoveSystem;

impl System<GameObject> for MoveSystem {
    // Only objects carrying position + movement are processed.
    fn requirements(&self, target: &GameObject) -> bool {
        target.has_position()
            && target.has_movement()
    }
    fn update(&mut self, spawn: &Spawn, scene: &mut Scene<GameObject>) {
        // NOTE(review): `&mut` of the value returned by `get_mut` looks
        // redundant if `get_mut` already yields a mutable reference —
        // confirm against Scene's API.
        let target = &mut scene.get_mut(spawn);
        // Move along x by the object's speed (cast to f64 to match position.x).
        target.position.x += *target.movement.speed() as f64;
    }
}
/// Lets an object attack its current focus, or acquire a new focus among
/// nearby opposing objects when it has none.
pub struct AttackSystem;

impl System<GameObject> for AttackSystem {
    fn requirements(&self, target: &GameObject) -> bool {
        target.has_position()
            && target.has_focus()
            && target.has_attack()
            && target.has_agenda()
    }
    fn update(&mut self, spawn: &Spawn, scene: &mut Scene<GameObject>) {
        // NOTE(review): `scene` is mutably borrowed here and again below
        // (for `opponent` / `search_components`); this only works if Scene
        // uses interior mutability or returns owned handles — confirm.
        let target = &mut scene.get_mut(spawn);
        // if target has a focus, than attack the first focus
        if let Some(other_spawn) = target.focus.prime() {
            let opponent = &mut scene.get_mut(other_spawn);
            if opponent.has_health() {
                opponent.damage.take_damage(target.attack.clone());
            }
        // if target doesn't have a focus find and add a new one
        } else {
            // Candidate: any damageable object of an opposing faction
            // within a distance of 10.
            if let Some(spawn) = scene.search_components(|other| {
                other.has_damage()
                    && target.agenda.faction.opposing(&other.agenda.faction)
                    && target.position.distance(&other.position) < 10.0
            }) {
                // `spawn` here shadows the parameter; it is the found opponent.
                target.focus.add(&spawn);
            }
        }
    }
}
/// Applies queued attacks to an object's health, reduced through its
/// defense component when one is present.
pub struct DamageSystem;

impl System<GameObject> for DamageSystem {
    fn requirements(&self, target: &GameObject) -> bool {
        target.has_health()
            && target.has_damage()
    }
    fn update(&mut self, spawn: &Spawn, scene: &mut Scene<GameObject>) {
        let target = &mut scene.get_mut(spawn);
        // NOTE(review): the damage queue is cloned and iterated but never
        // cleared here — confirm draining happens elsewhere, otherwise the
        // same attacks would be re-applied every tick.
        for attack in target.damage.clone() {
            let power = match target.has_defense() {
                true => target.defense.resolve_attack(&attack),
                false => attack.power(),
            };
            target.health.damage(power)
        }
    }
}
|
#[macro_use]
extern crate specs_derive;
#[macro_use]
extern crate shred_derive;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate airmash_server;
extern crate fnv;
extern crate htmlescape;
extern crate rand;
extern crate shred;
extern crate shrev;
extern crate simple_logger;
extern crate specs;
use airmash_server as server;
mod component;
mod config;
mod gamemode;
mod systems;
use std::env;
use gamemode::{CTFGameMode, BLUE_TEAM, RED_TEAM};
use server::AirmashServer;
fn main() {
    // Force backtraces so panics in server logs are actionable.
    env::set_var("RUST_BACKTRACE", "1");
    simple_logger::init_with_level(log::Level::Info).unwrap();
    // Build the server on 0.0.0.0:3501 with the CTF game mode, then register
    // the game-mode-specific systems into its dispatcher builder.
    let mut server = AirmashServer::new("0.0.0.0:3501")
        .with_engine()
        .with_gamemode(CTFGameMode::new());
    server.builder = systems::register(&mut server.world, server.builder);
    // Blocks until the server shuts down.
    server.run();
}
|
// svd2rust-generated reader/writer type aliases for the PUCRF register.
#[doc = "Register `PUCRF` reader"]
pub type R = crate::R<PUCRF_SPEC>;
#[doc = "Register `PUCRF` writer"]
pub type W = crate::W<PUCRF_SPEC>;
#[doc = "Field `PU2` reader - Port F pull-up bit i (i = 2 to 0) Setting PUi bit while the corresponding PDi bit is zero and the APC bit of the PWR_CR3 register is set activates a pull-up device on the PF\\[i\\]
I/O. On STM32C011xx, only PU2 is available."]
pub type PU2_R = crate::BitReader;
#[doc = "Field `PU2` writer - Port F pull-up bit i (i = 2 to 0) Setting PUi bit while the corresponding PDi bit is zero and the APC bit of the PWR_CR3 register is set activates a pull-up device on the PF\\[i\\]
I/O. On STM32C011xx, only PU2 is available."]
// Const parameter `O` is the bit offset within the register.
pub type PU2_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 2 - Port F pull-up bit i (i = 2 to 0) Setting PUi bit while the corresponding PDi bit is zero and the APC bit of the PWR_CR3 register is set activates a pull-up device on the PF\\[i\\]
I/O. On STM32C011xx, only PU2 is available."]
    #[inline(always)]
    pub fn pu2(&self) -> PU2_R {
        // Extract bit 2 of the raw register value as a boolean reader.
        PU2_R::new(((self.bits >> 2) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 2 - Port F pull-up bit i (i = 2 to 0) Setting PUi bit while the corresponding PDi bit is zero and the APC bit of the PWR_CR3 register is set activates a pull-up device on the PF\\[i\\]
I/O. On STM32C011xx, only PU2 is available."]
    #[inline(always)]
    #[must_use]
    pub fn pu2(&mut self) -> PU2_W<PUCRF_SPEC, 2> {
        // Writer proxy targeting bit offset 2 of this register.
        PU2_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe by svd2rust convention: raw writes bypass the per-field setters,
    // so the caller is responsible for providing a valid bit pattern.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "PWR Port F pull-up control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pucrf::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pucrf::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PUCRF_SPEC;
impl crate::RegisterSpec for PUCRF_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`pucrf::R`](R) reader structure"]
impl crate::Readable for PUCRF_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pucrf::W`](W) writer structure"]
impl crate::Writable for PUCRF_SPEC {
    // Zero bitmaps: no bits are forced set/cleared during `modify`;
    // interpreted by the generic register code (crate::generic::Reg).
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PUCRF to value 0"]
impl crate::Resettable for PUCRF_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
pub mod data_loader;
pub mod input;
pub mod models;
pub mod mutation;
pub mod query;
pub mod utils;
use crate::db::MysqlPooledConnection;
use std::sync::{Arc, Mutex};
//use std::error::Error;
use crate::errors::ServiceError;
use crate::graphql::data_loader::user::{UserByIdDataLoader, UserDataLoaderBatchById};
use crate::graphql::input::user::*;
use crate::graphql::models::user::*;
use crate::graphql::mutation::user::*;
use crate::graphql::query::user::*;
use crate::models::movie::Movie;
use crate::models::user::User;
use dataloader::Loader;
use diesel::prelude::*;
use diesel::MysqlConnection;
use crate::graphql::data_loader::character::{
CharacterByIdDataLoader, CharacterDataLoaderBatchById,
};
use crate::graphql::data_loader::movie::{MovieByIdDataLoader, MovieDataLoaderBatchById};
use crate::graphql::data_loader::movie_character::{
CharacterIdsByMovieIdDataLoader, CharacterIdsDataLoaderBatchByMovieId,
MovieIdsByCharacterIdDataLoader, MovieIdsDataLoaderBatchByCharacterId,
};
use crate::graphql::input::movie::MovieFilter;
use crate::graphql::query::character::characters;
use crate::graphql::query::movie::movies;
use crate::models::character::Character;
use juniper::EmptySubscription;
type SharedMysqlPoolConnection = Arc<Mutex<MysqlPooledConnection>>;
// Request-scoped state handed to every juniper resolver.
#[derive(Clone)]
pub struct Context {
    // Shared database connection: one pooled connection behind a mutex.
    pub db: SharedMysqlPoolConnection,
    // The authenticated user, if one was resolved from the request's email
    // (see `create_context` / `find_user`).
    pub user: Option<User>,
    // Cached batch loaders (dataloader crate) keyed as their names suggest.
    pub user_data_loader_by_id: UserByIdDataLoader,
    pub movie_data_loader_by_id: MovieByIdDataLoader,
    pub character_data_loader_by_id: CharacterByIdDataLoader,
    pub movie_ids_data_loader_by_character_id: MovieIdsByCharacterIdDataLoader,
    pub character_ids_data_loader_by_movie_id: CharacterIdsByMovieIdDataLoader,
}
// Marker impl required by juniper to accept `Context` in resolvers.
impl juniper::Context for Context {}
// GraphQL query root. Resolvers delegate to the free functions glob-imported
// from `crate::graphql::query::*` — e.g. `users(context)` below calls the
// imported function, not the method (method calls would need `Self::`).
// Plain `//` comments are used here because `///` docs would change the
// published GraphQL schema descriptions.
pub struct QueryRoot;

#[juniper::graphql_object(Context = Context)]
impl QueryRoot {
    pub fn users(context: &Context) -> Result<Vec<User>, ServiceError> {
        users(context)
    }
    pub fn movies(
        context: &Context,
        filter: Option<MovieFilter>,
    ) -> Result<Vec<Movie>, ServiceError> {
        movies(context, filter)
    }
    pub fn characters(context: &Context) -> Result<Vec<Character>, ServiceError> {
        characters(context)
    }
    /// Get the authenticated User
    pub fn me(context: &Context) -> Result<User, ServiceError> {
        me(context)
    }
}
// GraphQL mutation root; both resolvers delegate to the glob-imported
// mutation functions and return an auth `Token` on success.
pub struct Mutation;

#[juniper::graphql_object(Context = Context)]
impl Mutation {
    pub fn register(context: &Context, input: RegisterInput) -> Result<Token, ServiceError> {
        register(context, input)
    }
    pub fn login(context: &Context, input: LoginInput) -> Result<Token, ServiceError> {
        login(context, input)
    }
}
/// Root schema type: queries and mutations only; subscriptions are unused.
pub type Schema = juniper::RootNode<'static, QueryRoot, Mutation, EmptySubscription<Context>>;
/// Construct the GraphQL schema instance.
pub fn create_schema() -> Schema {
    Schema::new(QueryRoot {}, Mutation {}, EmptySubscription::new())
}
/// Build a per-request GraphQL context: wraps the pooled connection for
/// shared use, wires up all batch dataloaders, and resolves the current
/// user from `user_email` (if any).
pub fn create_context(user_email: Option<String>, mysql_pool: MysqlPooledConnection) -> Context {
    // One connection, shared behind a mutex by every loader and resolver.
    let db = Arc::new(Mutex::new(mysql_pool));
    Context {
        // `.cached()` enables each loader's result cache (dataloader crate).
        user_data_loader_by_id: Loader::new(UserDataLoaderBatchById::new(Arc::clone(&db))).cached(),
        movie_data_loader_by_id: Loader::new(MovieDataLoaderBatchById::new(Arc::clone(&db)))
            .cached(),
        character_data_loader_by_id: Loader::new(CharacterDataLoaderBatchById::new(Arc::clone(
            &db,
        )))
        .cached(),
        movie_ids_data_loader_by_character_id: Loader::new(
            MovieIdsDataLoaderBatchByCharacterId::new(Arc::clone(&db)),
        )
        .cached(),
        character_ids_data_loader_by_movie_id: Loader::new(
            CharacterIdsDataLoaderBatchByMovieId::new(Arc::clone(&db)),
        )
        .cached(),
        // Look up the authenticated user by email before handing over `db`.
        user: find_user(user_email, Arc::clone(&db)),
        db,
    }
}
/// Look up a user by email. Returns `None` when no email was supplied, when
/// no matching row exists, or when the query fails (errors are swallowed on
/// purpose: an unauthenticated context is the fallback).
pub fn find_user(
    user_email: Option<String>,
    db: Arc<Mutex<MysqlPooledConnection>>,
) -> Option<User> {
    use crate::schema::users::dsl::*;
    let conn: &MysqlConnection = &db.lock().unwrap();
    users
        .filter(email.eq(user_email?))
        .load::<User>(conn)
        .unwrap_or_default()
        .pop()
}
|
/// Print twice the given value to stdout (no trailing newline).
fn print_double(x: f64) {
    let doubled = x * 2.;
    print!("{}", doubled);
}
/// Demonstrates pass-by-value for `Copy` types: `value` is unchanged after
/// the call even though `print_double` doubles its own copy.
fn main() {
    let value = 4.;
    print_double(value);
    println!(" {}", value);
}
|
use diesel::backend::Backend;
use diesel::deserialize::{self, FromSql};
use diesel::prelude::*;
use diesel::serialize::{self, Output, ToSql};
use diesel::sql_types::*;
use std::io;
use uuid::Uuid;
use crate::core::schema::account;
use crate::core::{generate_uuid, DbConnection, Money, ServiceError, ServiceResult};
/// Represents an account
#[derive(
    Debug,
    Queryable,
    Insertable,
    Identifiable,
    AsChangeset,
    PartialEq,
    Eq,
    Hash,
    Serialize,
    Deserialize,
    Clone,
)]
#[changeset_options(treat_none_as_null = "true")]
#[table_name = "account"]
pub struct Account {
    /// Primary key.
    pub id: Uuid,
    /// Current balance.
    pub credit: Money,
    /// Credit floor — enforcement happens outside this module; confirm there.
    pub minimum_credit: Money,
    /// Display name.
    pub name: String,
    /// Optional contact mail address; unique across accounts.
    pub mail: Option<String>,
    /// Optional login name; unique across accounts.
    pub username: Option<String>,
    /// Optional external account number; unique across accounts.
    pub account_number: Option<String>,
    /// Permission level (DEFAULT / MEMBER / ADMIN).
    pub permission: Permission,
    /// Whether the user want's to receive a monthly report about his/her/* account activities
    pub receives_monthly_report: bool,
}
/// Represents the permission level of an account
///
/// Persisted as a `SmallInt` column via the manual `ToSql`/`FromSql` impls
/// below; ordered DEFAULT < MEMBER < ADMIN through `level()`.
#[derive(
    Debug, Copy, Clone, FromSqlRow, AsExpression, Hash, PartialEq, Eq, Serialize, Deserialize,
)]
#[sql_type = "SmallInt"]
pub enum Permission {
    /// default user without the ability to edit anything
    DEFAULT,
    /// ascii member who can perform transactions
    MEMBER,
    /// ascii executive or admin who can do everything
    ADMIN,
}
impl PartialOrd for Permission {
    // Delegates to `Ord`: permissions form a total order.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Permission {
    // Order by numeric rank: DEFAULT(0) < MEMBER(1) < ADMIN(2).
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.level().cmp(&other.level())
    }
}
impl Permission {
    /// True when this is the lowest level, `Permission::DEFAULT`.
    pub fn is_default(self) -> bool {
        self == Permission::DEFAULT
    }
    /// True when this is `Permission::MEMBER`.
    pub fn is_member(self) -> bool {
        self == Permission::MEMBER
    }
    /// True when this is the highest level, `Permission::ADMIN`.
    pub fn is_admin(self) -> bool {
        self == Permission::ADMIN
    }
    /// Numeric rank of the permission, used by the `Ord` impl.
    pub fn level(self) -> u32 {
        match self {
            Permission::DEFAULT => 0,
            Permission::MEMBER => 1,
            Permission::ADMIN => 2,
        }
    }
}
/// For manual database conversion
impl<DB: Backend> ToSql<SmallInt, DB> for Permission
where
    i16: ToSql<SmallInt, DB>,
{
    fn to_sql<W>(&self, out: &mut Output<W, DB>) -> serialize::Result
    where
        W: io::Write,
    {
        // Encode the variant as its numeric level; the literal's type is
        // pinned to i16 by the `i16: ToSql<SmallInt, DB>` bound.
        let v = match *self {
            Permission::DEFAULT => 0,
            Permission::MEMBER => 1,
            Permission::ADMIN => 2,
        };
        v.to_sql(out)
    }
}
/// For manual database conversion
impl<DB: Backend> FromSql<SmallInt, DB> for Permission
where
    i16: FromSql<SmallInt, DB>,
{
    /// Decode the stored `SmallInt` back into a `Permission`.
    fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
        let v = i16::from_sql(bytes)?;
        match v {
            0 => Ok(Permission::DEFAULT),
            1 => Ok(Permission::MEMBER),
            2 => Ok(Permission::ADMIN),
            // BUG FIX: previously this panicked on unknown values. A corrupt
            // or future row must surface as a deserialization error instead
            // of aborting the process.
            _ => Err(format!("'{}' is not a valid permission!", v).into()),
        }
    }
}
impl Account {
    /// Create a new account with the given permission level.
    ///
    /// The account starts with zero credit and no contact/login data.
    /// Fails if the settings would conflict with an existing account.
    pub fn create(
        conn: &DbConnection,
        name: &str,
        permission: Permission,
    ) -> ServiceResult<Account> {
        use crate::core::schema::account::dsl;
        let a = Account {
            id: generate_uuid(),
            credit: 0,
            minimum_credit: 0,
            name: name.to_owned(),
            mail: None,
            username: None,
            account_number: None,
            permission,
            receives_monthly_report: false,
        };
        if a.has_conflicting_account(conn)? {
            return Err(ServiceError::InternalServerError(
                "Conflicting account settings",
                "The given account settings conflict with the other existing accounts".to_owned(),
            ));
        }
        diesel::insert_into(dsl::account).values(&a).execute(conn)?;
        Ok(a)
    }
    /// Save the current account data to the database.
    ///
    /// Fails if the updated settings would conflict with another account.
    pub fn update(&self, conn: &DbConnection) -> ServiceResult<()> {
        use crate::core::schema::account::dsl;
        if self.has_conflicting_account(conn)? {
            return Err(ServiceError::InternalServerError(
                "Conflicting account settings",
                "The given account settings conflict with the other existing accounts".to_owned(),
            ));
        }
        diesel::update(dsl::account.find(&self.id))
            .set(self)
            .execute(conn)?;
        Ok(())
    }
    /// List all accounts, sorted by name.
    pub fn all(conn: &DbConnection) -> ServiceResult<Vec<Account>> {
        use crate::core::schema::account::dsl;
        let results = dsl::account.order(dsl::name.asc()).load::<Account>(conn)?;
        Ok(results)
    }
    /// Get an account by the `id`
    pub fn get(conn: &DbConnection, id: &Uuid) -> ServiceResult<Account> {
        use crate::core::schema::account::dsl;
        let mut results = dsl::account.filter(dsl::id.eq(id)).load::<Account>(conn)?;
        results.pop().ok_or_else(|| ServiceError::NotFound)
    }
    /// Find an account by UUID string, mail, username, or account number.
    ///
    /// An ambiguous login (matching more than one account) is reported as
    /// `NotFound` rather than picking one arbitrarily.
    pub fn find_by_login(conn: &DbConnection, login: &str) -> ServiceResult<Account> {
        use crate::core::schema::account::dsl;
        let mut results = match Uuid::parse_str(login) {
            Ok(uuid) => dsl::account
                .filter(dsl::id.eq(uuid))
                .load::<Account>(conn)?,
            Err(_) => dsl::account
                .filter(
                    dsl::mail
                        .eq(login)
                        .or(dsl::username.eq(login))
                        .or(dsl::account_number.eq(login)),
                )
                .load::<Account>(conn)?,
        };
        if results.len() > 1 {
            return Err(ServiceError::NotFound);
        }
        results.pop().ok_or_else(|| ServiceError::NotFound)
    }
    /// Check whether any *other* account already uses this account's mail,
    /// username, or account number; `Ok(true)` means a conflict exists.
    ///
    /// FIX: the former `exist_conficting_account` returned `false` when a
    /// conflicting account *did* exist — inverted relative to its name —
    /// forcing both call sites to negate the result. Renamed and
    /// un-inverted for readability; net behavior is unchanged.
    fn has_conflicting_account(&self, conn: &DbConnection) -> ServiceResult<bool> {
        use crate::core::schema::account::dsl;
        if let Some(mail) = &self.mail {
            let results = dsl::account
                .filter(dsl::id.ne(self.id).and(dsl::mail.eq(mail)))
                .load::<Account>(conn)?;
            if !results.is_empty() {
                return Ok(true);
            }
        }
        if let Some(username) = &self.username {
            let results = dsl::account
                .filter(dsl::id.ne(self.id).and(dsl::username.eq(username)))
                .load::<Account>(conn)?;
            if !results.is_empty() {
                return Ok(true);
            }
        }
        if let Some(account_number) = &self.account_number {
            let results = dsl::account
                .filter(
                    dsl::id
                        .ne(self.id)
                        .and(dsl::account_number.eq(account_number)),
                )
                .load::<Account>(conn)?;
            if !results.is_empty() {
                return Ok(true);
            }
        }
        Ok(false)
    }
}
|
// This file is part of Substrate.
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Proc macro for a npos compact assignment.
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span, TokenStream as TokenStream2};
use proc_macro_crate::crate_name;
use quote::quote;
use syn::parse::{Parse, ParseStream, Result};
mod assignment;
mod codec;
// prefix used for struct fields in compact.
// FIX: `&'static str` in a const is redundant — consts are implicitly
// 'static (clippy: redundant_static_lifetimes).
const PREFIX: &str = "votes";

/// Build a `syn::Error` at the macro call site with the given message.
pub(crate) fn syn_err(message: &'static str) -> syn::Error {
    syn::Error::new(Span::call_site(), message)
}
/// Generates a struct to store the election result in a small way. This can encode a structure
/// which is the equivalent of a `sp_npos_elections::Assignment<_>`.
///
/// The following data types can be configured by the macro.
///
/// - The identifier of the voter. This can be any type that supports `parity-scale-codec`'s compact
/// encoding.
/// - The identifier of the target. This can be any type that supports `parity-scale-codec`'s
/// compact encoding.
/// - The accuracy of the ratios. This must be one of the `PerThing` types defined in
/// `sp-arithmetic`.
///
/// Moreover, the maximum number of edges per voter (distribution per assignment) also need to be
/// specified. Attempting to convert from/to an assignment with more distributions will fail.
///
///
/// For example, the following generates a public struct with name `TestSolution` with `u16` voter
/// type, `u8` target type and `Perbill` accuracy with maximum of 8 edges per voter.
///
/// ```ignore
/// generate_solution_type!(pub struct TestSolution<u16, u8, Perbill>::(8))
/// ```
///
/// The given struct provides function to convert from/to Assignment:
///
/// - [`from_assignment()`].
/// - [`fn into_assignment()`].
///
/// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could
/// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding
/// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works.
///
/// ```ignore
/// generate_solution_type!(
/// #[compact]
/// pub struct TestSolutionCompact<u16, u8, Perbill>::(8)
/// )
/// ```
#[proc_macro]
pub fn generate_solution_type(item: TokenStream) -> TokenStream {
    // Parse the struct declaration plus the optional `#[compact]` attribute
    // (parsing is implemented by `SolutionDef`).
    let SolutionDef { vis, ident, count, voter_type, target_type, weight_type, compact_encoding } =
        syn::parse_macro_input!(item as SolutionDef);
    // Resolve `_npos` to this crate or `sp-npos-elections`; on failure emit
    // a compile error token stream instead of panicking.
    let imports = imports().unwrap_or_else(|e| e.to_compile_error());
    // The compact solution struct itself (fields + len/edge/unique/remove impls).
    let solution_struct = struct_def(
        vis,
        ident.clone(),
        count,
        voter_type.clone(),
        target_type.clone(),
        weight_type.clone(),
        compact_encoding,
    )
    .unwrap_or_else(|e| e.to_compile_error());
    // from/into `Assignment` conversions for the generated struct.
    let assignment_impls = assignment::assignment(
        ident.clone(),
        voter_type.clone(),
        target_type.clone(),
        weight_type.clone(),
        count,
    );
    quote!(
        #imports
        #solution_struct
        #assignment_impls
    )
    .into()
}
/// Generate the compact solution struct and its inherent impls.
/// `count` is the maximum number of distributions (edges) per voter.
fn struct_def(
    vis: syn::Visibility,
    ident: syn::Ident,
    count: usize,
    voter_type: syn::Type,
    target_type: syn::Type,
    weight_type: syn::Type,
    compact_encoding: bool,
) -> Result<TokenStream2> {
    // The field layout below distinguishes 1, 2, and 3..=count
    // distributions, so anything smaller than 3 makes no sense.
    if count <= 2 {
        Err(syn_err("cannot build compact solution struct with capacity less than 3."))?
    }
    // Field for single-target assignments: (voter, target); the weight is
    // implied (everything goes to the one target).
    let singles = {
        let name = field_name_for(1);
        quote!(
            #name: Vec<(#voter_type, #target_type)>,
        )
    };
    // Field for two-target assignments: one explicit (target, weight) pair
    // plus a final target that receives the remaining weight.
    let doubles = {
        let name = field_name_for(2);
        quote!(
            #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>,
        )
    };
    // Fields for 3..=count targets: `c - 1` explicit (target, weight) pairs
    // in a fixed-size array, plus the final remainder target.
    let rest = (3..=count)
        .map(|c| {
            let field_name = field_name_for(c);
            let array_len = c - 1;
            quote!(
                #field_name: Vec<(
                    #voter_type,
                    [(#target_type, #weight_type); #array_len],
                    #target_type
                )>,
            )
        })
        .collect::<TokenStream2>();
    // Method bodies, generated per arity by the helpers below.
    let len_impl = len_impl(count);
    let edge_count_impl = edge_count_impl(count);
    let unique_targets_impl = unique_targets_impl(count);
    let remove_voter_impl = remove_voter_impl(count);
    let derives_and_maybe_compact_encoding = if compact_encoding {
        // custom compact encoding.
        let compact_impl = codec::codec_impl(
            ident.clone(),
            voter_type.clone(),
            target_type.clone(),
            weight_type.clone(),
            count,
        );
        quote! {
            #compact_impl
            #[derive(Default, PartialEq, Eq, Clone, Debug)]
        }
    } else {
        // automatically derived.
        quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _npos::codec::Encode, _npos::codec::Decode)])
    };
    Ok(quote! (
        /// A struct to encode a election assignment in a compact way.
        #derives_and_maybe_compact_encoding
        #vis struct #ident { #singles #doubles #rest }
        impl _npos::VotingLimit for #ident {
            const LIMIT: usize = #count;
        }
        impl #ident {
            /// Get the length of all the assignments that this type is encoding. This is basically
            /// the same as the number of assignments, or the number of voters in total.
            pub fn len(&self) -> usize {
                let mut all_len = 0usize;
                #len_impl
                all_len
            }
            /// Get the total count of edges.
            pub fn edge_count(&self) -> usize {
                let mut all_edges = 0usize;
                #edge_count_impl
                all_edges
            }
            /// Get the number of unique targets in the whole struct.
            ///
            /// Once presented with a list of winners, this set and the set of winners must be
            /// equal.
            ///
            /// The resulting indices are sorted.
            pub fn unique_targets(&self) -> Vec<#target_type> {
                let mut all_targets: Vec<#target_type> = Vec::with_capacity(self.average_edge_count());
                let mut maybe_insert_target = |t: #target_type| {
                    match all_targets.binary_search(&t) {
                        Ok(_) => (),
                        Err(pos) => all_targets.insert(pos, t)
                    }
                };
                #unique_targets_impl
                all_targets
            }
            /// Get the average edge count.
            pub fn average_edge_count(&self) -> usize {
                self.edge_count().checked_div(self.len()).unwrap_or(0)
            }
            /// Remove a certain voter.
            ///
            /// This will only search until the first instance of `to_remove`, and return true. If
            /// no instance is found (no-op), then it returns false.
            ///
            /// In other words, if this return true, exactly one element must have been removed from
            /// `self.len()`.
            pub fn remove_voter(&mut self, to_remove: #voter_type) -> bool {
                #remove_voter_impl
                return false
            }
        }
    ))
}
/// Generate the body of `remove_voter`: one linear scan per field, removing
/// the first entry whose voter matches `to_remove`.
fn remove_voter_impl(count: usize) -> TokenStream2 {
    // The 1-distribution field stores `(voter, target)` pairs.
    let field_name = field_name_for(1);
    let single = quote! {
        if let Some(idx) = self.#field_name.iter().position(|(x, _)| *x == to_remove) {
            self.#field_name.remove(idx);
            return true
        }
    };
    // FIX (dedup): every field for 2..=count distributions stores a 3-tuple
    // `(voter, ..., target)`, so the former separate `double` template was
    // byte-identical to the one for the rest — one range covers them all.
    // The generated tokens are unchanged.
    let rest = (2..=count)
        .map(|c| {
            let field_name = field_name_for(c);
            quote! {
                if let Some(idx) = self.#field_name.iter().position(|(x, _, _)| *x == to_remove) {
                    self.#field_name.remove(idx);
                    return true
                }
            }
        })
        .collect::<TokenStream2>();
    quote! {
        #single
        #rest
    }
}
fn len_impl(count: usize) -> TokenStream2 {
(1..=count)
.map(|c| {
let field_name = field_name_for(c);
quote!(
all_len = all_len.saturating_add(self.#field_name.len());
)
})
.collect::<TokenStream2>()
}
fn edge_count_impl(count: usize) -> TokenStream2 {
(1..=count)
.map(|c| {
let field_name = field_name_for(c);
quote!(
all_edges = all_edges.saturating_add(
self.#field_name.len().saturating_mul(#c as usize)
);
)
})
.collect::<TokenStream2>()
}
fn unique_targets_impl(count: usize) -> TokenStream2 {
let unique_targets_impl_single = {
let field_name = field_name_for(1);
quote! {
self.#field_name.iter().for_each(|(_, t)| {
maybe_insert_target(*t);
});
}
};
let unique_targets_impl_double = {
let field_name = field_name_for(2);
quote! {
self.#field_name.iter().for_each(|(_, (t1, _), t2)| {
maybe_insert_target(*t1);
maybe_insert_target(*t2);
});
}
};
let unique_targets_impl_rest = (3..=count)
.map(|c| {
let field_name = field_name_for(c);
quote! {
self.#field_name.iter().for_each(|(_, inners, t_last)| {
inners.iter().for_each(|(t, _)| {
maybe_insert_target(*t);
});
maybe_insert_target(*t_last);
});
}
})
.collect::<TokenStream2>();
quote! {
#unique_targets_impl_single
#unique_targets_impl_double
#unique_targets_impl_rest
}
}
/// Generate the statements that bring `sp-npos-elections` into scope as
/// `_npos` for the generated code: `use crate as _npos` when expanding
/// inside the crate itself, otherwise an `extern crate` alias resolved via
/// `crate_name`.
fn imports() -> Result<TokenStream2> {
	// `CARGO_PKG_NAME` is set by cargo for every build, but can be absent
	// when the proc-macro is expanded by other tooling — avoid the previous
	// `unwrap()` panic and just fall through to the lookup path.
	if std::env::var("CARGO_PKG_NAME").as_deref() == Ok("sp-npos-elections") {
		Ok(quote! {
			use crate as _npos;
		})
	} else {
		match crate_name("sp-npos-elections") {
			Ok(sp_npos_elections) => {
				let ident = syn::Ident::new(&sp_npos_elections, Span::call_site());
				Ok(quote!( extern crate #ident as _npos; ))
			},
			Err(e) => Err(syn::Error::new(Span::call_site(), &e)),
		}
	}
}
/// Parsed arguments of the solution-type-generating macro invocation.
struct SolutionDef {
	// Visibility of the generated struct.
	vis: syn::Visibility,
	// Name of the generated struct.
	ident: syn::Ident,
	// Type used to index voters (first generic argument).
	voter_type: syn::Type,
	// Type used to index targets/candidates (second generic argument).
	target_type: syn::Type,
	// Type of the per-edge weight (third generic argument).
	weight_type: syn::Type,
	// Number of per-degree fields to generate; also emitted as
	// `VotingLimit::LIMIT` on the generated struct.
	count: usize,
	// Whether `#[compact]` was given, switching the codec derives.
	compact_encoding: bool,
}
/// Look for an optional leading `#[compact]` attribute on the macro input.
///
/// Returns `Ok(true)` when exactly `#[compact]` is present, `Ok(false)` when
/// there is not exactly one attribute, and an error for any other single
/// attribute.
fn check_compact_attr(input: ParseStream) -> Result<bool> {
	let mut attrs = input.call(syn::Attribute::parse_outer).unwrap_or_default();
	if attrs.len() != 1 {
		return Ok(false);
	}
	let error_msg = "generate_solution_type macro can only accept #[compact] attribute.";
	let attr = attrs.pop().expect("Vec with len 1 can be popped.");
	if attr.path.segments.len() != 1 {
		return Err(syn_err(error_msg));
	}
	let segment = attr.path.segments.first().expect("Vec with len 1 can be popped.");
	if segment.ident == Ident::new("compact", Span::call_site()) {
		Ok(true)
	} else {
		Err(syn_err(error_msg))
	}
}
/// Parses input of the form: `#[compact] pub struct CompactName::<u32, u32, u32>(16)`
impl Parse for SolutionDef {
	fn parse(input: ParseStream) -> syn::Result<Self> {
		// optional #[compact]
		let compact_encoding = check_compact_attr(input)?;
		// <vis> struct <name>
		let vis: syn::Visibility = input.parse()?;
		let _ = <syn::Token![struct]>::parse(input)?;
		let ident: syn::Ident = input.parse()?;
		// ::<V, T, W> — exactly three generic type arguments:
		// voter index, target index, weight.
		let _ = <syn::Token![::]>::parse(input)?;
		let generics: syn::AngleBracketedGenericArguments = input.parse()?;
		if generics.args.len() != 3 {
			return Err(syn_err("Must provide 3 generic args."))
		}
		let mut types: Vec<syn::Type> = generics
			.args
			.iter()
			.map(|t| match t {
				syn::GenericArgument::Type(ty) => Ok(ty.clone()),
				_ => Err(syn_err("Wrong type of generic provided. Must be a `type`.")),
			})
			.collect::<Result<_>>()?;
		// Popped in reverse declaration order: W, then T, then V.
		let weight_type = types.pop().expect("Vector of length 3 can be popped; qed");
		let target_type = types.pop().expect("Vector of length 2 can be popped; qed");
		let voter_type = types.pop().expect("Vector of length 1 can be popped; qed");
		// (<count>) — must be a parenthesized integer literal.
		let count_expr: syn::ExprParen = input.parse()?;
		let expr = count_expr.expr;
		let expr_lit = match *expr {
			syn::Expr::Lit(count_lit) => count_lit.lit,
			_ => return Err(syn_err("Count must be literal.")),
		};
		let int_lit = match expr_lit {
			syn::Lit::Int(int_lit) => int_lit,
			_ => return Err(syn_err("Count must be int literal.")),
		};
		let count = int_lit.base10_parse::<usize>()?;
		Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding })
	}
}
/// Build the identifier of the `n`-degree field: `{PREFIX}{n}`.
fn field_name_for(n: usize) -> Ident {
	let name = format!("{}{}", PREFIX, n);
	Ident::new(&name, Span::call_site())
}
|
use raylib::prelude::*;
use specs::prelude::*;
use std::sync::{Arc, Mutex};
/// ECS component wrapping a raylib `Model` so it can be stored in specs storage.
pub struct ModelComponent {
    pub model : Model,
}
// SAFETY(review): `Model` is not auto-`Send`/`Sync` (hence these unsafe
// impls). This asserts it is never accessed from two threads concurrently —
// TODO confirm the dispatcher actually guarantees that.
unsafe impl Send for ModelComponent{}
unsafe impl Sync for ModelComponent{}
impl Component for ModelComponent {
    // Dense storage — appropriate when most entities carry a model.
    type Storage = VecStorage<Self>;
}
/// ECS component holding a raylib `Camera` for the viewing entity.
struct CameraComponent {
    camera : Camera,
}
|
/// Count the number of ways to make `cents` using unlimited copies of the
/// given `coins` denominations (classic coin-change counting DP).
///
/// Returns 1 for `cents == 0` (the empty selection) and 0 when no
/// combination fits. Zero-valued coins are skipped — each pass with
/// `coin == 0` would otherwise double every count.
fn make_change(coins: &[usize], cents: usize) -> usize {
    // ways[a] = number of ways to form amount `a` with the coins seen so far.
    let mut ways = vec![0usize; cents + 1];
    ways[0] = 1; // one way to make zero: pick nothing
    for &coin in coins {
        if coin == 0 {
            continue; // guard against a degenerate denomination
        }
        // Iterating amounts upward lets each coin be reused any number of
        // times (unbounded-knapsack ordering).
        for amount in coin..=cents {
            ways[amount] += ways[amount - coin];
        }
    }
    ways[cents]
}
/// Print two classic change-making answers: ways to make a dollar with US
/// coins, and ways to make $1000 when half-dollars and dollars are allowed.
fn main() {
    let us_coins = [1, 5, 10, 25];
    println!("{}", make_change(&us_coins, 100));
    println!("{}", make_change(&[1, 5, 10, 25, 50, 100], 100_000));
}
|
use vek::Vec2;
use specs::prelude::*;
use crate::components::*;
/// System that advances every entity's position along its velocity vector.
pub struct Physics;

impl<'a> System<'a> for Physics {
    type SystemData = (WriteStorage<'a, Position>, ReadStorage<'a, Velocity>);

    fn run(&mut self, (mut positions, velocities): Self::SystemData) {
        for (Position(pos), vel) in (&mut positions, &velocities).join() {
            // Screen space grows downward, so the y component is negated to
            // make a positive angle move the entity up.
            let direction = Vec2 {
                x: vel.angle.cos(),
                y: -vel.angle.sin(),
            };
            let step = direction * (vel.speed as f64);
            *pos = *pos + step;
        }
    }
}
|
use crate::RrtVec3::Vec3;
pub fn write_color(v: Vec3) {
println!("{} {} {}", (255.999 * v.x()) as i64, (255.999 * v.y()) as i64, (255.999 * v.z()) as i64);
} |
/// An infinite iterator over the prime numbers (2, 3, 5, ...), generated by
/// trial division against the primes found so far.
pub struct Primes {
    // All primes yielded so far, in ascending order; used as trial divisors.
    previous: Vec<usize>,
}
impl Primes {
    /// Create a fresh generator positioned before the first prime.
    pub fn new() -> Primes {
        Primes{ previous: Vec::new() }
    }
}
impl Default for Primes {
    fn default() -> Self {
        Self::new()
    }
}
impl Iterator for Primes {
    type Item = usize;
    /// Yield the next prime; never returns `None`.
    fn next(&mut self) -> Option<usize> {
        // Resume just past the last prime found, or at 2 on the first call.
        let start = self.previous.last().map_or(2, |&n| n + 1);
        let next_prime = (start..)
            .find(|&n| {
                // Trial-divide only by primes p with p*p <= n. The integer
                // comparison replaces the original `(n as f64).sqrt()` bound,
                // which can be off by one for very large n due to float
                // rounding.
                self.previous
                    .iter()
                    .take_while(|&&p| p * p <= n)
                    .all(|&p| n % p != 0)
            })
            .expect("there is always a next prime");
        self.previous.push(next_prime);
        Some(next_prime)
    }
}
|
use serde::{Deserialize, Serialize};
use super::category_taxonomy;
use super::{content, publisher};
use crate::openrtb3::bool;
/// An application object (OpenRTB-style), (de)serialized with serde.
/// Field semantics presumably follow the OpenRTB/AdCOM spec — TODO confirm.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct App {
	id: Option<String>,
	name: Option<String>,
	// Serialized under the key `pub` (a Rust keyword, hence the rename).
	#[serde(rename = "pub")]
	publisher: Option<publisher::Publisher>,
	content: Option<content::Content>,
	domain: Option<String>,
	// Category lists; `cattax` below identifies the taxonomy they use.
	cat: Vec<String>,
	sectcat: Vec<String>,
	pagecat: Vec<String>,
	cattax: Option<category_taxonomy::CategoryTaxonomy>,
	privpolicy: Option<bool::Bool>,
	keywords: Option<String>,
	bundle: Option<String>,
	storeid: Option<String>,
	// Serialized under the key `ver`.
	#[serde(rename = "ver")]
	app_version: Option<String>,
	// Defaults to false when the field is absent from the input.
	#[serde(default = "bool::Bool::default_false")]
	is_paid: bool::Bool,
	// Extension object; currently carries no fields.
	ext: Option<AppExt>,
}
/// Placeholder for exchange-specific extensions to [`App`]; currently empty.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct AppExt {}
|
use actix_cors::Cors;
use actix_web::{web, App, HttpServer};
use config::Config;
use std::collections::HashMap;
mod order;
mod utils;
/// Bind address for the HTTP server (all interfaces, port 8004).
pub const ADDR: &str = "0.0.0.0:8004";
lazy_static::lazy_static! {
    /// Secrets loaded once, on first access, from a `secrets.*` config file
    /// in the working directory. Panics at first access if the file is
    /// missing or is not a flat string-to-string map.
    static ref SECRETS: HashMap<String, String> = {
        let mut config = Config::default();
        config.merge(config::File::with_name("secrets")).unwrap();
        config.try_into::<HashMap<String, String>>().unwrap()
    };
}
/// Start the order service on `ADDR`: POST `/orders` places an order,
/// PUT `/orders/{id}` marks one. Blocks until the server shuts down;
/// panics if the address cannot be bound or the server fails to run.
fn main() {
    println!("Listening on {}", ADDR);
    HttpServer::new(|| {
        App::new()
            // Permissive default CORS — NOTE(review): tighten for production.
            .wrap(Cors::new())
            .service(web::scope("/orders")
                .route("", web::post().to(order::place::place))
                .route("/{id}", web::put().to(order::mark::mark)))
    })
    .bind(ADDR)
    .unwrap()
    .run()
    .unwrap();
}
|
extern crate rand;
extern crate sarkara;
use rand::{thread_rng, Rng, ThreadRng};
use sarkara::aead::AeadCipher;
use sarkara::kex::KeyExchange;
use sarkara::sealedbox::SealedBox;
use sarkara::kex::kyber::Kyber;
use sarkara::aead::sparx256colm0::Sparx256Colm0;
use sarkara::aead::norx6441::Norx;
/// Round-trip check for a key-exchange/AEAD pairing: run the KEX `K`, then
/// seal and open plaintexts of lengths 1..=64 with the derived cipher `AE`,
/// asserting the decryption verdict and that the plaintext survives.
fn test_sealedbox<K: KeyExchange, AE: AeadCipher>() {
    // Bob publishes a keypair; Alice runs the exchange against it, so both
    // ends derive an AEAD state.
    let (bob_priv, bob_pub) = K::keypair(thread_rng());
    let (alice_msg, alice_ae) = K::send::<ThreadRng, AE>(thread_rng(), &bob_pub);
    let bob_ae = K::recv::<AE>(&bob_priv, &alice_msg);
    for i in 1..65 {
        let mut nonce = vec![0; AE::NONCE_LENGTH];
        // Random-length associated data (0..=33 bytes), left zeroed.
        let aad = vec![0; thread_rng().gen_range(0, 34)];
        let mut pt = vec![0; i];
        // Ciphertext buffer includes room for the authentication tag.
        let mut ct = vec![0; pt.len() + AE::TAG_LENGTH];
        let mut ot = vec![0; pt.len()];
        thread_rng().fill_bytes(&mut nonce);
        thread_rng().fill_bytes(&mut pt);
        // Alice encrypts, Bob decrypts; `r` is the authentication verdict.
        alice_ae.seal(&nonce, &aad, &pt, &mut ct).unwrap();
        let r = bob_ae.open(&nonce, &aad, &ct, &mut ot).unwrap();
        assert!(r);
        assert_eq!(pt, ot);
    }
}
// Concrete instantiations of the generic sealed-box round-trip test.
#[test]
fn test_kyber_sparx256colm0() {
    test_sealedbox::<Kyber, Sparx256Colm0>();
}
#[test]
fn test_kyber_norx() {
    test_sealedbox::<Kyber, Norx>();
}
|
use select::predicate::*;
use select::document::Document;
use regex::Regex;
use std::fs::File;
use std::io::BufReader;
/// Reads an HTML document from `in_file`, strips the markup, optionally
/// case-folds and strips punctuation, removes trailing page numbers, and
/// returns the cleaned text.
pub fn parse(in_file: File, case_fold: bool, strip_punc: bool) -> String {
    let doc = Document::from_read(BufReader::new(in_file)).unwrap();
    remove_trailing_numbers(format(strip_html(doc), case_fold, strip_punc))
}
/// Extract the document's text.
///
/// Assumption made for this project: all documents have exactly one `<pre>`
/// tag and all text is within that tag.
///
/// # Panics
/// Panics when the document does not contain exactly one `<pre>` tag.
fn strip_html(doc: Document) -> String {
    // Collect once instead of scanning the document twice (count + nth).
    let pres: Vec<_> = doc.find(Name("pre")).collect();
    if pres.len() != 1 {
        // Fixed typo in the panic message ("exectly" -> "exactly").
        panic!("Expected HTML to have exactly one pre tag, it does not");
    }
    pres[0].text()
}
/// Optionally removes punctuation and converts the string to lowercase,
/// depending on the provided flags.
///
/// Punctuation handling: `-` and `,` become spaces, other non-word
/// non-space characters are deleted, and whitespace runs collapse to a
/// single space.
fn format(line: String, case_fold: bool, strip_punc: bool) -> String {
    // Take ownership directly — the previous `line.clone()` copied the
    // whole string for no reason.
    let mut s = line;
    if strip_punc {
        // Compile the regexes only when they are actually used.
        let re_punct_to_space = Regex::new(r"[-,]").unwrap();
        let re_punct_remove = Regex::new(r"[\W&&\S]").unwrap();
        let re_whitespace = Regex::new(r"\s+").unwrap();
        s = String::from(re_punct_to_space.replace_all(s.as_str(), " "));
        s = String::from(re_punct_remove.replace_all(s.as_str(), ""));
        s = String::from(re_whitespace.replace_all(s.as_str(), " "));
    }
    if case_fold {
        s = s.to_lowercase();
    }
    s
}
/// Removes the trailing numbers at the end of any document.
///
/// Walks the whitespace-separated terms from the end, dropping every term
/// that parses as an integer, then rebuilds the remaining text separated by
/// single spaces (with a leading space, as the original implementation
/// produced). When *every* term is numeric, the first term is kept.
///
/// # Panics
/// Panics when `contents` contains no terms at all.
pub fn remove_trailing_numbers(contents: String) -> String {
    let mut term_iter = contents.split_whitespace().rev();
    let mut n = term_iter.next().expect("Document is empty");
    let mut still_number = n.parse::<usize>().is_ok();
    while still_number {
        match term_iter.next() {
            Some(next) => {
                n = next;
                still_number = n.parse::<usize>().is_ok();
            }
            // All terms were numeric: stop. (The previous `if let` left
            // `still_number` true on `None`, spinning forever on
            // all-numeric documents.)
            None => still_number = false,
        }
    }
    let mut ret = String::new();
    for term in term_iter.rev() {
        ret.push(' ');
        ret.push_str(term);
    }
    ret.push(' ');
    ret.push_str(n);
    ret
}
|
use log::{debug, error, info, trace};
use super::*;
use crate::db;
use crate::prelude::*;
use dotenv::dotenv;
use postgres::{Connection, TlsMode};
use rayon::prelude::*;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use std::{env, fmt::Write};
/// Connect to the PostgreSQL database named by the `DATABASE_URL`
/// environment variable (optionally loaded from a `.env` file).
pub fn establish_connection() -> Result<Connection> {
    // A missing `.env` file is not an error — the variable may be set
    // directly in the environment — so ignore dotenv failures instead of
    // propagating them (the previous `dotenv()?` made `.env` mandatory).
    dotenv().ok();
    let database_url = env::var("DATABASE_URL")?;
    Ok(Connection::connect(database_url, TlsMode::None)?)
}
/// Build the SQL statement(s) bulk-inserting `blocks`.
///
/// Inputs above 9000 rows are recursively halved into multiple statements —
/// presumably to keep individual statement size manageable. Returns an empty
/// Vec for empty input.
fn create_bulk_insert_blocks_query(blocks: &[db::Block]) -> Vec<String> {
    if blocks.is_empty() {
        return vec![];
    }
    if blocks.len() > 9000 {
        let mid = blocks.len() / 2;
        let mut p1 = create_bulk_insert_blocks_query(&blocks[..mid]);
        let mut p2 = create_bulk_insert_blocks_query(&blocks[mid..]);
        p1.append(&mut p2);
        return p1;
    }
    let mut q: String =
        "INSERT INTO blocks (height, hash, prev_hash, merkle_root, time) VALUES".into();
    for (i, block) in blocks.iter().enumerate() {
        if i > 0 {
            q.push(',')
        }
        // `'\x…'` is a Postgres bytea literal; hashes are hex strings.
        q.write_fmt(format_args!(
            "({},'\\x{}','\\x{}','\\x{}', {})",
            block.height, block.hash, block.prev_hash, block.merkle_root, block.time
        ))
        .unwrap();
    }
    q.write_str(";").expect("Write to string can't fail");
    vec![q]
}
/// Build the SQL statement(s) bulk-inserting `txs`, resolving each tx's
/// block id through `block_ids`.
///
/// Inputs above 9000 rows are recursively halved into multiple statements.
/// Returns an empty Vec for empty input. Panics if a tx's block hash is
/// missing from `block_ids`.
fn create_bulk_insert_txs_query(txs: &[Tx], block_ids: &HashMap<BlockHash, i64>) -> Vec<String> {
    if txs.is_empty() {
        return vec![];
    }
    if txs.len() > 9000 {
        let mid = txs.len() / 2;
        let mut p1 = create_bulk_insert_txs_query(&txs[..mid], block_ids);
        let mut p2 = create_bulk_insert_txs_query(&txs[mid..], block_ids);
        p1.append(&mut p2);
        return p1;
    }
    let mut q: String = "INSERT INTO txs (block_id, hash, coinbase) VALUES".into();
    for (i, tx) in txs.iter().enumerate() {
        if i > 0 {
            q.push(',')
        }
        q.write_fmt(format_args!(
            "({},'\\x{}',{})",
            block_ids[&tx.block_hash], tx.hash, tx.coinbase,
        ))
        .unwrap();
    }
    q.write_str(";").expect("Write to string can't fail");
    vec![q]
}
/// Build the SQL statement(s) bulk-inserting `outputs`, resolving each
/// output's tx id through `tx_ids`.
///
/// Inputs above 9000 rows are recursively halved into multiple statements.
/// Returns an empty Vec for empty input. Panics if an output's txid is
/// missing from `tx_ids`.
fn create_bulk_insert_outputs_query(
    outputs: &[Output],
    tx_ids: &HashMap<TxHash, i64>,
) -> Vec<String> {
    if outputs.is_empty() {
        return vec![];
    }
    if outputs.len() > 9000 {
        let mid = outputs.len() / 2;
        let mut p1 = create_bulk_insert_outputs_query(&outputs[..mid], tx_ids);
        let mut p2 = create_bulk_insert_outputs_query(&outputs[mid..], tx_ids);
        p1.append(&mut p2);
        return p1;
    }
    let mut q: String =
        "INSERT INTO outputs (tx_id, tx_idx, value, address, coinbase) VALUES ".into();
    for (i, output) in outputs.iter().enumerate() {
        if i > 0 {
            q.push(',')
        }
        q.write_fmt(format_args!(
            "({},{},{},{},{})",
            tx_ids[&output.out_point.txid],
            output.out_point.vout,
            output.value,
            // An absent address becomes SQL NULL.
            output
                .address
                .as_ref()
                .map_or("null".into(), |s| format!("'{}'", s)),
            output.coinbase,
        ))
        .unwrap();
    }
    q.write_str(";").expect("Write to string can't fail");
    vec![q]
}
/// Build the SQL statement(s) bulk-inserting `inputs`, resolving the spent
/// output's id through `outputs` and the spending tx's id through `tx_ids`.
///
/// Inputs above 9000 rows are recursively halved into multiple statements.
/// Returns an empty Vec for empty input. Panics on a missing out-point or
/// tx id.
fn create_bulk_insert_inputs_query(
    inputs: &[Input],
    outputs: &HashMap<OutPoint, UtxoSetEntry>,
    tx_ids: &HashMap<TxHash, i64>,
) -> Vec<String> {
    if inputs.is_empty() {
        return vec![];
    }
    if inputs.len() > 9000 {
        let mid = inputs.len() / 2;
        let mut p1 = create_bulk_insert_inputs_query(&inputs[..mid], outputs, tx_ids);
        let mut p2 = create_bulk_insert_inputs_query(&inputs[mid..], outputs, tx_ids);
        p1.append(&mut p2);
        return p1;
    }
    let mut q: String = "INSERT INTO inputs (output_id, tx_id) VALUES ".into();
    for (i, input) in inputs.iter().enumerate() {
        if i > 0 {
            q.push(',')
        }
        q.write_fmt(format_args!(
            "({},{})",
            outputs[&input.out_point].id, tx_ids[&input.tx_id]
        ))
        .unwrap();
    }
    q.write_str(";").expect("Write to string can't fail");
    vec![q]
}
/// Build the SQL statement(s) that fetch output id/value rows for the given
/// out-points, skipping orphaned blocks.
///
/// Inputs above 1500 rows are recursively halved into multiple statements.
///
/// NOTE(review): the name has a typo (`crate_` should be `create_`) — kept
/// to avoid breaking callers.
fn crate_fetch_outputs_query(outputs: &[OutPoint]) -> Vec<String> {
    // Guard added for consistency with the other query builders: an empty
    // input would otherwise produce `… IN ( VALUES  );`, which is invalid SQL.
    if outputs.is_empty() {
        return vec![];
    }
    if outputs.len() > 1500 {
        let mid = outputs.len() / 2;
        let mut p1 = crate_fetch_outputs_query(&outputs[..mid]);
        let mut p2 = crate_fetch_outputs_query(&outputs[mid..]);
        p1.append(&mut p2);
        return p1;
    }
    let mut q: String = "SELECT outputs.id, outputs.value, txs.hash, outputs.tx_idx FROM outputs JOIN txs ON (txs.id = outputs.tx_id) JOIN blocks ON txs.block_id = blocks.id WHERE blocks.orphaned = false AND (txs.hash, outputs.tx_idx) IN ( VALUES ".into();
    for (i, output) in outputs.iter().enumerate() {
        if i > 0 {
            q.push(',')
        }
        q.write_fmt(format_args!(
            "('\\x{}'::bytea,{})",
            output.txid, output.vout
        ))
        .unwrap();
    }
    q.write_str(" );").expect("Write to string can't fail");
    vec![q]
}
/// Database id and value of a single unspent output.
#[derive(Copy, Clone, PartialEq, Eq)]
struct UtxoSetEntry {
    // `outputs.id` column in the db.
    id: i64,
    // Output value — presumably satoshis; TODO confirm units.
    value: u64,
}
#[derive(Default)]
/// Cache of utxo set: maps out-points to their db id and value so inputs
/// can usually be resolved without a db round-trip.
struct UtxoSetCache {
    entries: HashMap<OutPoint, UtxoSetEntry>,
}
impl UtxoSetCache {
    /// Record a freshly inserted output.
    fn insert(&mut self, point: OutPoint, id: i64, value: u64) {
        self.entries.insert(point, UtxoSetEntry { id, value });
    }
    /// Consume `outputs`
    ///
    /// Returns:
    /// * Mappings for Outputs that were found
    /// * Vector of outputs that were missing from the set
    fn consume(
        &mut self,
        outputs: impl Iterator<Item = OutPoint>,
    ) -> (HashMap<OutPoint, UtxoSetEntry>, Vec<OutPoint>) {
        let mut found = HashMap::default();
        let mut missing = vec![];
        for output in outputs {
            // Entries are removed on consumption — an output can only be
            // spent once.
            match self.entries.remove(&output) {
                Some(details) => {
                    found.insert(output, details);
                }
                None => missing.push(output),
            }
        }
        (found, missing)
    }
    /// Look up outputs that were not in the cache directly in the db.
    fn fetch_missing(
        conn: &Connection,
        missing: Vec<OutPoint>,
    ) -> Result<HashMap<OutPoint, UtxoSetEntry>> {
        let missing_len = missing.len();
        debug!("Fetching {} missing outputs", missing_len);
        if missing.is_empty() {
            return Ok(HashMap::default());
        }
        let start = Instant::now();
        let mut out = HashMap::default();
        // (Removed a redundant `missing.into_iter().collect::<Vec<_>>()` —
        // `missing` is already a Vec.)
        for q in crate_fetch_outputs_query(&missing) {
            for row in &conn.query(&q, &[])? {
                let tx_hash = {
                    // Bytes are stored in the opposite order to what
                    // `BlockHash::from` expects, so reverse them.
                    let mut human_bytes = row.get::<_, Vec<u8>>(2);
                    human_bytes.reverse();
                    BlockHash::from(human_bytes.as_slice())
                };
                out.insert(
                    OutPoint {
                        txid: tx_hash,
                        vout: row.get::<_, i32>(3) as u32,
                    },
                    UtxoSetEntry {
                        id: row.get(0),
                        value: row.get::<_, i64>(1) as u64,
                    },
                );
            }
        }
        trace!(
            "Fetched {} missing outputs in {}s",
            missing_len,
            start.elapsed().as_secs()
        );
        Ok(out)
    }
}
/// Read the next id that the serial sequence backing `table_name.id_col_name`
/// will hand out, without consuming a sequence value.
fn read_next_id(conn: &Connection, table_name: &str, id_col_name: &str) -> Result<i64> {
    // explanation: https://dba.stackexchange.com/a/78228
    let q = format!(
        "select setval(pg_get_serial_sequence('{table}', '{id_col}'), GREATEST(nextval(pg_get_serial_sequence('{table}', '{id_col}')) - 1, 1)) as id",
        table = table_name,
        id_col = id_col_name
    );
    // A never-used sequence reports NULL; the first id Postgres serials
    // hand out is 1.
    const PG_STARTING_ID: i64 = 1;
    Ok(conn
        .query(&q, &[])?
        .iter()
        .next()
        .expect("at least one row")
        .get::<_, Option<i64>>(0)
        .map(|v| v + 1)
        .unwrap_or(PG_STARTING_ID))
}
/// Execute `queries` inside a single db transaction, logging row count and
/// timing. `name`, `len` and `batch_id` are used only for the log lines.
///
/// NOTE(review): the name has a typo (`transcation`) — kept to avoid
/// breaking callers.
fn execute_bulk_insert_transcation(
    conn: &Connection,
    name: &str,
    len: usize,
    batch_id: u64,
    queries: impl Iterator<Item = String>,
) -> Result<()> {
    trace!("Inserting {} {} from batch {}...", len, name, batch_id);
    let start = Instant::now();
    let transaction = conn.transaction()?;
    for s in queries {
        transaction.batch_execute(&s)?;
    }
    transaction.commit()?;
    trace!(
        "Inserted {} {} from batch {} in {}s",
        len,
        name,
        batch_id,
        start.elapsed().as_secs()
    );
    Ok(())
}
// Per-table convenience wrappers over `read_next_id`.
fn read_next_tx_id(conn: &Connection) -> Result<i64> {
    read_next_id(conn, "txs", "id")
}
fn read_next_output_id(conn: &Connection) -> Result<i64> {
    read_next_id(conn, "outputs", "id")
}
fn read_next_block_id(conn: &Connection) -> Result<i64> {
    read_next_id(conn, "blocks", "id")
}
// Hashes of blocks handed to the pipeline but not yet committed to the db.
type BlocksInFlight = HashSet<BlockHash>;
/// Worker Pipeline
///
/// `Pipeline` is responsible for actually inserting data into the db.
/// It is split between multiple threads - each handling one table.
/// The idea here is to have some level of parallelism to help saturate
/// network IO, and then disk IO. As each thread touches only one
/// table - there is no contention between them.
///
/// The `Pipeline` name comes from the fact that each thread does its job
/// and passes the rest of the data to the next one.
///
/// A lot of stuff here about performance is actually speculative,
/// but there are only so many hours in a day, and it seems to work well
/// in practice.
///
/// In `atomic` mode, the last thread inserts the entire data in one transaction
/// to prevent temporary inconsistency (eg. txs inserted, but blocks not yet).
/// It is to be used in non-bulk mode, when blocks are indexed one at a time,
/// so performance is not important. Passing formatted queries around is a compromise
/// between having two different versions of this logic, and good performance
/// in bulk mode.
struct Pipeline {
    // Entry point: batches of parsed blocks enter the pipeline here.
    // `Option` so `Drop` can close the channel before joining the workers.
    tx: Option<crossbeam_channel::Sender<(u64, Vec<Parsed>)>>,
    // One worker per table; each handle carries the worker's final Result
    // so `Drop` can surface failures.
    txs_thread: Option<std::thread::JoinHandle<Result<()>>>,
    outputs_thread: Option<std::thread::JoinHandle<Result<()>>>,
    inputs_thread: Option<std::thread::JoinHandle<Result<()>>>,
    blocks_thread: Option<std::thread::JoinHandle<Result<()>>>,
}
// TODO: fail the whole Pipeline somehow
/// Wrap a worker closure so any error it returns is logged (tagged with
/// `name`) before being propagated unchanged.
fn fn_log_err<F>(name: &'static str, mut f: F) -> impl FnMut() -> Result<()>
where
    F: FnMut() -> Result<()>,
{
    move || {
        f().map_err(|e| {
            error!("{} finished with an error: {}", name, e);
            e
        })
    }
}
/// Indexing mode, controlling which db indices exist and how commits happen.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum Mode {
    // Bulk-indexing into an empty db: no indices at all.
    FreshBulk,
    // Bulk-indexing with data already present: minimum indices.
    Bulk,
    // Block-at-a-time indexing: all indices, atomic commits.
    Normal,
}
impl Mode {
    /// True for both bulk variants.
    fn is_bulk(self) -> bool {
        match self {
            Mode::FreshBulk => true,
            Mode::Bulk => true,
            Mode::Normal => false,
        }
    }
    /// True only for `Bulk` — bulk mode over a db that already has data.
    fn is_not_fresh_bulk(self) -> bool {
        match self {
            Mode::FreshBulk => false,
            Mode::Bulk => true,
            Mode::Normal => false,
        }
    }
    /// SQL that (re)creates the index set appropriate for this mode.
    fn to_sql_query_str(self) -> &'static str {
        match self {
            Mode::FreshBulk => include_str!("pg/mode_fresh.sql"),
            Mode::Bulk => include_str!("pg/mode_bulk.sql"),
            Mode::Normal => include_str!("pg/mode_normal.sql"),
        }
    }
    /// Human-readable description used in the "Entering …" log line.
    fn to_entering_str(self) -> &'static str {
        match self {
            Mode::FreshBulk => "fresh mode: no indices",
            // Fixed copy-paste bug: this label previously said "fresh mode"
            // for `Bulk` as well.
            Mode::Bulk => "bulk mode: minimum indices",
            Mode::Normal => "normal mode: all indices",
        }
    }
}
impl Pipeline {
    /// Spawn the four worker threads (blocks -> txs -> outputs -> inputs)
    /// and wire them together with rendezvous channels.
    fn new(in_flight: Arc<Mutex<BlocksInFlight>>, mode: Mode) -> Result<Self> {
        // We use only rendezvous (0-size) channels, to allow passing
        // work and parallelism, but without doing any buffering of
        // work in the channels. Buffered work does not
        // improve performance, and more things in flight means
        // increased memory usage.
        let (tx, blocks_rx) = crossbeam_channel::bounded::<(u64, Vec<Parsed>)>(0);
        // Inter-stage payload: (batch id, id maps resolved so far, remaining
        // raw data, max block height, queries deferred for the final atomic
        // commit in non-bulk mode).
        let (blocks_tx, txs_rx) = crossbeam_channel::bounded::<(
            u64,
            HashMap<BlockHash, i64>,
            Vec<Tx>,
            Vec<Output>,
            Vec<Input>,
            u64,
            Vec<Vec<String>>,
        )>(0);
        let (txs_tx, outputs_rx) = crossbeam_channel::bounded::<(
            u64,
            HashMap<BlockHash, i64>,
            HashMap<TxHash, i64>,
            Vec<Output>,
            Vec<Input>,
            u64,
            Vec<Vec<String>>,
        )>(0);
        let (outputs_tx, inputs_rx) = crossbeam_channel::bounded::<(
            u64,
            HashMap<BlockHash, i64>,
            HashMap<TxHash, i64>,
            Vec<Input>,
            u64,
            Vec<Vec<String>>,
        )>(0);
        let utxo_set_cache = Arc::new(Mutex::new(UtxoSetCache::default()));
        // Stage 1: insert blocks (and the reorg marker query), assign block ids.
        let blocks_thread = std::thread::spawn({
            let conn = establish_connection()?;
            fn_log_err("db_worker_blocks", move || {
                let mut next_id = read_next_block_id(&conn)?;
                while let Ok((batch_id, parsed)) = blocks_rx.recv() {
                    let mut blocks: Vec<super::Block> = vec![];
                    let mut txs: Vec<super::Tx> = vec![];
                    let mut outputs: Vec<super::Output> = vec![];
                    let mut inputs: Vec<super::Input> = vec![];
                    let mut pending_queries = vec![];
                    for mut parsed in parsed {
                        blocks.push(parsed.block);
                        txs.append(&mut parsed.txs);
                        outputs.append(&mut parsed.outputs);
                        inputs.append(&mut parsed.inputs);
                    }
                    let max_block_height = blocks
                        .iter()
                        .rev()
                        .next()
                        .map(|b| b.height)
                        .expect("at least one block");
                    let min_block_height = blocks
                        .iter()
                        .next()
                        .map(|b| b.height)
                        .expect("at least one block");
                    let blocks_len = blocks.len();
                    let insert_queries = create_bulk_insert_blocks_query(&blocks);
                    // Mark any previously-indexed blocks at or above this
                    // height as orphaned before re-inserting.
                    let reorg_queries = vec![format!(
                        "UPDATE blocks SET orphaned = true WHERE height >= {};",
                        min_block_height
                    )];
                    if !mode.is_bulk() {
                        // Non-bulk: defer queries for the single atomic
                        // commit performed by the last stage.
                        pending_queries.push(reorg_queries);
                        pending_queries.push(insert_queries);
                    } else {
                        assert_eq!(next_id, read_next_block_id(&conn)?);
                        execute_bulk_insert_transcation(
                            &conn,
                            "blocks",
                            blocks.len(),
                            batch_id,
                            vec![reorg_queries, insert_queries].into_iter().flatten(),
                        )?;
                    }
                    // Ids are assigned sequentially in insertion order.
                    let block_ids: HashMap<_, _> = blocks
                        .into_iter()
                        .enumerate()
                        .map(|(i, tx)| (tx.hash, next_id + i as i64))
                        .collect();
                    next_id += blocks_len as i64;
                    blocks_tx.send((
                        batch_id,
                        block_ids,
                        txs,
                        outputs,
                        inputs,
                        max_block_height,
                        pending_queries,
                    ))?;
                }
                Ok(())
            })
        });
        // Stage 2: insert txs, assign tx ids.
        let txs_thread = std::thread::spawn({
            let conn = establish_connection()?;
            fn_log_err("db_worker_txs", move || {
                let mut next_id = read_next_tx_id(&conn)?;
                while let Ok((
                    batch_id,
                    block_ids,
                    txs,
                    outputs,
                    inputs,
                    max_block_height,
                    mut pending_queries,
                )) = txs_rx.recv()
                {
                    let queries = create_bulk_insert_txs_query(&txs, &block_ids);
                    if !mode.is_bulk() {
                        pending_queries.push(queries);
                    } else {
                        assert_eq!(next_id, read_next_tx_id(&conn)?);
                        execute_bulk_insert_transcation(
                            &conn,
                            "txs",
                            txs.len(),
                            batch_id,
                            queries.into_iter(),
                        )?
                    };
                    let batch_len = txs.len();
                    let tx_ids: HashMap<_, _> = txs
                        .into_iter()
                        .enumerate()
                        .map(|(i, tx)| (tx.hash, next_id + i as i64))
                        .collect();
                    next_id += batch_len as i64;
                    txs_tx.send((
                        batch_id,
                        block_ids,
                        tx_ids,
                        outputs,
                        inputs,
                        max_block_height,
                        pending_queries,
                    ))?;
                }
                Ok(())
            })
        });
        // Stage 3: insert outputs and record them in the utxo cache so the
        // inputs stage can resolve spends.
        let outputs_thread = std::thread::spawn({
            let conn = establish_connection()?;
            let utxo_set_cache = utxo_set_cache.clone();
            fn_log_err("db_worker_outputs", move || {
                let mut next_id = read_next_output_id(&conn)?;
                while let Ok((
                    batch_id,
                    block_ids,
                    tx_ids,
                    outputs,
                    inputs,
                    max_block_height,
                    mut pending_queries,
                )) = outputs_rx.recv()
                {
                    let queries = create_bulk_insert_outputs_query(&outputs, &tx_ids);
                    if !mode.is_bulk() {
                        pending_queries.push(queries);
                    } else {
                        assert_eq!(next_id, read_next_output_id(&conn)?);
                        execute_bulk_insert_transcation(
                            &conn,
                            "outputs",
                            outputs.len(),
                            batch_id,
                            queries.into_iter(),
                        )?;
                    }
                    let mut utxo_lock = utxo_set_cache.lock().unwrap();
                    outputs.iter().enumerate().for_each(|(i, output)| {
                        let id = next_id + (i as i64);
                        utxo_lock.insert(output.out_point, id, output.value);
                    });
                    drop(utxo_lock);
                    next_id += outputs.len() as i64;
                    outputs_tx.send((
                        batch_id,
                        block_ids,
                        tx_ids,
                        inputs,
                        max_block_height,
                        pending_queries,
                    ))?;
                }
                Ok(())
            })
        });
        // Stage 4: resolve inputs against the utxo cache (falling back to
        // the db), then perform the final commit.
        let inputs_thread = std::thread::spawn({
            let conn = establish_connection()?;
            let utxo_set_cache = utxo_set_cache.clone();
            fn_log_err("db_worker_inputs", move || {
                while let Ok((
                    batch_id,
                    block_ids,
                    tx_ids,
                    inputs,
                    max_block_height,
                    mut pending_queries,
                )) = inputs_rx.recv()
                {
                    let mut utxo_lock = utxo_set_cache.lock().unwrap();
                    let (mut output_ids, missing) =
                        utxo_lock.consume(inputs.iter().map(|i| i.out_point));
                    drop(utxo_lock);
                    let missing = UtxoSetCache::fetch_missing(&conn, missing)?;
                    for (k, v) in missing.into_iter() {
                        output_ids.insert(k, v);
                    }
                    let mut queries =
                        create_bulk_insert_inputs_query(&inputs, &output_ids, &tx_ids);
                    queries.push(format!(
                        "UPDATE indexer_state SET height = {};",
                        max_block_height
                    ));
                    // BUGFIX(review): this condition was inverted. In bulk
                    // mode the earlier stages already committed their
                    // queries, so only this stage's `queries` remain; in
                    // non-bulk (atomic) mode the earlier stages deferred
                    // everything into `pending_queries`, which the old code
                    // silently dropped instead of committing.
                    if mode.is_bulk() {
                        execute_bulk_insert_transcation(
                            &conn,
                            "inputs",
                            inputs.len(),
                            batch_id,
                            queries.into_iter(),
                        )?;
                    } else {
                        pending_queries.push(queries);
                        execute_bulk_insert_transcation(
                            &conn,
                            "all block data",
                            block_ids.len(),
                            batch_id,
                            pending_queries.into_iter().flatten(),
                        )?;
                    }
                    info!("Block {}H fully indexed and commited", max_block_height);
                    // Every block of the batch must have been registered as
                    // in-flight by the producer.
                    let mut any_missing = false;
                    let mut lock = in_flight.lock().unwrap();
                    for hash in block_ids.keys() {
                        let missing = !lock.remove(&hash);
                        any_missing = any_missing || missing;
                    }
                    drop(lock);
                    assert!(!any_missing);
                }
                Ok(())
            })
        });
        Ok(Self {
            tx: Some(tx),
            txs_thread: Some(txs_thread),
            outputs_thread: Some(outputs_thread),
            inputs_thread: Some(inputs_thread),
            blocks_thread: Some(blocks_thread),
        })
    }
}
impl Drop for Pipeline {
    /// Close the input channel, then join every worker; a worker that
    /// panicked or finished with an error aborts the process.
    fn drop(&mut self) {
        // Dropping the sender disconnects the channel, so each worker's
        // `recv` loop terminates in turn.
        drop(self.tx.take());
        let handles = vec![
            self.txs_thread.take(),
            self.outputs_thread.take(),
            self.inputs_thread.take(),
            self.blocks_thread.take(),
        ];
        for handle in handles {
            handle
                .unwrap()
                .join()
                .expect("Couldn't join on thread")
                .expect("Worker thread panicked");
        }
    }
}
/// PostgreSQL-backed data store driving the indexing `Pipeline`.
/// NOTE(review): the name has a typo ("Postresql") — kept, as renaming
/// would break external callers.
pub struct Postresql {
    // Connection used for queries outside the worker pipeline.
    connection: Connection,
    // Highest block height seen so far, if known.
    cached_max_height: Option<u64>,
    // Worker pipeline; `None` while workers are stopped.
    pipeline: Option<Pipeline>,
    // Blocks accumulated before being flushed to the pipeline as one batch.
    batch: Vec<crate::BlockCore>,
    batch_txs_total: u64,
    batch_id: u64,
    mode: Mode,
    node_chain_head_height: BlockHeight,
    // Blocks handed to the pipeline but not yet committed.
    in_flight: Arc<Mutex<BlocksInFlight>>,
    // for double checking logic here
    last_inserted_block_height: Option<BlockHeight>,
    num_inserted: u64,
}
impl Drop for Postresql {
    // Stopping the workers drops the Pipeline, which flushes and joins all
    // worker threads before the connection goes away.
    fn drop(&mut self) {
        self.stop_workers();
    }
}
impl Postresql {
    /// Connect, initialize the schema, restore the persisted indexer state,
    /// sanity-check / clean up depending on the detected mode, and start
    /// the worker pipeline.
    pub fn new(node_chain_head_height: BlockHeight) -> Result<Self> {
        let connection = establish_connection()?;
        Self::init(&connection)?;
        let (height, mode) = Self::read_indexer_state(&connection)?;
        let mut s = Postresql {
            connection,
            pipeline: None,
            cached_max_height: None,
            batch: vec![],
            batch_txs_total: 0,
            batch_id: 0,
            mode,
            node_chain_head_height,
            in_flight: Arc::new(Mutex::new(BlocksInFlight::new())),
            last_inserted_block_height: None,
            num_inserted: 0,
        };
        if s.mode == Mode::FreshBulk {
            // Empty db: cycling through all modes is cheap and validates
            // every mode-switch SQL script.
            s.self_test()?;
        } else if s.mode == Mode::Bulk {
            // A previous bulk run may have died mid-batch.
            s.wipe_inconsistent_data(height)?;
        }
        s.start_workers();
        Ok(s)
    }
    /// Read `(height, mode)` from `indexer_state`, creating the row (in
    /// bulk mode, height NULL) on first run. A persisted bulk flag is
    /// refined to `FreshBulk` when the blocks table is empty.
    fn read_indexer_state(conn: &Connection) -> Result<(Option<u64>, Mode)> {
        let state = conn.query("SELECT bulk_mode, height FROM indexer_state", &[])?;
        if let Some(state) = state.iter().next() {
            let is_bulk_mode = state.get(0);
            let mode = if is_bulk_mode {
                let count = conn
                    .query("SELECT COUNT(*) FROM BLOCKS", &[])?
                    .into_iter()
                    .next()
                    .expect("A row from the db")
                    .get::<_, i64>(0);
                if count == 0 {
                    Mode::FreshBulk
                } else {
                    Mode::Bulk
                }
            } else {
                Mode::Normal
            };
            Ok((state.get::<_, Option<i64>>(1).map(|h| h as u64), mode))
        } else {
            conn.execute(
                "INSERT INTO indexer_state (bulk_mode, height) VALUES ($1, NULL)",
                &[&true],
            )?;
            Ok((None, Mode::FreshBulk))
        }
    }
    /// Create the base db schema (idempotent script).
    fn init(conn: &Connection) -> Result<()> {
        info!("Creating db schema");
        conn.batch_execute(include_str!("pg/init_base.sql"))?;
        Ok(())
    }
    /// Wipe all the data that might have been added, before a `block` entry
    /// was commited to the DB.
    ///
    /// `Blocks` is the last table to have data inserted, and is
    /// used as a commitment that everything else was inserted already..
    fn wipe_inconsistent_data(&mut self, height: Option<BlockHeight>) -> Result<()> {
        // there can be no inconsistent data outside of bulk mode
        if self.mode.is_not_fresh_bulk() {
            if let Some(height) = height {
                info!("Deleting potentially inconsistent data from previous bulk run");
                self.wipe_to_height(height)?;
            }
        }
        Ok(())
    }
    /// Drop the pipeline (joining its threads); afterwards nothing may be
    /// left in flight.
    fn stop_workers(&mut self) {
        debug!("Stopping DB pipeline workers");
        self.pipeline.take();
        debug!("Stopped DB pipeline workers");
        assert!(self.in_flight.lock().unwrap().is_empty());
    }
    fn start_workers(&mut self) {
        debug!("Starting DB pipeline workers");
        // TODO: This `unwrap` is not OK. Connecting to db can fail.
        self.pipeline = Some(Pipeline::new(self.in_flight.clone(), self.mode).unwrap())
    }
    /// Synchronization point: restart the workers, which forces everything
    /// in flight to be committed first.
    fn flush_workers(&mut self) {
        self.stop_workers();
        self.start_workers();
    }
    fn update_max_height(&mut self, block: &crate::BlockCore) {
        self.cached_max_height = Some(
            self.cached_max_height
                .map_or(block.height, |h| std::cmp::max(h, block.height)),
        );
    }
    /// Parse the accumulated batch in parallel, register its blocks as
    /// in-flight, and hand it to the pipeline. No-op on an empty batch.
    fn flush_batch(&mut self) -> Result<()> {
        if self.batch.is_empty() {
            return Ok(());
        }
        trace!(
            "Flushing batch {}, with {} txes",
            self.batch_id,
            self.batch_txs_total
        );
        let parsed: Result<Vec<_>> = std::mem::replace(&mut self.batch, vec![])
            .par_iter()
            .map(|block_info| super::parse_node_block(&block_info))
            .collect();
        let parsed = parsed?;
        let mut in_flight = self.in_flight.lock().expect("locking works");
        for parsed in &parsed {
            in_flight.insert(parsed.block.hash);
        }
        drop(in_flight);
        self.pipeline
            .as_ref()
            .expect("workers running")
            .tx
            .as_ref()
            .expect("tx not null")
            .send((self.batch_id, parsed))
            .expect("Send should not fail");
        trace!("Batch flushed");
        self.batch_txs_total = 0;
        self.batch_id += 1;
        Ok(())
    }
    /// Drop the whole schema (runs the wipe script).
    pub fn wipe() -> Result<()> {
        info!("Wiping db schema");
        let connection = establish_connection()?;
        connection.batch_execute(include_str!("pg/wipe.sql"))?;
        Ok(())
    }
    fn set_mode(&mut self, mode: Mode) -> Result<()> {
        if self.mode == mode {
            return Ok(());
        }
        self.set_mode_uncodintionally(mode)?;
        Ok(())
    }
    /// Switch mode: flush everything, run the mode's index script, and only
    /// then persist the new mode flag (so a crash mid-switch re-runs it).
    /// NOTE(review): method name has a typo ("uncodintionally") — kept to
    /// avoid breaking callers.
    fn set_mode_uncodintionally(&mut self, mode: Mode) -> Result<()> {
        self.mode = mode;
        info!("Entering {}", mode.to_entering_str());
        self.flush_batch()?;
        self.flush_workers();
        self.connection.batch_execute(mode.to_sql_query_str())?;
        // commit to the new mode in the db last
        self.connection.execute(
            "UPDATE indexer_state SET bulk_mode = $1",
            &[&(mode.is_bulk())],
        )?;
        Ok(())
    }
    /// Switch between all modes to double-check all queries
    fn self_test(&mut self) -> Result<()> {
        assert_eq!(self.mode, Mode::FreshBulk);
        self.set_mode_uncodintionally(Mode::FreshBulk)?;
        self.set_mode_uncodintionally(Mode::Bulk)?;
        self.set_mode_uncodintionally(Mode::Normal)?;
        self.set_mode_uncodintionally(Mode::Bulk)?;
        self.set_mode_uncodintionally(Mode::FreshBulk)?;
        Ok(())
    }
}
impl DataStore for Postresql {
    /// Highest indexed block height, cached after the first query.
    ///
    /// NOTE(review): uses `ORDER BY id DESC` to find the newest block —
    /// assumes row ids increase with height; confirm against the schema.
    fn get_head_height(&mut self) -> Result<Option<BlockHeight>> {
        if let Some(height) = self.cached_max_height {
            return Ok(Some(height));
        }
        self.cached_max_height = self
            .connection
            .query("SELECT height FROM blocks ORDER BY id DESC LIMIT 1", &[])?
            .iter()
            .next()
            .and_then(|row| row.get::<_, Option<i64>>(0))
            .map(|u| u as u64);
        Ok(self.cached_max_height)
    }
    /// Hash of the non-orphaned block at `height`, if one is indexed.
    ///
    /// Flushes pending batches/workers first so the query sees all data.
    fn get_hash_by_height(&mut self, height: BlockHeight) -> Result<Option<BlockHash>> {
        // Cheap negative answer: nothing above the cached max can exist.
        if let Some(max_height) = self.cached_max_height {
            if max_height < height {
                return Ok(None);
            }
        }
        // TODO: This could be done better, if we were just tracking
        // things in flight better
        self.flush_batch()?;
        if !self.in_flight.lock().unwrap().is_empty() {
            eprintln!("TODO: Unnecessary flush");
            self.flush_workers();
        }
        Ok(self
            .connection
            .query(
                "SELECT hash FROM blocks WHERE height = $1 AND orphaned = false",
                &[&(height as i64)],
            )?
            .iter()
            .next()
            .map(|row| row.get::<_, Vec<u8>>(0))
            .map(|mut human_bytes| {
                // Hashes are stored in human-readable (reversed) byte order.
                human_bytes.reverse();
                BlockHash::from(human_bytes.as_slice())
            }))
    }
    /// Queue `block` for insertion, handling reorgs and duplicates.
    fn insert(&mut self, block: crate::BlockCore) -> Result<()> {
        if let Some(db_hash) = self.get_hash_by_height(block.height)? {
            if db_hash != block.id {
                // we move forward and there is a query in an inserting
                // pipeline (`reorg_queries`)
                // that will mark anything above and eq this height as orphaned
                info!(
                    "Node block != db block at {}H; {} != {} - reorg",
                    block.height, block.id, db_hash
                );
            } else {
                // we already have exact same block, non-orphaned, and we don't want
                // to add it twice
                trace!(
                    "Skip indexing alredy included block {}H {}",
                    block.height,
                    block.id
                );
                // if we're here, we must have not inserted anything yet,
                // and these are prefetcher starting from some past blocks
                assert_eq!(self.num_inserted, 0);
                return Ok(());
            }
        } else {
            // we can only be inserting non-reorg blocks one height at a time
            if let Some(last_inserted_block_height) = self.last_inserted_block_height {
                assert_eq!(block.height, last_inserted_block_height + 1);
            }
        }
        self.num_inserted += 1;
        self.last_inserted_block_height = Some(block.height);
        self.update_max_height(&block);
        self.batch_txs_total += block.data.txdata.len() as u64;
        let height = block.height;
        self.batch.push(block);
        if self.mode.is_bulk() {
            // In bulk mode, batch up to ~100k transactions before flushing.
            if self.batch_txs_total > 100_000 {
                self.flush_batch()?;
            }
        } else if self.cached_max_height.expect("Already set") <= height {
            // In normal mode, flush as soon as we reach/extend the tip.
            self.flush_batch()?;
        }
        // Caught up with the node: switch to normal (incremental) mode.
        if self.node_chain_head_height == height {
            self.set_mode(Mode::Normal)?;
        }
        Ok(())
    }
    /// Delete all indexed data above `height` in one transaction.
    ///
    /// Blocks are deleted by height; txs/outputs/inputs are then removed by
    /// cleaning up rows whose parent row no longer exists (LEFT JOIN IS NULL).
    fn wipe_to_height(&mut self, height: u64) -> Result<()> {
        info!("Deleting data above {}H", height);
        let transaction = self.connection.transaction()?;
        info!("Deleting blocks above {}H", height);
        transaction.execute("DELETE FROM blocks WHERE height > $1", &[&(height as i64)])?;
        info!("Deleting txs above {}H", height);
        transaction.execute("DELETE FROM txs WHERE id IN (SELECT txs.id FROM txs LEFT JOIN blocks ON txs.block_id = blocks.id WHERE blocks.id IS NULL)", &[])?;
        info!("Deleting outputs above {}H", height);
        transaction.execute("DELETE FROM outputs WHERE id IN (SELECT outputs.id FROM outputs LEFT JOIN txs ON outputs.tx_id = txs.id WHERE txs.id IS NULL)", &[])?;
        info!("Deleting inputs above {}H", height);
        transaction.execute("DELETE FROM inputs WHERE output_id IN (SELECT inputs.output_id FROM inputs LEFT JOIN txs ON inputs.tx_id = txs.id WHERE txs.id IS NULL)", &[])?;
        transaction.commit()?;
        trace!("Deleted data above {}H", height);
        Ok(())
    }
}
|
// NOTE(review): svd2rust-generated register API — code kept byte-identical.
#[doc = "Register `K2LR` writer"]
pub type W = crate::W<K2LR_SPEC>;
#[doc = "Field `k` writer - k96"]
pub type K_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 31, O, u32>;
#[doc = "Field `b121` writer - b121"]
pub type B121_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl W {
    #[doc = "Bits 0:30 - k96"]
    #[inline(always)]
    #[must_use]
    pub fn k(&mut self) -> K_W<K2LR_SPEC, 0> {
        K_W::new(self)
    }
    // NOTE(review): b121 (bit 25) overlaps the `k` field (bits 0:30); this
    // comes straight from the source SVD — confirm against the reference manual.
    #[doc = "Bit 25 - b121"]
    #[inline(always)]
    #[must_use]
    pub fn b121(&mut self) -> B121_W<K2LR_SPEC, 25> {
        B121_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "key registers\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`k2lr::W`](W). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct K2LR_SPEC;
impl crate::RegisterSpec for K2LR_SPEC {
    type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [`k2lr::W`](W) writer structure"]
impl crate::Writable for K2LR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets K2LR to value 0"]
impl crate::Resettable for K2LR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// NOTE(review): svd2rust-generated register API — code kept byte-identical.
#[doc = "Register `SCSR` reader"]
pub type R = crate::R<SCSR_SPEC>;
#[doc = "Register `SCSR` writer"]
pub type W = crate::W<SCSR_SPEC>;
#[doc = "Field `SRAM2ER` reader - SRAM2 erase"]
pub type SRAM2ER_R = crate::BitReader<SRAM2ERW_A>;
#[doc = "SRAM2 erase\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SRAM2ERW_A {
    #[doc = "1: Start SRAM2 erase operation"]
    Erase = 1,
}
impl From<SRAM2ERW_A> for bool {
    #[inline(always)]
    fn from(variant: SRAM2ERW_A) -> Self {
        variant as u8 != 0
    }
}
impl SRAM2ER_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<SRAM2ERW_A> {
        // Only the `1` state is named in the SVD, so `0` maps to `None`.
        match self.bits {
            true => Some(SRAM2ERW_A::Erase),
            _ => None,
        }
    }
    #[doc = "Start SRAM2 erase operation"]
    #[inline(always)]
    pub fn is_erase(&self) -> bool {
        *self == SRAM2ERW_A::Erase
    }
}
#[doc = "Field `SRAM2ER` writer - SRAM2 erase"]
pub type SRAM2ER_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, SRAM2ERW_A>;
impl<'a, REG, const O: u8> SRAM2ER_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Start SRAM2 erase operation"]
    #[inline(always)]
    pub fn erase(self) -> &'a mut crate::W<REG> {
        self.variant(SRAM2ERW_A::Erase)
    }
}
#[doc = "Field `SRAMBSY` reader - SRAM1, SRAM2 and PKA SRAM busy by erase operation"]
pub type SRAMBSY_R = crate::BitReader<SRAMBSY_A>;
#[doc = "SRAM1, SRAM2 and PKA SRAM busy by erase operation\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SRAMBSY_A {
    #[doc = "0: No SRAM1 or SRAM2 erase operation is ongoing"]
    Idle = 0,
    #[doc = "1: SRAM1 or SRAM2 erase operation is ongoing"]
    Busy = 1,
}
impl From<SRAMBSY_A> for bool {
    #[inline(always)]
    fn from(variant: SRAMBSY_A) -> Self {
        variant as u8 != 0
    }
}
impl SRAMBSY_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> SRAMBSY_A {
        match self.bits {
            false => SRAMBSY_A::Idle,
            true => SRAMBSY_A::Busy,
        }
    }
    #[doc = "No SRAM1 or SRAM2 erase operation is ongoing"]
    #[inline(always)]
    pub fn is_idle(&self) -> bool {
        *self == SRAMBSY_A::Idle
    }
    #[doc = "SRAM1 or SRAM2 erase operation is ongoing"]
    #[inline(always)]
    pub fn is_busy(&self) -> bool {
        *self == SRAMBSY_A::Busy
    }
}
#[doc = "Field `PKASRAMBSY` reader - PKA SRAM busy by erase operation"]
pub type PKASRAMBSY_R = crate::BitReader<PKASRAMBSY_A>;
#[doc = "PKA SRAM busy by erase operation\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PKASRAMBSY_A {
    #[doc = "0: No PKA SRAM erase operation is ongoing"]
    Idle = 0,
    #[doc = "1: PKA SRAM erase operation is ongoing"]
    Busy = 1,
}
impl From<PKASRAMBSY_A> for bool {
    #[inline(always)]
    fn from(variant: PKASRAMBSY_A) -> Self {
        variant as u8 != 0
    }
}
impl PKASRAMBSY_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PKASRAMBSY_A {
        match self.bits {
            false => PKASRAMBSY_A::Idle,
            true => PKASRAMBSY_A::Busy,
        }
    }
    #[doc = "No PKA SRAM erase operation is ongoing"]
    #[inline(always)]
    pub fn is_idle(&self) -> bool {
        *self == PKASRAMBSY_A::Idle
    }
    #[doc = "PKA SRAM erase operation is ongoing"]
    #[inline(always)]
    pub fn is_busy(&self) -> bool {
        *self == PKASRAMBSY_A::Busy
    }
}
impl R {
    #[doc = "Bit 0 - SRAM2 erase"]
    #[inline(always)]
    pub fn sram2er(&self) -> SRAM2ER_R {
        SRAM2ER_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - SRAM1, SRAM2 and PKA SRAM busy by erase operation"]
    #[inline(always)]
    pub fn srambsy(&self) -> SRAMBSY_R {
        SRAMBSY_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 8 - PKA SRAM busy by erase operation"]
    #[inline(always)]
    pub fn pkasrambsy(&self) -> PKASRAMBSY_R {
        PKASRAMBSY_R::new(((self.bits >> 8) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - SRAM2 erase"]
    #[inline(always)]
    #[must_use]
    pub fn sram2er(&mut self) -> SRAM2ER_W<SCSR_SPEC, 0> {
        SRAM2ER_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "SCSR\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`scsr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`scsr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SCSR_SPEC;
impl crate::RegisterSpec for SCSR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`scsr::R`](R) reader structure"]
impl crate::Readable for SCSR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`scsr::W`](W) writer structure"]
impl crate::Writable for SCSR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SCSR to value 0"]
impl crate::Resettable for SCSR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Built-In Attributes
#![no_std]
// Imports
extern crate eng_wasm;
extern crate eng_wasm_derive;
extern crate serde;
use eng_wasm::*;
use eng_wasm_derive::pub_interface;
use serde::{Serialize, Deserialize};
// Encrypted state keys
// Key under which the `Vec<Participant>` is stored in encrypted contract state.
static PARTICIPANTS: &str = "participants";
// Structs
/// A single trial participant held in encrypted state.
#[derive(Serialize, Deserialize)]
pub struct Participant {
    // Unique participant identifier.
    id: U256,
    // Participant age.
    age: U256,
    // true = real-treatment arm, false = placebo arm.
    real: bool,
    // Trial result; `None` until recorded via `write_result`.
    result: Option<U256>,
}
// Public struct Contract which will consist of private and public-facing secret contract functions
pub struct Contract;
// Private functions accessible only by the secret contract
impl Contract {
    /// Load the participant list from encrypted state (empty if absent).
    fn get_participants() -> Vec<Participant> {
        read_state!(PARTICIPANTS).unwrap_or_default()
    }
}
// Public trait defining public-facing secret contract functions
#[pub_interface]
pub trait ContractInterface{
    /// Append a participant with no result recorded yet.
    fn add_participant(id: U256, age: U256, real: bool);
    /// Number of participants currently stored.
    fn get_participant_count() -> U256;
    /// Record `result` for every participant whose id matches.
    fn write_result(id: U256, result: U256);
    /// Sum of all participant ages.
    fn sum_ages() -> U256;
    /// Sum of all recorded results.
    fn sum_results() -> U256;
    /// Average result over the `real` (or placebo) arm.
    fn compute_avg_result(real: bool) -> U256;
    // fn computePlaceboResult() -> U256;
    // fn computeRealResult(age: U8, overunder: bool) -> U256;
    // fn computePlaceboResult(age: U8, overunder: bool) -> U256;
}
// Implementation of the public-facing secret contract functions defined in the ContractInterface
// trait implementation for the Contract struct above
impl ContractInterface for Contract {
    /// Append a new participant (result initially unset) to encrypted state.
    #[no_mangle]
    fn add_participant(id: U256, age: U256, real: bool) {
        let mut participants = Self::get_participants();
        participants.push(Participant {
            id,
            age,
            real,
            result: None,
        });
        write_state!(PARTICIPANTS => participants);
    }
    /// Number of participants currently stored.
    #[no_mangle]
    fn get_participant_count() -> U256 {
        U256::from(Self::get_participants().len())
    }
    /// Record `result` for every participant whose id matches `_id`.
    #[no_mangle]
    fn write_result(_id: U256, _result: U256) {
        let mut participants = Self::get_participants();
        for participant in participants.iter_mut() {
            if _id == participant.id {
                participant.result = Some(_result);
            }
        }
        write_state!(PARTICIPANTS => participants);
    }
    /// Sum of all participant ages.
    #[no_mangle]
    fn sum_ages() -> U256 {
        let mut sum: U256 = U256::from(0);
        for participant in Self::get_participants() {
            sum += participant.age;
        }
        sum
    }
    /// Sum of recorded results. Participants without a recorded result
    /// contribute nothing instead of panicking (was `unwrap()`).
    #[no_mangle]
    fn sum_results() -> U256 {
        let mut sum: U256 = U256::from(0);
        for participant in Self::get_participants() {
            sum += participant.result.unwrap_or(U256::from(0));
        }
        sum
    }
    /// Average recorded result over the selected arm (`_real`).
    ///
    /// Only participants with a recorded result are counted, and an empty
    /// arm yields 0 instead of a division-by-zero panic (was `unwrap()` and
    /// an unguarded `sum / count`). `#[no_mangle]` added for consistency
    /// with the other exported functions.
    #[no_mangle]
    fn compute_avg_result(_real: bool) -> U256 {
        let mut sum: U256 = U256::from(0);
        let mut count: U256 = U256::from(0);
        for participant in Self::get_participants() {
            if participant.real == _real {
                if let Some(result) = participant.result {
                    sum += result;
                    count = count + 1;
                }
            }
        }
        if count == U256::from(0) {
            return U256::from(0);
        }
        sum / count
    }
}
|
//! The RFC 959 Data Port (`PORT`) command
//
// The argument is a HOST-PORT specification for the data port
// to be used in data connection. There are defaults for both
// the user and server data ports, and under normal
// circumstances this command and its reply are not needed. If
// this command is used, the argument is the concatenation of a
// 32-bit internet host address and a 16-bit TCP port address.
// This address information is broken into 8-bit fields and the
// value of each field is transmitted as a decimal number (in
// character string representation). The fields are separated
// by commas. A port command would be:
//
// PORT h1,h2,h3,h4,p1,p2
//
// where h1 is the high order 8 bits of the internet host
// address.
use crate::{
auth::UserDetail,
server::{
chancomms::{DataChanCmd, ProxyLoopSender},
controlchan::{
error::ControlChanError,
handler::{CommandContext, CommandHandler},
Reply, ReplyCode,
},
datachan,
session::SharedSession,
ControlChanMsg,
},
storage::{Metadata, StorageBackend},
};
use async_trait::async_trait;
use std::io;
use std::net::{Ipv4Addr, SocketAddrV4};
use tokio::net::TcpStream;
use tokio::sync::mpsc::{channel, Receiver, Sender};
/// The RFC 959 `PORT` command handler state.
#[derive(Debug)]
pub struct Port {
    // Unparsed `h1,h2,h3,h4,p1,p2` HOST-PORT argument as received from the client.
    addr: String,
}
impl Port {
    /// Create a handler for a raw `h1,h2,h3,h4,p1,p2` argument string.
    pub fn new(addr: String) -> Self {
        Port { addr }
    }
    // modifies the session by adding channels that are used to communicate with the data connection
    // processing loop.
    #[tracing_attributes::instrument]
    async fn setup_inter_loop_comms<S, U>(&self, session: SharedSession<S, U>, control_loop_tx: Sender<ControlChanMsg>)
    where
        U: UserDetail + 'static,
        S: StorageBackend<U> + 'static,
        S::Metadata: Metadata,
    {
        let (cmd_tx, cmd_rx): (Sender<DataChanCmd>, Receiver<DataChanCmd>) = channel(1);
        let (data_abort_tx, data_abort_rx): (Sender<()>, Receiver<()>) = channel(1);
        let mut session = session.lock().await;
        session.data_cmd_tx = Some(cmd_tx);
        session.data_cmd_rx = Some(cmd_rx);
        session.data_abort_tx = Some(data_abort_tx);
        session.data_abort_rx = Some(data_abort_rx);
        session.control_msg_tx = Some(control_loop_tx);
    }
    // For non-proxy mode we connect to the client-supplied address and spawn the
    // data-channel processing loop, letting the control channel know (via method
    // return) whether the connection was established.
    #[tracing_attributes::instrument]
    async fn handle_nonproxy_mode<S, U>(&self, args: CommandContext<S, U>) -> Result<Reply, ControlChanError>
    where
        U: UserDetail + 'static,
        S: StorageBackend<U> + 'static,
        S::Metadata: Metadata,
    {
        let CommandContext {
            logger,
            passive_host: _passive_host,
            tx_control_chan: tx,
            session,
            ..
        } = args;
        // Parse `h1,h2,h3,h4,p1,p2`; fields that fail to parse as u8 are dropped.
        let bytes: Vec<u8> = self.addr.split(',').map(|x| x.parse::<u8>()).filter_map(Result::ok).collect();
        // RFC 959 requires exactly six 8-bit decimal fields. The previous code
        // indexed `bytes[4]`/`bytes[5]` unchecked and panicked on malformed
        // (client-controlled!) input; reject it with an error reply instead.
        // NOTE(review): a 501 syntax-error reply would be more precise, but only
        // the ReplyCode variants already used in this file are relied on here.
        if bytes.len() != 6 {
            return Ok(Reply::new(ReplyCode::CantOpenDataConnection, "Bad PORT argument"));
        }
        let port = ((bytes[4] as u16) << 8) | bytes[5] as u16;
        let addr = SocketAddrV4::new(Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), port);
        let stream: io::Result<TcpStream> = TcpStream::connect(addr).await;
        let stream = match stream {
            Err(_) => return Ok(Reply::new(ReplyCode::CantOpenDataConnection, "No data connection established")),
            Ok(s) => s,
        };
        self.setup_inter_loop_comms(session.clone(), tx).await;
        datachan::spawn_processing(logger, session, stream).await;
        Ok(Reply::new(ReplyCode::CommandOkay, "Entering Active mode"))
    }
    // Active (PORT) mode is not supported when running behind the proxy protocol.
    #[tracing_attributes::instrument]
    async fn handle_proxy_mode<S, U>(&self, args: CommandContext<S, U>, tx: ProxyLoopSender<S, U>) -> Result<Reply, ControlChanError>
    where
        U: UserDetail + 'static,
        S: StorageBackend<U> + 'static,
        S::Metadata: Metadata,
    {
        Ok(Reply::new(
            ReplyCode::CommandNotImplemented,
            "ACTIVE mode is not supported with Proxy - use PASSIVE instead",
        ))
    }
}
#[async_trait]
impl<Storage, User> CommandHandler<Storage, User> for Port
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
{
    /// Dispatch to proxy or non-proxy handling depending on whether a
    /// proxy-loop sender is configured for this connection.
    #[tracing_attributes::instrument]
    async fn handle(&self, args: CommandContext<Storage, User>) -> Result<Reply, ControlChanError> {
        if let Some(tx) = args.tx_proxyloop.clone() {
            self.handle_proxy_mode(args, tx).await
        } else {
            self.handle_nonproxy_mode(args).await
        }
    }
}
|
use std::fs::File;
use std::io::{self, BufRead};
use std::path::Path;
/// Advent of Code: read up to 1000 integers from `./input`, then find three
/// entries summing to 2020 and print them with their product.
fn main() {
    let mut elements = [0; 1000];
    let mut index = 0;
    // File `input` must exist in the current path before this produces output
    if let Ok(lines) = read_lines("input") {
        for line in lines {
            if let Ok(ip) = line {
                // Guard the fixed-size buffer: the original wrote
                // `elements[index]` unchecked and panicked past 1000 lines.
                if index >= elements.len() {
                    break;
                }
                // Unparsable lines leave a 0 in this slot (as before).
                if let Ok(num) = ip.parse() {
                    elements[index] = num;
                }
                index += 1;
            }
        }
    }
    // Only scan the slots actually filled. The original iterated the whole
    // array, so the trailing zero padding could produce a bogus
    // `x + 0 + 0 == 2020` match and also printed up to 1000 zeros.
    let filled = &elements[..index];
    for elem in filled.iter() {
        println!("{}", elem);
    }
    // Part 2: first triple summing to 2020 wins.
    for i in filled.iter() {
        for j in filled.iter() {
            for k in filled.iter() {
                if i + j + k == 2020 {
                    println!("{} : {} : {}", i, j, k);
                    println!("{}", i * j * k);
                    return;
                }
            }
        }
    }
}
// The output is wrapped in a Result to allow matching on errors.
// Returns an iterator over the lines of the reader of the named file.
fn read_lines<P>(filename: P) -> io::Result<io::Lines<io::BufReader<File>>>
where
    P: AsRef<Path>,
{
    Ok(io::BufReader::new(File::open(filename)?).lines())
}
|
#[macro_use]
extern crate dotenv_codegen;
extern crate diesel_derive_enum;
extern crate itertools;
extern crate juniper;
use ::mystore_lib::db_connection::establish_connection;
use actix_cors::Cors;
use actix_identity::{CookieIdentityPolicy, IdentityService};
use actix_web::http::header;
use actix_web::middleware::Logger;
use actix_web::{App, HttpServer};
use chrono::Duration;
use csrf_token::CsrfTokenGenerator;
use ::mystore_lib::graphql::{graphql,graphiql};
use ::mystore_lib::graphql::schema::create_schema;
use ::mystore_lib::handlers::authentication::{login, logout};
use ::mystore_lib::handlers::register::register;
// HTTP entry point: configures identity cookies, CORS, CSRF tokens, the DB
// connection and the GraphQL schema, then serves on 127.0.0.1:8088.
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Force actix debug logging regardless of the environment.
    std::env::set_var("RUST_LOG", "actix_web=debug");
    env_logger::init();
    // Custom header used to exchange CSRF tokens with the client.
    let csrf_token_header = header::HeaderName::from_lowercase(b"x-csrf-token").unwrap();
    // GraphQL schema is shared (Arc) across all server worker threads.
    let schema = std::sync::Arc::new(create_schema());
    HttpServer::new(move || {
        App::new()
            .wrap(Logger::default())
            // Cookie identity: signed with SECRET_KEY, one-day lifetime.
            .wrap(IdentityService::new(
                CookieIdentityPolicy::new(dotenv!("SECRET_KEY").as_bytes())
                    .domain(dotenv!("MYSTOREDOMAIN"))
                    .name("mystorejwt")
                    .path("/")
                    .max_age(Duration::days(1).num_seconds())
                    .secure(dotenv!("COOKIE_SECURE").parse().unwrap()),
            ))
            .wrap(
                Cors::new()
                    .send_wildcard()
                    .allowed_methods(vec!["GET", "POST", "PUT", "DELETE"])
                    .allowed_headers(vec![
                        header::AUTHORIZATION,
                        header::CONTENT_TYPE,
                        header::ACCEPT,
                        csrf_token_header.clone(),
                    ])
                    .expose_headers(vec![csrf_token_header.clone()])
                    .max_age(3600)
                    .finish(),
            )
            // Per-app data: CSRF token generator (1h validity), DB pool, schema.
            .data(CsrfTokenGenerator::new(
                dotenv!("CSRF_TOKEN_KEY").as_bytes().to_vec(),
                Duration::hours(1),
            ))
            .data(establish_connection())
            .data(schema.clone())
            .service(register)
            .service(login)
            .service(logout)
            .service(graphql)
            .service(graphiql)
    })
    .bind("127.0.0.1:8088")?
    .run()
    .await
}
|
mod example;
use crate::ExampleContainer;
use example::*;
use yew::prelude::*;
use yewprint::{HtmlSelect, Intent, H1, H5};
/// Documentation page for the `Spinner` component: renders the live example
/// together with its property controls.
pub struct SpinnerDoc {
    // Receives updated example props from the property controls.
    callback: Callback<ExampleProps>,
    // Currently selected example props (intent, size).
    state: ExampleProps,
}
impl Component for SpinnerDoc {
    type Message = ExampleProps;
    type Properties = ();
    fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
        SpinnerDoc {
            // Forward prop-control changes straight back as messages.
            callback: link.callback(|x| x),
            // NOTE(review): initial size 10 is below the smallest selectable
            // option (20) — confirm this default is intentional.
            state: ExampleProps {
                intent: None,
                size: 10,
            },
        }
    }
    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        // A message is a full replacement of the example props.
        self.state = msg;
        true
    }
    fn change(&mut self, _props: Self::Properties) -> ShouldRender {
        true
    }
    fn view(&self) -> Html {
        let example_props = self.state.clone();
        // Raw HTML of this example's source code, generated at build time.
        let source = crate::include_raw_html!(
            concat!(env!("OUT_DIR"), "/", file!(), ".html"),
            "bp3-code-block"
        );
        html! {
            <div>
                <H1 class=classes!("docs-title")>{"Spinner"}</H1>
                <div>
                    <ExampleContainer
                        source=source
                        props=Some(html! {
                            <SpinnerProps
                                callback={self.callback.clone()}
                                props=example_props.clone()
                            />
                        })
                    >
                        <Example with example_props />
                    </ExampleContainer>
                </div>
            </div>
        }
    }
}
// Generates the `SpinnerProps` controls component: two dropdowns that feed
// intent/size changes back through `update_props`. Macro body left untouched.
crate::build_example_prop_component! {
    SpinnerProps for ExampleProps =>
    fn view(&self) -> Html {
        html! {
            <div>
                <H5>{"Props"}</H5>
                <div>
                    <p>{"Select intent:"}</p>
                    <HtmlSelect<Option<Intent>>
                        options={vec![
                            (None, "None".to_string()),
                            (Some(Intent::Primary), "Primary".to_string()),
                            (Some(Intent::Success), "Success".to_string()),
                            (Some(Intent::Warning), "Warning".to_string()),
                            (Some(Intent::Danger), "Danger".to_string()),
                        ]}
                        onchange=self.update_props(|props, intent| ExampleProps {
                            intent,
                            ..props
                        })
                    />
                    <p>{"Select Size:"}</p>
                    <HtmlSelect<u32>
                        options={vec![
                            (20, "Small".to_string()),
                            (50, "Standard".to_string()),
                            (100, "Large".to_string()),
                        ]}
                        onchange=self.update_props(|props, size| ExampleProps {
                            size,
                            ..props
                        })
                    />
                </div>
            </div>
        }
    }
}
|
// NOTE(review): svd2rust-generated register API — code kept byte-identical.
// Reader/writer types for the one-named-state reset fields are shared via
// `pub use` re-exports of the DCMIRST types below.
#[doc = "Register `AHB2RSTR` reader"]
pub type R = crate::R<AHB2RSTR_SPEC>;
#[doc = "Register `AHB2RSTR` writer"]
pub type W = crate::W<AHB2RSTR_SPEC>;
#[doc = "Field `DCMIRST` reader - Camera interface reset"]
pub type DCMIRST_R = crate::BitReader<DCMIRST_A>;
#[doc = "Camera interface reset\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DCMIRST_A {
    #[doc = "1: Reset the selected module"]
    Reset = 1,
}
impl From<DCMIRST_A> for bool {
    #[inline(always)]
    fn from(variant: DCMIRST_A) -> Self {
        variant as u8 != 0
    }
}
impl DCMIRST_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<DCMIRST_A> {
        // Only the `1` state is named in the SVD, so `0` maps to `None`.
        match self.bits {
            true => Some(DCMIRST_A::Reset),
            _ => None,
        }
    }
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn is_reset(&self) -> bool {
        *self == DCMIRST_A::Reset
    }
}
#[doc = "Field `DCMIRST` writer - Camera interface reset"]
pub type DCMIRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DCMIRST_A>;
impl<'a, REG, const O: u8> DCMIRST_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn reset(self) -> &'a mut crate::W<REG> {
        self.variant(DCMIRST_A::Reset)
    }
}
#[doc = "Field `CRYPRST` reader - Cryptographic module reset"]
pub use DCMIRST_R as CRYPRST_R;
#[doc = "Field `HSAHRST` reader - Hash module reset"]
pub use DCMIRST_R as HSAHRST_R;
#[doc = "Field `RNGRST` reader - Random number generator module reset"]
pub use DCMIRST_R as RNGRST_R;
#[doc = "Field `OTGFSRST` reader - USB OTG FS module reset"]
pub use DCMIRST_R as OTGFSRST_R;
#[doc = "Field `CRYPRST` writer - Cryptographic module reset"]
pub use DCMIRST_W as CRYPRST_W;
#[doc = "Field `HSAHRST` writer - Hash module reset"]
pub use DCMIRST_W as HSAHRST_W;
#[doc = "Field `RNGRST` writer - Random number generator module reset"]
pub use DCMIRST_W as RNGRST_W;
#[doc = "Field `OTGFSRST` writer - USB OTG FS module reset"]
pub use DCMIRST_W as OTGFSRST_W;
impl R {
    #[doc = "Bit 0 - Camera interface reset"]
    #[inline(always)]
    pub fn dcmirst(&self) -> DCMIRST_R {
        DCMIRST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 4 - Cryptographic module reset"]
    #[inline(always)]
    pub fn cryprst(&self) -> CRYPRST_R {
        CRYPRST_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Hash module reset"]
    #[inline(always)]
    pub fn hsahrst(&self) -> HSAHRST_R {
        HSAHRST_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Random number generator module reset"]
    #[inline(always)]
    pub fn rngrst(&self) -> RNGRST_R {
        RNGRST_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - USB OTG FS module reset"]
    #[inline(always)]
    pub fn otgfsrst(&self) -> OTGFSRST_R {
        OTGFSRST_R::new(((self.bits >> 7) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Camera interface reset"]
    #[inline(always)]
    #[must_use]
    pub fn dcmirst(&mut self) -> DCMIRST_W<AHB2RSTR_SPEC, 0> {
        DCMIRST_W::new(self)
    }
    #[doc = "Bit 4 - Cryptographic module reset"]
    #[inline(always)]
    #[must_use]
    pub fn cryprst(&mut self) -> CRYPRST_W<AHB2RSTR_SPEC, 4> {
        CRYPRST_W::new(self)
    }
    #[doc = "Bit 5 - Hash module reset"]
    #[inline(always)]
    #[must_use]
    pub fn hsahrst(&mut self) -> HSAHRST_W<AHB2RSTR_SPEC, 5> {
        HSAHRST_W::new(self)
    }
    #[doc = "Bit 6 - Random number generator module reset"]
    #[inline(always)]
    #[must_use]
    pub fn rngrst(&mut self) -> RNGRST_W<AHB2RSTR_SPEC, 6> {
        RNGRST_W::new(self)
    }
    #[doc = "Bit 7 - USB OTG FS module reset"]
    #[inline(always)]
    #[must_use]
    pub fn otgfsrst(&mut self) -> OTGFSRST_W<AHB2RSTR_SPEC, 7> {
        OTGFSRST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "AHB2 peripheral reset register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahb2rstr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahb2rstr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AHB2RSTR_SPEC;
impl crate::RegisterSpec for AHB2RSTR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ahb2rstr::R`](R) reader structure"]
impl crate::Readable for AHB2RSTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ahb2rstr::W`](W) writer structure"]
impl crate::Writable for AHB2RSTR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AHB2RSTR to value 0"]
impl crate::Resettable for AHB2RSTR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::collections::HashMap;
use std::io::Write;
use std::path::{Path,PathBuf};
/// A group of barcodes (with per-barcode values) connected to each other
/// by single-edit (substitution/insertion/deletion) steps.
#[derive(Debug)]
pub struct Neighborhood<T> {
    // Entries in discovery order; not sorted.
    barcodes: Vec<(Vec<u8>, T)>
}
impl<T> Neighborhood<T> {
    /// An empty neighborhood.
    fn new() -> Self {
        Neighborhood {
            barcodes: Vec::new()
        }
    }
    /// Add a barcode/value pair to this neighborhood.
    /// (Dropped the redundant `-> ()` return annotation.)
    fn insert(&mut self, barcode: Vec<u8>, value: T) {
        self.barcodes.push((barcode, value));
    }
    /// Iterate entries by reference, in discovery order.
    pub fn barcodes(&self) -> impl Iterator<Item = &(Vec<u8>, T)> {
        self.barcodes.iter()
    }
    /// Consume self, yielding owned entries.
    pub fn into_barcodes(self) -> impl Iterator<Item = (Vec<u8>, T)> {
        self.barcodes.into_iter()
    }
    pub fn len(&self) -> usize { self.barcodes.len() }
    // Collecting a neighborhood:
    // 1. Pick a node arbitrarily
    //    a. remove from key set
    //    b. initialize work stack with node
    // 2. Handle a node from work stack
    //    a. check key set for all near-neighbors
    //       i. remove near-neighbor from key set
    //       ii. push near-neighbor onto work stack
    //    b. add node to neighborhood
    // 3. Repeat handling nodes from work stack until empty
    /// Partition `bc_map` into connected components under edit distance 1.
    pub fn gather_neighborhoods(mut bc_map: HashMap<Vec<u8>, T>) -> Vec<Neighborhood<T>>
    {
        let mut neighborhoods = Vec::new();
        // Each outer iteration seeds a new neighborhood from an arbitrary
        // remaining barcode and flood-fills its near-neighbors.
        while let Some(start) = bc_map.keys().next().map(|k| k.to_vec()) {
            let value = bc_map.remove(&start).expect("start key was just observed");
            let mut neighborhood = Neighborhood::new();
            let mut work_stack = vec![(start, value)];
            // `while let` + `pop` replaces the old `len() > 0` / `unwrap` pair.
            while let Some((curr, curr_value)) = work_stack.pop() {
                let neighbors = Substitutions::new(&curr)
                    .chain(Deletions::new(&curr))
                    .chain(Insertions::new(&curr));
                for neighbor in neighbors {
                    // Single-lookup `remove` replaces `contains_key` + `remove`.
                    if let Some(neighbor_value) = bc_map.remove(&neighbor) {
                        work_stack.push((neighbor, neighbor_value));
                    }
                }
                neighborhood.insert(curr, curr_value);
            }
            neighborhoods.push(neighborhood);
        }
        neighborhoods
    }
}
impl <T: OrdEntry> Neighborhood<T> {
    /// Sort this neighborhood's entries (descending by entry order).
    pub fn into_sorted(self) -> SortedNeighborhood<T> {
        SortedNeighborhood::new(self.barcodes)
    }
}
/// Ordering used to rank neighborhood entries (the largest becomes the key).
pub trait OrdEntry {
    fn entry_cmp(&self, other: &Self) -> std::cmp::Ordering;
}
impl OrdEntry for usize {
    // Plain numeric ordering for counts.
    fn entry_cmp(&self, other: &Self) -> std::cmp::Ordering { self.cmp(other) }
}
impl <T> OrdEntry for Vec<T> {
    // Entry lists are ranked by length only, not by contents.
    fn entry_cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.len().cmp(&other.len())
    }
}
/// A neighborhood whose entries are sorted descending by entry order
/// (ties broken by barcode); the first entry is the key barcode.
#[derive(Debug)]
pub struct SortedNeighborhood<T> {
    // Entries in sorted order; see `cmp_entries`.
    barcodes: Vec<(Vec<u8>, T)>
}
impl <T> SortedNeighborhood<T> {
    /// Iterate entries by reference, in sorted (descending) order.
    pub fn barcodes(&self) -> impl Iterator<Item = &(Vec<u8>, T)> {
        self.barcodes.iter()
    }
    /// Consume self, yielding owned entries in sorted order.
    pub fn into_barcodes(self) -> impl Iterator<Item = (Vec<u8>, T)> {
        self.barcodes.into_iter()
    }
    pub fn len(&self) -> usize { self.barcodes.len() }
    /// The first (top-ranked) entry — the neighborhood's key barcode.
    /// Panics if the neighborhood is empty (`first().unwrap()`).
    pub fn key_barcode(&self) -> (&[u8], &T) {
        let (keybc, keyct) = self.barcodes.first().unwrap();
        (keybc, keyct)
    }
}
impl<T: OrdEntry> SortedNeighborhood<T> {
    /// Sort entries descending by entry order, ties broken by barcode.
    pub fn new(mut barcodes: Vec<(Vec<u8>, T)>) -> Self {
        barcodes.sort_unstable_by(Self::cmp_entries);
        SortedNeighborhood { barcodes }
    }
    /// Reversed entry comparison (largest first); barcode order breaks ties.
    fn cmp_entries((bcl, ctl): &(Vec<u8>, T), (bcr, ctr): &(Vec<u8>, T)) -> std::cmp::Ordering {
        ctl.entry_cmp(ctr).reverse().then_with(|| bcl.cmp(bcr))
    }
}
impl SortedNeighborhood<usize> {
    /// Total read count across the whole neighborhood.
    pub fn total(&self) -> usize {
        self.barcodes().map(|(_, ct)| *ct).sum()
    }
    /// Write per-barcode and per-neighborhood count tables for `nbhd_iter`
    /// to `<filebase>-raw-barcodes.txt` and `<filebase>-nbhds.txt`.
    pub fn write_tables<'a, I>(filebase: &str, nbhd_iter: I) -> Result<(), std::io::Error>
    where I: Iterator<Item = &'a SortedNeighborhood<usize>>
    {
        // Neighborhood grouping statistics
        let mut barcodes_out = std::fs::File::create(Self::output_filename(filebase, "-raw-barcodes.txt"))?;
        let mut nbhds_out = std::fs::File::create(Self::output_filename(filebase, "-nbhds.txt"))?;
        writeln!(barcodes_out, "{}", SortedNeighborhood::barcode_counts_header())?;
        writeln!(nbhds_out, "{}", SortedNeighborhood::nbhd_counts_header())?;
        for nbhd in nbhd_iter {
            nbhd.write_barcode_counts(&mut barcodes_out)?;
            nbhd.write_nbhd_counts(&mut nbhds_out)?;
        }
        Ok(())
    }
    /// Column header matching `write_barcode_counts` rows.
    pub fn barcode_counts_header() -> &'static str { "barcode\tneighborhood\tcount\ttotal\tfraction" }
    /// One tab-separated row per member barcode: its count and its fraction
    /// of the neighborhood total, keyed by the neighborhood's top barcode.
    pub fn write_barcode_counts<W: Write>(&self, out: &mut W) -> Result<(), std::io::Error> {
        let (keybc, _keyct) = self.key_barcode();
        let total = self.total();
        for (bc, ct) in self.barcodes() {
            write!(out, "{}\t{}\t{}\t{}\t{:0.3}\n",
                String::from_utf8_lossy(bc),
                String::from_utf8_lossy(keybc),
                ct, total, (*ct as f64) / (total as f64))?;
        }
        Ok(())
    }
    /// Column header matching `write_nbhd_counts` rows.
    pub fn nbhd_counts_header() -> &'static str { "neighborhood\tnum_barcodes\ttotal\tnkey\tfract_nbhd" }
    /// One summary row for this neighborhood: size, total count, key count,
    /// and the key barcode's fraction of the total.
    pub fn write_nbhd_counts<W: Write>(&self, out: &mut W) -> Result<(), std::io::Error> {
        let (keybc, keyct) = self.key_barcode();
        let total = self.total();
        write!(out, "{}\t{}\t{}\t{}\t{:0.3}\n",
            String::from_utf8_lossy(keybc),
            self.len(), total, *keyct,
            (*keyct as f64) / (total as f64))
    }
    // Appends `name` to the file name of `output_base`, preserving its directory.
    fn output_filename(output_base: &str, name: &str) -> PathBuf {
        let base_ref: &Path = output_base.as_ref();
        let mut namebase = base_ref
            .file_name()
            .map_or(std::ffi::OsString::new(), std::ffi::OsStr::to_os_string);
        namebase.push(name);
        base_ref.with_file_name(namebase)
    }
}
impl<T> SortedNeighborhood<Vec<T>> {
    /// Collapse each per-barcode entry list to its length, preserving order.
    pub fn to_counts(&self) -> SortedNeighborhood<usize> {
        let barcodes = self
            .barcodes
            .iter()
            .map(|(bc, ents)| (bc.to_vec(), ents.len()))
            .collect();
        SortedNeighborhood { barcodes }
    }
}
// Switch to an interface where mutations (acting on a slice buffer)
// are returned to avoid allocation.
const NTS_LEN: usize = 4;
static NTS: [u8; NTS_LEN] = [b'A', b'C', b'G', b'T'];
/// Iterator over every single-nucleotide substitution of `original`.
/// The unchanged sequence itself is never yielded.
struct Substitutions<'a> {
    original: &'a [u8],
    position: usize,
    nt: usize,
}
impl<'a> Substitutions<'a> {
    pub fn new(original: &'a [u8]) -> Self {
        Substitutions { original, position: 0, nt: 0 }
    }
}
impl<'a> Iterator for Substitutions<'a> {
    type Item = Vec<u8>;
    fn next(&mut self) -> Option<Self::Item> {
        // Iterative rewrite of the original tail-recursive scan.
        loop {
            if self.position >= self.original.len() {
                return None;
            }
            if self.nt >= NTS_LEN {
                self.position += 1;
                self.nt = 0;
                continue;
            }
            let replacement = NTS[self.nt];
            self.nt += 1;
            if self.original[self.position] == replacement {
                // Skip the identity "substitution".
                continue;
            }
            let mut variant = self.original.to_vec();
            variant[self.position] = replacement;
            return Some(variant);
        }
    }
}
/// Iterator over every single-nucleotide insertion into `original`
/// (positions 0..=len, four nucleotides each).
struct Insertions<'a> {
    original: &'a [u8],
    position: usize,
    nt: usize,
}
impl<'a> Insertions<'a> {
    pub fn new(original: &'a [u8]) -> Self {
        Insertions { original, position: 0, nt: 0 }
    }
}
impl<'a> Iterator for Insertions<'a> {
    type Item = Vec<u8>;
    fn next(&mut self) -> Option<Self::Item> {
        // Iterative rewrite of the original tail-recursive scan. Note the
        // inclusive bound: inserting after the last byte is valid.
        loop {
            if self.position > self.original.len() {
                return None;
            }
            if self.nt >= NTS_LEN {
                self.position += 1;
                self.nt = 0;
                continue;
            }
            let mut variant = Vec::with_capacity(self.original.len() + 1);
            variant.extend_from_slice(&self.original[..self.position]);
            variant.push(NTS[self.nt]);
            variant.extend_from_slice(&self.original[self.position..]);
            self.nt += 1;
            return Some(variant);
        }
    }
}
/// Iterator over every single-nucleotide deletion variant of `original`
/// (one variant per position, with that position removed).
struct Deletions<'a> {
    original: &'a [u8],  // sequence being mutated
    position: usize,     // index of the nucleotide deleted next
}
impl<'a> Deletions<'a> {
    /// Begin enumerating single-nucleotide deletions from `original`.
    pub fn new(original: &'a [u8]) -> Self {
        Deletions {
            original,
            position: 0,
        }
    }
}
impl<'a> Iterator for Deletions<'a> {
    type Item = Vec<u8>;

    /// Yield `original` with the nucleotide at `self.position` removed,
    /// then advance to the next position.
    fn next(&mut self) -> Option<Self::Item> {
        if self.position >= self.original.len() {
            return None;
        }
        let (head, tail) = self.original.split_at(self.position);
        let mut variant = Vec::with_capacity(self.original.len() - 1);
        variant.extend_from_slice(head);
        variant.extend_from_slice(&tail[1..]);
        self.position += 1;
        Some(variant)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::Borrow;
    use counts::*;

    /// Collect `(barcode, count)` pairs into a map, panicking if the same
    /// barcode appears twice (the fixtures must be unambiguous).
    fn vec_count_map<V: AsRef<[u8]>, I: IntoIterator<Item = (V, usize)>>(bc_counts: I) -> HashMap<Vec<u8>, usize> {
        let mut ctmap = HashMap::new();
        for (bc, ct) in bc_counts {
            if ctmap.insert(bc.as_ref().to_vec(), ct).is_some() {
                panic!("Duplicate barcode {}",
                       String::from_utf8_lossy(bc.as_ref()));
            }
        }
        ctmap
    }

    /// Flatten a neighborhood into a barcode -> value map so neighborhoods
    /// can be compared irrespective of internal ordering.
    fn nbhd_map<T: Clone, N: Borrow<Neighborhood<T>>>(nbhd: N) -> HashMap<Vec<u8>, T> {
        nbhd.borrow().barcodes().map(|pair| pair.clone()).collect()
    }

    // All four barcodes are linked by single-base edits, so gathering must
    // produce exactly one neighborhood containing all of them.
    #[test]
    fn single_nbhd() {
        let count_vec = vec![(b"ACGTACGT", 5),
                             (b"ACGTTCGT", 3),
                             (b"ACATACGT", 2),
                             (b"ACATATGT", 1)];
        let nbhds = Neighborhood::gather_neighborhoods(vec_count_map(count_vec.clone()));
        assert_eq!(nbhds.len(), 1);
        let exp_nbhd: HashMap<Vec<u8>, usize> = vec_count_map(count_vec.clone());
        let act_nbhd: HashMap<Vec<u8>, usize> = nbhd_map(&nbhds[0]);
        assert_eq!(exp_nbhd, act_nbhd);
    }

    // Two clusters with no cross-cluster single-edit links must stay separate.
    // The order in which the neighborhoods come back is unspecified, hence
    // the either/or comparison at the end.
    #[test]
    fn two_nbhds() {
        let count_vec_1 = vec![(b"ACGTACGT", 5),
                               (b"ACGTTCGT", 3),
                               (b"ACATACGT", 2)];
        let count_vec_2 = vec![(b"CGTACGTA", 8),
                               (b"CGTACGAA", 3)];
        let mut count_vec = count_vec_1.clone();
        count_vec.extend(count_vec_2.clone());
        let nbhds = Neighborhood::gather_neighborhoods(vec_count_map(count_vec));
        assert_eq!(nbhds.len(), 2);
        let exp_nbhd_1 = vec_count_map(count_vec_1.clone());
        let exp_nbhd_2 = vec_count_map(count_vec_2.clone());
        if nbhd_map(&nbhds[0]) == exp_nbhd_1 {
            assert_eq!(nbhd_map(&nbhds[1]), exp_nbhd_2);
        } else {
            assert_eq!(nbhd_map(&nbhds[0]), exp_nbhd_2);
            assert_eq!(nbhd_map(&nbhds[1]), exp_nbhd_1);
        }
    }

    // Same idea but parsed from a count table (exercising SampleCounts::read),
    // including barcodes of differing lengths linked by insertions/deletions.
    // Neighborhood membership is compared order-insensitively by sorting.
    #[test]
    fn three_nbhds() {
        let count_table = r#"ACGTACGT	5
ACGTTCGT	3
ACATACGT	2
CGTACGTA	8
CGTACGAA	4
GTACGTACG	7
GTACGTCG	1
GTACGCACG	9
GTACGCATCG	6"#;
        let count_map = SampleCounts::read(count_table.as_bytes()).unwrap().count_map();
        let nbhds = Neighborhood::gather_neighborhoods(count_map);
        assert_eq!(nbhds.len(), 3);
        let mut exp_a = vec![b"ACGTACGT".to_vec(),
                             b"ACGTTCGT".to_vec(),
                             b"ACATACGT".to_vec()];
        exp_a.sort();
        let mut exp_c = vec![b"CGTACGTA".to_vec(),
                             b"CGTACGAA".to_vec()];
        exp_c.sort();
        let mut exp_g = vec![b"GTACGTACG".to_vec(),
                             b"GTACGTCG".to_vec(),
                             b"GTACGCACG".to_vec(),
                             b"GTACGCATCG".to_vec()];
        exp_g.sort();
        let mut exp = vec![exp_a, exp_c, exp_g];
        exp.sort();
        let mut act: Vec<Vec<Vec<u8>>> = nbhds.iter().map(|n| {
            let mut n_act: Vec<Vec<u8>> = n.barcodes().map(|(bc, _ct)| bc.clone()).collect();
            n_act.sort();
            n_act }).collect();
        act.sort();
        assert_eq!(act, exp);
    }
}
|
//! Temporary files and directories.
//!
//! - Use the [`tempfile()`] function for temporary files
//! - Use the [`tempdir()`] function for temporary directories.
//!
//! # Design
//!
//! This crate provides several approaches to creating temporary files and directories.
//! [`tempfile()`] relies on the OS to remove the temporary file once the last handle is closed.
//! [`TempDir`] and [`NamedTempFile`] both rely on Rust destructors for cleanup.
//!
//! When choosing between the temporary file variants, prefer `tempfile`
//! unless you either need to know the file's path or to be able to persist it.
//!
//! ## Resource Leaking
//!
//! `tempfile` will (almost) never fail to clean up temporary resources. However `TempDir` and `NamedTempFile` will
//! fail to do so if their destructors don't run. This is because `tempfile` relies on the OS to clean up the
//! underlying file, while `TempDir` and `NamedTempFile` rely on Rust destructors to do so.
//! Destructors may fail to run if the process exits through an unhandled signal interrupt (like `SIGINT`),
//! or if the instance is declared statically (like with [`lazy_static`]), among other possible
//! reasons.
//!
//! ## Security
//!
//! In the presence of a pathological temporary file cleaner, relying on file paths is unsafe because
//! such a cleaner could delete the temporary file, which an attacker could then replace.
//!
//! `tempfile` doesn't rely on file paths so this isn't an issue. However, `NamedTempFile` does
//! rely on file paths for _some_ operations. See the security documentation on
//! the `NamedTempFile` type for more information.
//!
//! ## Early drop pitfall
//!
//! Because `TempDir` and `NamedTempFile` rely on their destructors for cleanup, this can lead
//! to an unexpected early removal of the directory/file, usually when working with APIs which are
//! generic over `AsRef<Path>`. Consider the following example:
//!
//! ```no_run
//! # use tempfile::tempdir;
//! # use std::io;
//! # use std::process::Command;
//! # fn main() {
//! # if let Err(_) = run() {
//! # ::std::process::exit(1);
//! # }
//! # }
//! # fn run() -> Result<(), io::Error> {
//! // Create a directory inside of `std::env::temp_dir()`.
//! let temp_dir = tempdir()?;
//!
//! // Spawn the `touch` command inside the temporary directory and collect the exit status
//! // Note that `temp_dir` is **not** moved into `current_dir`, but passed as a reference
//! let exit_status = Command::new("touch").arg("tmp").current_dir(&temp_dir).status()?;
//! assert!(exit_status.success());
//!
//! # Ok(())
//! # }
//! ```
//!
//! This works because a reference to `temp_dir` is passed to `current_dir`, resulting in the
//! destructor of `temp_dir` being run after the `Command` has finished execution. Moving the
//! `TempDir` into the `current_dir` call would result in the `TempDir` being converted into
//! an internal representation, with the original value being dropped and the directory thus
//! being deleted, before the command can be executed.
//!
//! The `touch` command would fail with a `No such file or directory` error.
//!
//! ## Examples
//!
//! Create a temporary file and write some data into it:
//!
//! ```
//! use tempfile::tempfile;
//! use std::io::{self, Write};
//!
//! # fn main() {
//! # if let Err(_) = run() {
//! # ::std::process::exit(1);
//! # }
//! # }
//! # fn run() -> Result<(), io::Error> {
//! // Create a file inside of `std::env::temp_dir()`.
//! let mut file = tempfile()?;
//!
//! writeln!(file, "Brian was here. Briefly.")?;
//! # Ok(())
//! # }
//! ```
//!
//! Create a named temporary file and open an independent file handle:
//!
//! ```
//! use tempfile::NamedTempFile;
//! use std::io::{self, Write, Read};
//!
//! # fn main() {
//! # if let Err(_) = run() {
//! # ::std::process::exit(1);
//! # }
//! # }
//! # fn run() -> Result<(), io::Error> {
//! let text = "Brian was here. Briefly.";
//!
//! // Create a file inside of `std::env::temp_dir()`.
//! let mut file1 = NamedTempFile::new()?;
//!
//! // Re-open it.
//! let mut file2 = file1.reopen()?;
//!
//! // Write some test data to the first handle.
//! file1.write_all(text.as_bytes())?;
//!
//! // Read the test data using the second handle.
//! let mut buf = String::new();
//! file2.read_to_string(&mut buf)?;
//! assert_eq!(buf, text);
//! # Ok(())
//! # }
//! ```
//!
//! Create a temporary directory and add a file to it:
//!
//! ```
//! use tempfile::tempdir;
//! use std::fs::File;
//! use std::io::{self, Write};
//!
//! # fn main() {
//! # if let Err(_) = run() {
//! # ::std::process::exit(1);
//! # }
//! # }
//! # fn run() -> Result<(), io::Error> {
//! // Create a directory inside of `std::env::temp_dir()`.
//! let dir = tempdir()?;
//!
//! let file_path = dir.path().join("my-temporary-note.txt");
//! let mut file = File::create(file_path)?;
//! writeln!(file, "Brian was here. Briefly.")?;
//!
//! // By closing the `TempDir` explicitly, we can check that it has
//! // been deleted successfully. If we don't close it explicitly,
//! // the directory will still be deleted when `dir` goes out
//! // of scope, but we won't know whether deleting the directory
//! // succeeded.
//! drop(file);
//! dir.close()?;
//! # Ok(())
//! # }
//! ```
//!
//! [`tempfile()`]: fn.tempfile.html
//! [`tempdir()`]: fn.tempdir.html
//! [`TempDir`]: struct.TempDir.html
//! [`NamedTempFile`]: struct.NamedTempFile.html
//! [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
//! [`lazy_static`]: https://github.com/rust-lang-nursery/lazy-static.rs/issues/62
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://docs.rs/tempfile/3.1.0"
)]
#![cfg_attr(test, deny(warnings))]
#![deny(rust_2018_idioms)]
#![allow(clippy::redundant_field_names)]
#![cfg_attr(all(feature = "nightly", target_os = "wasi"), feature(wasi_ext))]
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
/// Upper bound on attempts to create a uniquely named temporary file or
/// directory before giving up (effectively unbounded in practice).
const NUM_RETRIES: u32 = 1 << 31;
/// Default number of random characters in generated temporary names.
const NUM_RAND_CHARS: usize = 6;
use std::ffi::OsStr;
use std::fs::OpenOptions;
use std::path::Path;
use std::{env, io};
mod dir;
mod error;
mod file;
mod spooled;
mod util;
pub use crate::dir::{tempdir, tempdir_in, TempDir};
pub use crate::file::{
tempfile, tempfile_in, NamedTempFile, PathPersistError, PersistError, TempPath,
};
pub use crate::spooled::{spooled_tempfile, SpooledTempFile};
/// Create a new temporary file or directory with custom parameters.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Builder<'a, 'b> {
    random_len: usize, // number of random characters in the generated name
    prefix: &'a OsStr, // filename prefix (default ".tmp")
    suffix: &'b OsStr, // filename suffix (default empty)
    append: bool,      // whether the file is opened in append mode
}
impl<'a, 'b> Default for Builder<'a, 'b> {
    /// Stock configuration: `.tmp` prefix, empty suffix,
    /// `NUM_RAND_CHARS` random characters, append mode off.
    fn default() -> Self {
        Self {
            prefix: OsStr::new(".tmp"),
            suffix: OsStr::new(""),
            random_len: crate::NUM_RAND_CHARS,
            append: false,
        }
    }
}
impl<'a, 'b> Builder<'a, 'b> {
    /// Create a new `Builder`.
    ///
    /// # Examples
    ///
    /// Create a named temporary file and write some data into it:
    ///
    /// ```
    /// # use std::io;
    /// # use std::ffi::OsStr;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// use tempfile::Builder;
    ///
    /// let named_tempfile = Builder::new()
    ///     .prefix("my-temporary-note")
    ///     .suffix(".txt")
    ///     .rand_bytes(5)
    ///     .tempfile()?;
    ///
    /// let name = named_tempfile
    ///     .path()
    ///     .file_name().and_then(OsStr::to_str);
    ///
    /// if let Some(name) = name {
    ///     assert!(name.starts_with("my-temporary-note"));
    ///     assert!(name.ends_with(".txt"));
    ///     assert_eq!(name.len(), "my-temporary-note.txt".len() + 5);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Create a temporary directory and add a file to it:
    ///
    /// ```
    /// # use std::io::{self, Write};
    /// # use std::fs::File;
    /// # use std::ffi::OsStr;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// use tempfile::Builder;
    ///
    /// let dir = Builder::new()
    ///     .prefix("my-temporary-dir")
    ///     .rand_bytes(5)
    ///     .tempdir()?;
    ///
    /// let file_path = dir.path().join("my-temporary-note.txt");
    /// let mut file = File::create(file_path)?;
    /// writeln!(file, "Brian was here. Briefly.")?;
    ///
    /// // By closing the `TempDir` explicitly, we can check that it has
    /// // been deleted successfully. If we don't close it explicitly,
    /// // the directory will still be deleted when `dir` goes out
    /// // of scope, but we won't know whether deleting the directory
    /// // succeeded.
    /// drop(file);
    /// dir.close()?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Create a temporary directory with a chosen prefix under a chosen folder:
    ///
    /// ```ignore
    /// let dir = Builder::new()
    ///     .prefix("my-temporary-dir")
    ///     .tempdir_in("folder-with-tempdirs")?;
    /// ```
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Set a custom filename prefix.
    ///
    /// Path separators are legal but not advisable.
    /// Default: `.tmp`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let named_tempfile = Builder::new()
    ///     .prefix("my-temporary-note")
    ///     .tempfile()?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn prefix<S: AsRef<OsStr> + ?Sized>(&mut self, prefix: &'a S) -> &mut Self {
        self.prefix = prefix.as_ref();
        self
    }

    /// Set a custom filename suffix.
    ///
    /// Path separators are legal but not advisable.
    /// Default: empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let named_tempfile = Builder::new()
    ///     .suffix(".txt")
    ///     .tempfile()?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn suffix<S: AsRef<OsStr> + ?Sized>(&mut self, suffix: &'b S) -> &mut Self {
        self.suffix = suffix.as_ref();
        self
    }

    /// Set the number of random bytes.
    ///
    /// Default: `6`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let named_tempfile = Builder::new()
    ///     .rand_bytes(5)
    ///     .tempfile()?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn rand_bytes(&mut self, rand: usize) -> &mut Self {
        self.random_len = rand;
        self
    }

    /// Set the file to be opened in append mode.
    ///
    /// Default: `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let named_tempfile = Builder::new()
    ///     .append(true)
    ///     .tempfile()?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn append(&mut self, append: bool) -> &mut Self {
        self.append = append;
        self
    }

    /// Create the named temporary file.
    ///
    /// # Security
    ///
    /// See [the security][security] docs on `NamedTempFile`.
    ///
    /// # Resource leaking
    ///
    /// See [the resource leaking][resource-leaking] docs on `NamedTempFile`.
    ///
    /// # Errors
    ///
    /// If the file cannot be created, `Err` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let tempfile = Builder::new().tempfile()?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [security]: struct.NamedTempFile.html#security
    /// [resource-leaking]: struct.NamedTempFile.html#resource-leaking
    pub fn tempfile(&self) -> io::Result<NamedTempFile> {
        self.tempfile_in(env::temp_dir())
    }

    /// Create the named temporary file in the specified directory.
    ///
    /// # Security
    ///
    /// See [the security][security] docs on `NamedTempFile`.
    ///
    /// # Resource leaking
    ///
    /// See [the resource leaking][resource-leaking] docs on `NamedTempFile`.
    ///
    /// # Errors
    ///
    /// If the file cannot be created, `Err` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// let tempfile = Builder::new().tempfile_in("./")?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [security]: struct.NamedTempFile.html#security
    /// [resource-leaking]: struct.NamedTempFile.html#resource-leaking
    pub fn tempfile_in<P: AsRef<Path>>(&self, dir: P) -> io::Result<NamedTempFile> {
        // `create_helper` generates candidate names (prefix + random chars +
        // suffix) and retries on collision; the closure performs the actual
        // open, honoring the builder's `append` flag.
        util::create_helper(
            dir.as_ref(),
            self.prefix,
            self.suffix,
            self.random_len,
            |path| file::create_named(path, OpenOptions::new().append(self.append)),
        )
    }

    /// Attempts to make a temporary directory inside of `env::temp_dir()` whose
    /// name will have the prefix, `prefix`. The directory and
    /// everything inside it will be automatically deleted once the
    /// returned `TempDir` is destroyed.
    ///
    /// # Resource leaking
    ///
    /// See [the resource leaking][resource-leaking] docs on `TempDir`.
    ///
    /// # Errors
    ///
    /// If the directory can not be created, `Err` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fs::File;
    /// use std::io::Write;
    /// use tempfile::Builder;
    ///
    /// # use std::io;
    /// # fn run() -> Result<(), io::Error> {
    /// let tmp_dir = Builder::new().tempdir()?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [resource-leaking]: struct.TempDir.html#resource-leaking
    pub fn tempdir(&self) -> io::Result<TempDir> {
        self.tempdir_in(env::temp_dir())
    }

    /// Attempts to make a temporary directory inside of `dir`.
    /// The directory and everything inside it will be automatically
    /// deleted once the returned `TempDir` is destroyed.
    ///
    /// # Resource leaking
    ///
    /// See [the resource leaking][resource-leaking] docs on `TempDir`.
    ///
    /// # Errors
    ///
    /// If the directory can not be created, `Err` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fs::{self, File};
    /// use std::io::Write;
    /// use tempfile::Builder;
    ///
    /// # use std::io;
    /// # fn run() -> Result<(), io::Error> {
    /// let tmp_dir = Builder::new().tempdir_in("./")?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [resource-leaking]: struct.TempDir.html#resource-leaking
    pub fn tempdir_in<P: AsRef<Path>>(&self, dir: P) -> io::Result<TempDir> {
        // Resolve relative paths against the current directory before
        // creating anything — presumably so the path stored in the returned
        // `TempDir` stays valid if the process later changes its working
        // directory (TODO confirm against `dir::create`). `storage` only
        // exists to own the joined path while `dir` borrows it.
        let storage;
        let mut dir = dir.as_ref();
        if !dir.is_absolute() {
            let cur_dir = env::current_dir()?;
            storage = cur_dir.join(dir);
            dir = &storage;
        }
        util::create_helper(dir, self.prefix, self.suffix, self.random_len, dir::create)
    }

    /// Attempts to create a temporary file (or file-like object) using the
    /// provided closure. The closure is passed a temporary file path and
    /// returns an [`std::io::Result`]. The path provided to the closure will be
    /// inside of [`std::env::temp_dir()`]. Use [`Builder::make_in`] to provide
    /// a custom temporary directory. If the closure returns one of the
    /// following errors, then another randomized file path is tried:
    /// - [`std::io::ErrorKind::AlreadyExists`]
    /// - [`std::io::ErrorKind::AddrInUse`]
    ///
    /// This can be helpful for taking full control over the file creation, but
    /// leaving the temporary file path construction up to the library. This
    /// also enables creating a temporary UNIX domain socket, since it is not
    /// possible to bind to a socket that already exists.
    ///
    /// Note that [`Builder::append`] is ignored when using [`Builder::make`].
    ///
    /// # Security
    ///
    /// This has the same [security implications][security] as
    /// [`NamedTempFile`], but with additional caveats. Specifically, it is up
    /// to the closure to ensure that the file does not exist and that such a
    /// check is *atomic*. Otherwise, a [time-of-check to time-of-use
    /// bug][TOCTOU] could be introduced.
    ///
    /// For example, the following is **not** secure:
    ///
    /// ```
    /// # use std::io;
    /// # use std::fs::File;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// // This is NOT secure!
    /// let tempfile = Builder::new().make(|path| {
    ///     if path.is_file() {
    ///         return Err(io::ErrorKind::AlreadyExists.into());
    ///     }
    ///
    ///     // Between the check above and the usage below, an attacker could
    ///     // have replaced `path` with another file, which would get truncated
    ///     // by `File::create`.
    ///
    ///     File::create(path)
    /// })?;
    /// # Ok(())
    /// # }
    /// ```
    /// Note that simply using [`std::fs::File::create`] alone is not correct
    /// because it does not fail if the file already exists:
    /// ```
    /// # use std::io;
    /// # use std::fs::File;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// // This could overwrite an existing file!
    /// let tempfile = Builder::new().make(|path| File::create(path))?;
    /// # Ok(())
    /// # }
    /// ```
    /// For creating regular temporary files, use [`Builder::tempfile`] instead
    /// to avoid these problems. This function is meant to enable more exotic
    /// use-cases.
    ///
    /// # Resource leaking
    ///
    /// See [the resource leaking][resource-leaking] docs on `NamedTempFile`.
    ///
    /// # Errors
    ///
    /// If the closure returns any error besides
    /// [`std::io::ErrorKind::AlreadyExists`] or
    /// [`std::io::ErrorKind::AddrInUse`], then `Err` is returned.
    ///
    /// # Examples
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// # #[cfg(unix)]
    /// use std::os::unix::net::UnixListener;
    /// # #[cfg(unix)]
    /// let tempsock = Builder::new().make(|path| UnixListener::bind(path))?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [TOCTOU]: https://en.wikipedia.org/wiki/Time-of-check_to_time-of-use
    /// [security]: struct.NamedTempFile.html#security
    /// [resource-leaking]: struct.NamedTempFile.html#resource-leaking
    pub fn make<F, R>(&self, f: F) -> io::Result<NamedTempFile<R>>
    where
        F: FnMut(&Path) -> io::Result<R>,
    {
        self.make_in(env::temp_dir(), f)
    }

    /// This is the same as [`Builder::make`], except `dir` is used as the base
    /// directory for the temporary file path.
    ///
    /// See [`Builder::make`] for more details and security implications.
    ///
    /// # Examples
    /// ```
    /// # use std::io;
    /// # fn main() {
    /// #     if let Err(_) = run() {
    /// #         ::std::process::exit(1);
    /// #     }
    /// # }
    /// # fn run() -> Result<(), io::Error> {
    /// # use tempfile::Builder;
    /// # #[cfg(unix)]
    /// use std::os::unix::net::UnixListener;
    /// # #[cfg(unix)]
    /// let tempsock = Builder::new().make_in("./", |path| UnixListener::bind(path))?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn make_in<F, R, P>(&self, dir: P, mut f: F) -> io::Result<NamedTempFile<R>>
    where
        F: FnMut(&Path) -> io::Result<R>,
        P: AsRef<Path>,
    {
        // `create_helper` invokes the closure per candidate path, retrying on
        // AlreadyExists/AddrInUse as documented on `make`. On success the
        // closure's value is paired with a `TempPath` that deletes the path
        // on drop.
        util::create_helper(
            dir.as_ref(),
            self.prefix,
            self.suffix,
            self.random_len,
            move |path| {
                Ok(NamedTempFile::from_parts(
                    f(&path)?,
                    TempPath::from_path(path),
                ))
            },
        )
    }
}
|
use std::{fs::File, io::Write};
/// Testing out the zobrist hashing
use pacosako::{zobrist, DenseBoard};
use rand::RngCore;
fn main() {
    // Print the Zobrist hash of the fresh starting position as a sanity check,
    // then regenerate the random-key table with 12 * 64 * 2 entries
    // (presumably piece-kinds x squares x a per-square factor — TODO confirm
    // against `pacosako::zobrist`).
    let board = DenseBoard::new();
    println!("{}", zobrist::fresh_zobrist(&board));
    build_file("data/zobrist.txt", 12 * 64 * 2).unwrap();
}
/// Write `size` random `u64` values to `path` as a Rust-style array literal,
/// e.g. `[123, 456, 789u64]` — the `u64` suffix lands on the final element so
/// the literal type-checks if pasted into source.
///
/// Note: for `size == 0` one value is still emitted (the first write is
/// unconditional), preserving the original behavior.
///
/// # Errors
///
/// Returns any I/O error from creating or writing the file.
pub fn build_file(path: &str, size: usize) -> Result<(), std::io::Error> {
    use std::io::BufWriter;
    let mut rng = rand::thread_rng();
    // Buffer the many tiny `write!` calls; unbuffered, each one would hit the
    // OS separately.
    let mut file = BufWriter::new(File::create(path)?);
    write!(file, "[")?;
    write!(file, "{}", rng.next_u64())?;
    for _ in 1..size {
        write!(file, ", {}", rng.next_u64())?;
    }
    write!(file, "u64]")?;
    // Flush explicitly so write errors surface here instead of being
    // swallowed when the BufWriter is dropped.
    file.flush()?;
    Ok(())
}
|
use rand::{thread_rng, Rng};
use std::time::{Duration, Instant};
/// Make a zero delay backoff
pub fn instant() -> impl Backoff + Sized {
    // A bare `Duration` is itself a constant backoff (see `impl Backoff for
    // Duration`); zero duration means "retry immediately".
    Duration::from_secs(0)
}
/// Make a constant duration backoff
pub fn constant(duration: Duration) -> impl Backoff + Sized {
    // `Duration` implements `Backoff` by always returning itself.
    duration
}
/// A retry-delay policy. Combinator methods wrap `self` in adapter types,
/// building a policy from the inside out (e.g.
/// `constant(d).exponential().max_backoff(m)`).
pub trait Backoff: Send {
    /// Get the duration to wait for before attempting again
    fn next_retry(&mut self) -> Option<Duration>;
    /// Grow the backoff duration exponentially
    fn exponential(self) -> Exponential<Self>
    where
        Self: Sized,
    {
        Exponential {
            factor: 1,
            inner: self,
        }
    }
    /// Set the maximum backoff duration
    fn max_backoff(self, max: Duration) -> Max<Self>
    where
        Self: Sized,
    {
        Max { max, inner: self }
    }
    /// Set the minimum backoff duration
    fn min_backoff(self, min: Duration) -> Min<Self>
    where
        Self: Sized,
    {
        Min { min, inner: self }
    }
    /// Randomize the backoff duration.
    ///
    /// The returned duration will never be larger than the base duration and will
    /// never be smaller than `base * (1.0 - scale)`.
    fn jitter(self, scale: f64) -> Jitter<Self>
    where
        Self: Sized,
    {
        assert!(scale > 0.0, "scale must be larger than zero");
        assert!(scale <= 1.0, "scale must be smaller or equal to one");
        Jitter { scale, inner: self }
    }
    /// Limit the policy to at most `num` attempts; `next_retry` returns
    /// `None` once the first attempt plus `num - 1` retries are used up.
    ///
    /// # Panics
    ///
    /// Panics if `num` is zero.
    fn num_attempts(self, num: u32) -> MaxAttempts<Self>
    where
        Self: Sized,
    {
        assert!(num > 0, "num must be larger than zero");
        let num_attempts_left = num - 1;
        MaxAttempts {
            num_attempts_left,
            inner: self,
        }
    }
    /// Stop retrying once `deadline` has passed: `next_retry` returns `None`
    /// whenever it is called after that instant.
    fn deadline(self, deadline: Instant) -> Deadline<Self>
    where
        Self: Sized,
    {
        Deadline {
            deadline,
            inner: self,
        }
    }
}
/// A plain `Duration` is a constant backoff: it yields itself forever.
impl Backoff for Duration {
    fn next_retry(&mut self) -> Option<Duration> {
        Some(*self)
    }
}
/// Adapter that multiplies the inner backoff's duration by a factor that
/// doubles on every call. Built by [`Backoff::exponential`].
pub struct Exponential<S>
where
    S: Backoff,
{
    inner: S,
    factor: u32, // current multiplier; starts at 1 and doubles per retry
}
impl<S> Backoff for Exponential<S>
where
    S: Backoff,
{
    /// Return the inner duration scaled by the current factor, then double
    /// the factor for the next call.
    fn next_retry(&mut self) -> Option<Duration> {
        let dur = self.inner.next_retry().map(|dur| dur * (self.factor as _));
        // `factor *= 2` overflows u32 after 32 calls: a panic in debug builds
        // and a silent wrap to 0 (zero backoff forever after) in release.
        // Saturate instead; callers typically cap growth with `max_backoff`.
        self.factor = self.factor.saturating_mul(2);
        dur
    }
}
/// Adapter that caps the inner backoff's duration at `max`.
/// Built by [`Backoff::max_backoff`].
pub struct Max<S>
where
    S: Backoff,
{
    inner: S,
    max: Duration,
}
impl<S> Backoff for Max<S>
where
    S: Backoff,
{
    /// Delegate to the inner backoff and clamp the result to at most `max`.
    fn next_retry(&mut self) -> Option<Duration> {
        match self.inner.next_retry() {
            Some(dur) if dur > self.max => Some(self.max),
            other => other,
        }
    }
}
/// Adapter that raises the inner backoff's duration to at least `min`.
/// Built by [`Backoff::min_backoff`].
pub struct Min<S>
where
    S: Backoff,
{
    inner: S,
    min: Duration,
}
impl<S> Backoff for Min<S>
where
    S: Backoff,
{
    /// Delegate to the inner backoff and raise the result to at least `min`.
    fn next_retry(&mut self) -> Option<Duration> {
        match self.inner.next_retry() {
            Some(dur) if dur < self.min => Some(self.min),
            other => other,
        }
    }
}
/// Adapter that randomly shortens the inner backoff's duration by up to
/// `scale` of its length. Built by [`Backoff::jitter`], which validates
/// `0 < scale <= 1`.
pub struct Jitter<S>
where
    S: Backoff,
{
    inner: S,
    scale: f64,
}
impl<S> Backoff for Jitter<S>
where
    S: Backoff,
{
    /// Sample a duration uniformly from `[dur - dur * scale, dur)`.
    fn next_retry(&mut self) -> Option<Duration> {
        let dur = self.inner.next_retry()?;
        let margin = Duration::from_secs_f64(dur.as_secs_f64() * self.scale);
        // NOTE(review): two-argument `gen_range(low, high)` is the rand 0.7-era
        // API and samples the half-open range [low, high).
        Some(thread_rng().gen_range(dur - margin, dur))
    }
}
/// Adapter that stops after a fixed number of retries.
/// Built by [`Backoff::num_attempts`] (which stores `num - 1`, since the
/// first attempt consumes no retry).
pub struct MaxAttempts<S>
where
    S: Backoff,
{
    inner: S,
    num_attempts_left: u32,
}
impl<S> Backoff for MaxAttempts<S>
where
    S: Backoff,
{
    /// Consume one retry from the budget; once it is exhausted, always `None`.
    fn next_retry(&mut self) -> Option<Duration> {
        match self.num_attempts_left.checked_sub(1) {
            Some(remaining) => {
                self.num_attempts_left = remaining;
                self.inner.next_retry()
            }
            None => None,
        }
    }
}
/// Adapter that stops retrying after a fixed point in time.
/// Built by [`Backoff::deadline`].
pub struct Deadline<S>
where
    S: Backoff,
{
    inner: S,
    deadline: Instant,
}
impl<S> Backoff for Deadline<S>
where
    S: Backoff,
{
    /// Delegate to the inner backoff while the deadline has not yet passed;
    /// afterwards always return `None`.
    fn next_retry(&mut self) -> Option<Duration> {
        if Instant::now() <= self.deadline {
            self.inner.next_retry()
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_instant() {
let mut bo = instant();
assert_eq!(bo.next_retry(), Some(Duration::from_secs(0)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(0)));
}
#[test]
fn test_constant() {
let mut bo = constant(Duration::from_secs(5));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
}
#[test]
fn test_min_backoff() {
let mut bo = constant(Duration::from_secs(5)).min_backoff(Duration::from_secs(10));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(10)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(10)));
let mut bo = constant(Duration::from_secs(5)).min_backoff(Duration::from_secs(3));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
}
#[test]
fn test_max_backoff() {
let mut bo = constant(Duration::from_secs(5)).max_backoff(Duration::from_secs(10));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(5)));
let mut bo = constant(Duration::from_secs(5)).max_backoff(Duration::from_secs(3));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(3)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(3)));
}
#[test]
fn test_exponential() {
let mut bo = constant(Duration::from_secs(1)).exponential();
assert_eq!(bo.next_retry(), Some(Duration::from_secs(1)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(2)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(4)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(8)));
}
#[test]
fn test_jitter() {
let mut bo = constant(Duration::from_secs(1)).jitter(0.1);
let range = Duration::from_millis(900)..=Duration::from_secs(1);
for _i in 0..100_000 {
let dur = bo.next_retry().unwrap();
assert!(range.contains(&dur));
}
}
#[test]
fn test_num_attempts() {
let mut bo = constant(Duration::from_secs(1)).num_attempts(3);
assert_eq!(bo.next_retry(), Some(Duration::from_secs(1)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(1)));
assert_eq!(bo.next_retry(), None);
assert_eq!(bo.next_retry(), None);
}
#[test]
fn deadline() {
let mut bo =
constant(Duration::from_secs(1)).deadline(Instant::now() + Duration::from_millis(20));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(1)));
assert_eq!(bo.next_retry(), Some(Duration::from_secs(1)));
std::thread::sleep(Duration::from_millis(21));
assert_eq!(bo.next_retry(), None);
assert_eq!(bo.next_retry(), None);
}
}
|
// svd2rust-generated reader/writer wrappers for the NSSR (non-secure flash
// status) register. Each field gets a `BitReader` alias for decoding and a
// `BitWriter` alias parameterized by its bit offset `O` for encoding.
#[doc = "Register `NSSR` reader"]
pub type R = crate::R<NSSR_SPEC>;
#[doc = "Register `NSSR` writer"]
pub type W = crate::W<NSSR_SPEC>;
#[doc = "Field `NSEOP` reader - NSEOP"]
pub type NSEOP_R = crate::BitReader;
#[doc = "Field `NSEOP` writer - NSEOP"]
pub type NSEOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSOPERR` reader - NSOPERR"]
pub type NSOPERR_R = crate::BitReader;
#[doc = "Field `NSOPERR` writer - NSOPERR"]
pub type NSOPERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSPROGERR` reader - NSPROGERR"]
pub type NSPROGERR_R = crate::BitReader;
#[doc = "Field `NSPROGERR` writer - NSPROGERR"]
pub type NSPROGERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSWRPERR` reader - NSWRPERR"]
pub type NSWRPERR_R = crate::BitReader;
#[doc = "Field `NSWRPERR` writer - NSWRPERR"]
pub type NSWRPERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSPGAERR` reader - NSPGAERR"]
pub type NSPGAERR_R = crate::BitReader;
#[doc = "Field `NSPGAERR` writer - NSPGAERR"]
pub type NSPGAERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSSIZERR` reader - NSSIZERR"]
pub type NSSIZERR_R = crate::BitReader;
#[doc = "Field `NSSIZERR` writer - NSSIZERR"]
pub type NSSIZERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NSPGSERR` reader - NSPGSERR"]
pub type NSPGSERR_R = crate::BitReader;
#[doc = "Field `NSPGSERR` writer - NSPGSERR"]
pub type NSPGSERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OPTWERR` reader - OPTWERR"]
pub type OPTWERR_R = crate::BitReader;
#[doc = "Field `OPTWERR` writer - OPTWERR"]
pub type OPTWERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OPTVERR` reader - OPTVERR"]
pub type OPTVERR_R = crate::BitReader;
#[doc = "Field `OPTVERR` writer - OPTVERR"]
pub type OPTVERR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// NSBSY is a read-only status bit, so it has no writer alias.
#[doc = "Field `NSBSY` reader - NSBusy"]
pub type NSBSY_R = crate::BitReader;
// Generated field accessors: each extracts its single bit from the raw
// 32-bit register value at the documented offset. Bit positions 2 and 8-12
// are reserved and have no accessor.
impl R {
    #[doc = "Bit 0 - NSEOP"]
    #[inline(always)]
    pub fn nseop(&self) -> NSEOP_R {
        NSEOP_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - NSOPERR"]
    #[inline(always)]
    pub fn nsoperr(&self) -> NSOPERR_R {
        NSOPERR_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 3 - NSPROGERR"]
    #[inline(always)]
    pub fn nsprogerr(&self) -> NSPROGERR_R {
        NSPROGERR_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - NSWRPERR"]
    #[inline(always)]
    pub fn nswrperr(&self) -> NSWRPERR_R {
        NSWRPERR_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - NSPGAERR"]
    #[inline(always)]
    pub fn nspgaerr(&self) -> NSPGAERR_R {
        NSPGAERR_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - NSSIZERR"]
    #[inline(always)]
    pub fn nssizerr(&self) -> NSSIZERR_R {
        NSSIZERR_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - NSPGSERR"]
    #[inline(always)]
    pub fn nspgserr(&self) -> NSPGSERR_R {
        NSPGSERR_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 13 - OPTWERR"]
    #[inline(always)]
    pub fn optwerr(&self) -> OPTWERR_R {
        OPTWERR_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 15 - OPTVERR"]
    #[inline(always)]
    pub fn optverr(&self) -> OPTVERR_R {
        OPTVERR_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 16 - NSBusy"]
    #[inline(always)]
    pub fn nsbsy(&self) -> NSBSY_R {
        NSBSY_R::new(((self.bits >> 16) & 1) != 0)
    }
}
impl W {
    // Each method returns a field-writer proxy bound to that field's bit
    // offset in NSSR; the proxy performs the actual bit update.
    #[doc = "Bit 0 - NSEOP"]
    #[inline(always)]
    #[must_use]
    pub fn nseop(&mut self) -> NSEOP_W<NSSR_SPEC, 0> {
        NSEOP_W::new(self)
    }
    #[doc = "Bit 1 - NSOPERR"]
    #[inline(always)]
    #[must_use]
    pub fn nsoperr(&mut self) -> NSOPERR_W<NSSR_SPEC, 1> {
        NSOPERR_W::new(self)
    }
    #[doc = "Bit 3 - NSPROGERR"]
    #[inline(always)]
    #[must_use]
    pub fn nsprogerr(&mut self) -> NSPROGERR_W<NSSR_SPEC, 3> {
        NSPROGERR_W::new(self)
    }
    #[doc = "Bit 4 - NSWRPERR"]
    #[inline(always)]
    #[must_use]
    pub fn nswrperr(&mut self) -> NSWRPERR_W<NSSR_SPEC, 4> {
        NSWRPERR_W::new(self)
    }
    #[doc = "Bit 5 - NSPGAERR"]
    #[inline(always)]
    #[must_use]
    pub fn nspgaerr(&mut self) -> NSPGAERR_W<NSSR_SPEC, 5> {
        NSPGAERR_W::new(self)
    }
    #[doc = "Bit 6 - NSSIZERR"]
    #[inline(always)]
    #[must_use]
    pub fn nssizerr(&mut self) -> NSSIZERR_W<NSSR_SPEC, 6> {
        NSSIZERR_W::new(self)
    }
    #[doc = "Bit 7 - NSPGSERR"]
    #[inline(always)]
    #[must_use]
    pub fn nspgserr(&mut self) -> NSPGSERR_W<NSSR_SPEC, 7> {
        NSPGSERR_W::new(self)
    }
    #[doc = "Bit 13 - OPTWERR"]
    #[inline(always)]
    #[must_use]
    pub fn optwerr(&mut self) -> OPTWERR_W<NSSR_SPEC, 13> {
        OPTWERR_W::new(self)
    }
    #[doc = "Bit 15 - OPTVERR"]
    #[inline(always)]
    #[must_use]
    pub fn optverr(&mut self) -> OPTVERR_W<NSSR_SPEC, 15> {
        OPTVERR_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe: bypasses the per-field writers, so any 32-bit pattern —
    // including reserved bits — can be written. Caller must ensure the
    // value is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Flash status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`nssr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`nssr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Generated register spec marker: 32-bit, readable, writable, resets to 0.
pub struct NSSR_SPEC;
impl crate::RegisterSpec for NSSR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`nssr::R`](R) reader structure"]
impl crate::Readable for NSSR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`nssr::W`](W) writer structure"]
impl crate::Writable for NSSR_SPEC {
    // Both modify-masks are empty for this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets NSSR to value 0"]
impl crate::Resettable for NSSR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! Reset and Clock Control
#![deny(missing_docs)]
use crate::pwr::VoltageScale as Voltage;
use crate::stm32::rcc::cfgr::SW_A as SW;
use crate::stm32::rcc::cfgr::TIMPRE_A as TIMPRE;
use crate::stm32::rcc::d1ccipr::CKPERSEL_A as CKPERSEL;
use crate::stm32::rcc::d1cfgr::HPRE_A as HPRE;
use crate::stm32::rcc::pllckselr::PLLSRC_A as PLLSRC;
use crate::stm32::{rcc, RCC, SYSCFG};
use crate::time::Hertz;
// NOTE(review): the first paragraphs below describe the whole module but
// are attached (as `///`) to `RccExt`, so rustdoc shows them on the trait.
// Consider moving them into the `//!` module docs at the top of the file.
/// This module configures the RCC unit to provide set frequencies for
/// the input to the SCGU `sys_ck`, the AMBA High-performance Busses
/// and Advanced eXtensible Interface bus `hclk`, the AMBA Peripheral
/// Busses `pclkN` and the peripheral clock `per_ck`.
///
/// Check Fig 46 "Core and bus clock generation" in the reference
/// manual for information (p 336).
///
/// HSI is 64 MHz.
///
/// Extension trait that constrains the `RCC` peripheral
pub trait RccExt {
    /// Constrains the `RCC` peripheral so it plays nicely with the
    /// other abstractions
    fn constrain(self) -> Rcc;
}
impl RccExt for RCC {
fn constrain(self) -> Rcc {
Rcc {
config: Config {
hse: None,
sys_ck: None,
per_ck: None,
rcc_hclk: None,
rcc_pclk1: None,
rcc_pclk2: None,
rcc_pclk3: None,
rcc_pclk4: None,
pll1: PllConfig {
p_ck: None,
q_ck: None,
r_ck: None,
},
pll2: PllConfig {
p_ck: None,
q_ck: None,
r_ck: None,
},
pll3: PllConfig {
p_ck: None,
q_ck: None,
r_ck: None,
},
},
rb: self,
}
}
}
/// Constrained RCC peripheral
///
/// Generated by calling `constrain` on the PAC's RCC peripheral.
///
/// ```rust
/// let dp = stm32::Peripherals::take().unwrap();
/// let rcc = dp.RCC.constrain();
/// ```
pub struct Rcc {
    /// Requested clock frequencies; resolved and applied by `freeze`
    config: Config,
    /// The underlying PAC RCC register block
    pub(crate) rb: RCC,
}
/// Core Clock Distribution and Reset (CCDR)
///
/// Generated when the RCC is frozen. The configuration of the Sys_Ck
/// `sys_ck`, CPU Clock `c_ck`, AXI peripheral clock `aclk`, AHB
/// clocks `hclk`, APB clocks `pclkN` and PLL outputs `pllN_X_ck` are
/// frozen. However the distribution of some clocks may still be
/// modified and peripherals enabled / reset by passing this object
/// to other implementations in this stack.
pub struct Ccdr {
    /// A record of the frozen core clock frequencies
    pub clocks: CoreClocks,
    /// AMBA High-performance Bus (AHB1) registers
    pub ahb1: AHB1,
    /// AMBA High-performance Bus (AHB2) registers
    pub ahb2: AHB2,
    /// AMBA High-performance Bus (AHB3) registers
    pub ahb3: AHB3,
    /// AMBA High-performance Bus (AHB4) registers
    pub ahb4: AHB4,
    /// Advanced Peripheral Bus 1L (APB1L) registers
    pub apb1l: APB1L,
    /// Advanced Peripheral Bus 1H (APB1H) registers
    pub apb1h: APB1H,
    /// Advanced Peripheral Bus 2 (APB2) registers
    pub apb2: APB2,
    /// Advanced Peripheral Bus 3 (APB3) registers
    pub apb3: APB3,
    /// Advanced Peripheral Bus 4 (APB4) registers
    pub apb4: APB4,
    /// RCC Domain 3 Kernel Clock Configuration Register
    pub d3ccipr: D3CCIPR,
    // Yes, it lives (locally)! We retain the right to switch most
    // PKSUs on the fly, to fine-tune PLL frequencies, and to enable /
    // reset peripherals after the freeze.
    //
    // TODO: Remove this once all permitted RCC register accesses
    // after freeze are enumerated in this struct
    pub(crate) rb: RCC,
}
// Generates one zero-sized proxy type per AHB/APB bus, each with
// private accessors for that bus's clock-enable (`ENR`) and reset
// (`RSTR`) registers. Taking `&mut self` makes each accessor require
// exclusive access to the proxy.
macro_rules! ahb_apb_generation {
    ($(($AXBn:ident, $AXBnENR:ident, $axbnenr:ident, $AXBnRSTR:ident, $axbnrstr:ident, $doc:expr)),+) => {
        $(
            #[doc=$doc]
            pub struct $AXBn {
                _0: (),
            }
            impl $AXBn {
                #[allow(unused)]
                pub (crate) fn enr(&mut self) -> &rcc::$AXBnENR {
                    // NOTE(unsafe) this proxy grants exclusive access to this register
                    unsafe { &(*RCC::ptr()).$axbnenr }
                }
                #[allow(unused)]
                pub (crate) fn rstr(&mut self) -> &rcc::$AXBnRSTR {
                    // NOTE(unsafe) this proxy grants exclusive access to this register
                    unsafe { &(*RCC::ptr()).$axbnrstr }
                }
            }
        )+
    }
}
// Instantiate the bus proxies. Each tuple names: proxy type, ENR register
// type, ENR field, RSTR register type, RSTR field, and the doc string.
ahb_apb_generation!(
    (
        AHB1,
        AHB1ENR,
        ahb1enr,
        AHB1RSTR,
        ahb1rstr,
        "AMBA High-performance Bus (AHB1) registers"
    ),
    (
        AHB2,
        AHB2ENR,
        ahb2enr,
        AHB2RSTR,
        ahb2rstr,
        "AMBA High-performance Bus (AHB2) registers"
    ),
    (
        AHB3,
        AHB3ENR,
        ahb3enr,
        AHB3RSTR,
        ahb3rstr,
        "AMBA High-performance Bus (AHB3) registers"
    ),
    (
        AHB4,
        AHB4ENR,
        ahb4enr,
        AHB4RSTR,
        ahb4rstr,
        "AMBA High-performance Bus (AHB4) registers"
    ),
    (
        APB1L,
        APB1LENR,
        apb1lenr,
        APB1LRSTR,
        apb1lrstr,
        "Advanced Peripheral Bus 1L (APB1L) registers"
    ),
    (
        APB1H,
        APB1HENR,
        apb1henr,
        APB1HRSTR,
        apb1hrstr,
        "Advanced Peripheral Bus 1H (APB1H) registers"
    ),
    (
        APB2,
        APB2ENR,
        apb2enr,
        APB2RSTR,
        apb2rstr,
        "Advanced Peripheral Bus 2 (APB2) registers"
    ),
    (
        APB3,
        APB3ENR,
        apb3enr,
        APB3RSTR,
        apb3rstr,
        "Advanced Peripheral Bus 3 (APB3) registers"
    ),
    (
        APB4,
        APB4ENR,
        apb4enr,
        APB4RSTR,
        apb4rstr,
        "Advanced Peripheral Bus 4 (APB4) registers"
    )
);
/// RCC Domain 3 Kernel Clock Configuration Register
pub struct D3CCIPR {
    _0: (),
}
impl D3CCIPR {
    /// Returns a reference to the D3CCIPR register.
    pub(crate) fn kernel_ccip(&mut self) -> &rcc::D3CCIPR {
        // NOTE(unsafe) this proxy grants exclusive access to this register
        unsafe { &(*RCC::ptr()).d3ccipr }
    }
}
// Fixed internal oscillator frequencies, in Hz.
const HSI: u32 = 64_000_000; // Hz
const CSI: u32 = 4_000_000; // Hz
const HSI48: u32 = 48_000_000; // Hz
/// Configuration of a Phase Lock Loop (PLL)
pub struct PllConfig {
    // Requested frequencies (Hz) for the P/Q/R outputs.
    // `None` leaves that output disabled (see `pll_setup!`).
    p_ck: Option<u32>,
    q_ck: Option<u32>,
    r_ck: Option<u32>,
}
/// Configuration of the core clocks
pub struct Config {
    // All frequencies in Hz. `None` means a default is derived from the
    // other settings when `freeze` runs.
    hse: Option<u32>,
    sys_ck: Option<u32>,
    per_ck: Option<u32>,
    rcc_hclk: Option<u32>,
    rcc_pclk1: Option<u32>,
    rcc_pclk2: Option<u32>,
    rcc_pclk3: Option<u32>,
    rcc_pclk4: Option<u32>,
    pll1: PllConfig,
    pll2: PllConfig,
    pll3: PllConfig,
}
/// Setter definition for pclk 1 - 4
macro_rules! pclk_setter {
    ($($name:ident: $pclk:ident,)+) => {
        $(
            /// Set the peripheral clock frequency for APB
            /// peripherals.
            pub fn $name<F>(mut self, freq: F) -> Self
            where
                F: Into<Hertz>,
            {
                self.config.$pclk = Some(freq.into().0);
                self
            }
        )+
    };
}
/// Setter definition for pll 1 - 3
///
/// Expands to one builder method per (PLL, output) pair.
macro_rules! pll_setter {
    ($($pll:ident: [ $($name:ident: $ck:ident,)+ ],)+) => {
        $(
            $(
                /// Set the target clock frequency for PLL output
                pub fn $name<F>(mut self, freq: F) -> Self
                where
                    F: Into<Hertz>,
                {
                    self.config.$pll.$ck = Some(freq.into().0);
                    self
                }
            )+
        )+
    };
}
impl Rcc {
    // Builder-style setters: each consumes `self`, records the requested
    // frequency in `self.config`, and returns `self` for chaining.
    // Nothing is written to hardware until `freeze` is called.

    /// Uses HSE (external oscillator) instead of HSI (internal RC
    /// oscillator) as the clock source. Will result in a hang if an
    /// external oscillator is not connected or it fails to start.
    pub fn use_hse<F>(mut self, freq: F) -> Self
    where
        F: Into<Hertz>,
    {
        self.config.hse = Some(freq.into().0);
        self
    }
    /// Set input frequency to the SCGU
    pub fn sys_ck<F>(mut self, freq: F) -> Self
    where
        F: Into<Hertz>,
    {
        self.config.sys_ck = Some(freq.into().0);
        self
    }
    /// Set input frequency to the SCGU - ALIAS
    pub fn sysclk<F>(mut self, freq: F) -> Self
    where
        F: Into<Hertz>,
    {
        self.config.sys_ck = Some(freq.into().0);
        self
    }
    /// Set peripheral clock frequency
    pub fn per_ck<F>(mut self, freq: F) -> Self
    where
        F: Into<Hertz>,
    {
        self.config.per_ck = Some(freq.into().0);
        self
    }
    /// Set the peripheral clock frequency for AHB and AXI
    /// peripherals. There are several gated versions `rcc_hclk[1-4]`
    /// for different power domains, but they are all the same frequency
    pub fn hclk<F>(mut self, freq: F) -> Self
    where
        F: Into<Hertz>,
    {
        self.config.rcc_hclk = Some(freq.into().0);
        self
    }
    // Generated setters: pclk1() .. pclk4()
    pclk_setter! {
        pclk1: rcc_pclk1,
        pclk2: rcc_pclk2,
        pclk3: rcc_pclk3,
        pclk4: rcc_pclk4,
    }
    // Generated setters: pllN_p_ck() / pllN_q_ck() / pllN_r_ck()
    pll_setter! {
        pll1: [
            pll1_p_ck: p_ck,
            pll1_q_ck: q_ck,
            pll1_r_ck: r_ck,
        ],
        pll2: [
            pll2_p_ck: p_ck,
            pll2_q_ck: q_ck,
            pll2_r_ck: r_ck,
        ],
        pll3: [
            pll3_p_ck: p_ck,
            pll3_q_ck: q_ck,
            pll3_r_ck: r_ck,
        ],
    }
}
/// Divider calculator for pclk 1 - 4
///
/// Also calculate tim[xy]_ker_clk if there are timers on this bus
macro_rules! ppre_calculate {
    ($(($ppre:ident, $bits:ident): ($self: ident, $hclk: ident,
                                    $pclk: ident, $max: ident
                                    $(,$rcc_tim_ker_clk:ident, $timpre:ident)*),)+) => {
        $(
            // Get intended rcc_pclkN frequency (default: hclk / 2,
            // capped at the bus maximum)
            let $pclk: u32 = $self.config
                .$pclk
                .unwrap_or_else(|| core::cmp::min($max, $hclk / 2));
            // Calculate suitable divider (ceiling of hclk / pclk,
            // rounded up to the next supported power-of-two prescaler)
            let ($bits, $ppre) = match ($hclk + $pclk - 1) / $pclk
            {
                0 => unreachable!(),
                1 => (0b000, 1 as u8),
                2 => (0b100, 2),
                3..=5 => (0b101, 4),
                6..=11 => (0b110, 8),
                _ => (0b111, 16),
            };
            // Calculate real APBn clock
            let $pclk = $hclk / u32::from($ppre);
            // Check in range
            assert!($pclk <= $max);
            $(
                // Timer kernel clock depends on both the APB prescaler
                // bits and the TIMPRE setting
                let $rcc_tim_ker_clk = match ($bits, &$timpre)
                {
                    (0b101, TIMPRE::DEFAULTX2) => $hclk / 2,
                    (0b110, TIMPRE::DEFAULTX4) => $hclk / 2,
                    (0b110, TIMPRE::DEFAULTX2) => $hclk / 4,
                    (0b111, TIMPRE::DEFAULTX4) => $hclk / 4,
                    (0b111, TIMPRE::DEFAULTX2) => $hclk / 8,
                    _ => $hclk,
                };
            )*
        )+
    };
}
/// Setup PFD input frequency and VCO output frequency
///
macro_rules! vco_setup {
    // VCOL, highest PFD frequency, highest VCO frequency
    (NORMAL: $pllsrc:ident, $output:ident,
     $rcc:ident, $pllXvcosel:ident, $pllXrge:ident $(,$pll1_p:ident)*) => {{
        // Input divisor, resulting in a reference clock in the
        // range 1 to 2 MHz. Choose the highest reference clock
        let pll_x_m = ($pllsrc + 1_999_999) / 2_000_000;
        assert!(pll_x_m < 64);
        // Calculate resulting reference clock
        let ref_x_ck = $pllsrc / pll_x_m;
        assert!(ref_x_ck >= 1_000_000 && ref_x_ck <= 2_000_000);
        // VCO output frequency. Choose the highest VCO frequency
        let vco_min = 150_000_000;
        let vco_max = 420_000_000;
        // Macro-based selection: when `$pll1_p` is passed (PLL1 only)
        // the first arm is generated and shadows the fallback arm.
        let pll_x_p = match true {
            $(
                // Specific to PLL1
                true => {
                    let $pll1_p = if $output > vco_max / 2 {
                        1
                    } else {
                        ((vco_max / $output) | 1) - 1 // Must be even or unity
                    };
                    $pll1_p
                },
            )*
            // Specific to PLL2/3
            _ => if $output > vco_max / 2 {
                1
            } else {
                vco_max / $output
            }
        };
        // Calculate VCO output
        let vco_ck = $output * pll_x_p;
        assert!(pll_x_p <= 128);
        assert!(vco_ck >= vco_min);
        assert!(vco_ck <= vco_max);
        // Configure VCO
        $rcc.pllcfgr.modify(|_, w| {
            w.$pllXvcosel()
                .medium_vco() // 150 - 420MHz Medium VCO
                .$pllXrge()
                .range1() // ref_x_ck is 1 - 2 MHz
        });
        (ref_x_ck, pll_x_m, pll_x_p, vco_ck)
    }};
}
// Generates a `pllN_setup` method on `Rcc`. The P output frequency
// drives the VCO choice; Q and R are derived from the resulting VCO
// clock. Requesting Q or R without P is an error (asserted below).
macro_rules! pll_setup {
    ($pll_setup:ident: ($pllXvcosel:ident, $pllXrge:ident, $pllXfracen:ident,
                        $pllXdivr:ident, $divnX:ident, $divmX:ident,
                        OUTPUTS: [ $($CK:ident:
                                     ($div:ident, $diven:ident, $DD:tt $(,$unsafe:ident)*)),+ ]
                        $(,$pll1_p:ident)*
    )) => {
        /// PLL Setup
        /// Returns (Option(pllX_p_ck), Option(pllX_q_ck), Option(pllX_r_ck))
        fn $pll_setup(
            &self,
            rcc: &RCC,
            pll: &PllConfig,
        ) -> (Option<Hertz>, Option<Hertz>, Option<Hertz>) {
            // PLL sourced from either HSE or HSI
            let pllsrc = self.config.hse.unwrap_or(HSI);
            assert!(pllsrc > 0);
            // PLL output
            match pll.p_ck {
                Some(output) => {
                    // Use the Medium Range VCO with 1 - 2 MHz input
                    let (ref_x_ck, pll_x_m, pll_x_p, vco_ck) = {
                        vco_setup! { NORMAL: pllsrc, output, rcc,
                                     $pllXvcosel, $pllXrge $(, $pll1_p)* }
                    };
                    // Feedback divider. Integer only
                    let pll_x_n = vco_ck / ref_x_ck;
                    // Write dividers
                    rcc.pllckselr.modify(|_, w| {
                        w.$divmX().bits(pll_x_m as u8) // ref prescaler
                    });
                    // unsafe as not all values are permitted: see RM0433
                    assert!(pll_x_n >= 4);
                    assert!(pll_x_n <= 512);
                    rcc.$pllXdivr
                        .modify(|_, w| unsafe { w.$divnX().bits((pll_x_n - 1) as u16) });
                    // Configure PLL
                    rcc.pllcfgr.modify(|_, w| {
                        w.$pllXfracen().reset() // No FRACN
                    });
                    // Calculate additional output dividers
                    let pll_x_q = match pll.q_ck {
                        Some(ck) => (vco_ck + ck - 1) / ck,
                        None => 0
                    };
                    let pll_x_r = match pll.r_ck {
                        Some(ck) => (vco_ck + ck - 1) / ck,
                        None => 0
                    };
                    let dividers = (pll_x_p, pll_x_q, pll_x_r);
                    // Setup and return output clocks
                    ($(
                        // Enable based on config
                        match pll.$CK {
                            Some(_) => {
                                // Setup divider
                                rcc.$pllXdivr
                                    .modify(|_, w| $($unsafe)* {
                                        w.$div().bits((dividers.$DD - 1) as u8)
                                    });
                                rcc.pllcfgr.modify(|_, w| w.$diven().enabled());
                                Some(Hertz(ref_x_ck * pll_x_n / dividers.$DD))
                            }
                            None => {
                                rcc.pllcfgr.modify(|_, w| w.$diven().disabled());
                                None
                            }
                        },
                    )+)
                },
                None => {
                    assert!(pll.q_ck.is_none(), "Must set PLL P clock for Q clock to take effect!");
                    assert!(pll.r_ck.is_none(), "Must set PLL P clock for R clock to take effect!");
                    (None, None, None)
                }
            }
        }
    };
}
impl Rcc {
    // Generate pll1_setup / pll2_setup / pll3_setup. Only PLL1 passes
    // the extra `pll1_p` token, selecting the even-or-unity P divider.
    pll_setup! {
        pll1_setup: (pll1vcosel, pll1rge, pll1fracen, pll1divr, divn1, divm1,
    OUTPUTS: [
        // unsafe as not all values are permitted: see RM0433
        p_ck: (divp1, divp1en, 0, unsafe),
        q_ck: (divq1, divq1en, 1),
        r_ck: (divr1, divr1en, 2) ],
        pll1_p)
    }
    pll_setup! {
        pll2_setup: (pll2vcosel, pll2rge, pll2fracen, pll2divr, divn2, divm2,
    OUTPUTS: [
        p_ck: (divp2, divp2en, 0),
        q_ck: (divq2, divq2en, 1),
        r_ck: (divr2, divr2en, 2)])
    }
    pll_setup! {
        pll3_setup: (pll3vcosel, pll3rge, pll3fracen, pll3divr, divn3, divm3,
    OUTPUTS: [
        p_ck: (divp3, divp3en, 0),
        q_ck: (divq3, divq3en, 1),
        r_ck: (divr3, divr3en, 2)])
    }
    /// Configures FLASH wait states and programming delay for the given
    /// AXI clock and core voltage, then spins until the new latency is
    /// accepted by the hardware.
    fn flash_setup(rcc_aclk: u32, vos: Voltage) {
        use crate::stm32::FLASH;
        let rcc_aclk_mhz = rcc_aclk / 1_000_000;
        // See RM0433 Table 13. FLASH recommended number of wait
        // states and programming delay
        let (wait_states, progr_delay) = match vos {
            // VOS 1 range VCORE 1.15V - 1.26V
            Voltage::Scale0 | Voltage::Scale1 => match rcc_aclk_mhz {
                0..=69 => (0, 0),
                70..=139 => (1, 1),
                140..=184 => (2, 1),
                185..=209 => (2, 2),
                210..=224 => (3, 2),
                _ => (7, 3),
            },
            // VOS 2 range VCORE 1.05V - 1.15V
            Voltage::Scale2 => match rcc_aclk_mhz {
                0..=54 => (0, 0),
                55..=109 => (1, 1),
                110..=164 => (2, 1),
                165..=224 => (3, 2),
                // NOTE(review): single value 225 — confirm against the
                // datasheet whether a range (e.g. 225..=239) was intended
                225 => (4, 2),
                _ => (7, 3),
            },
            // VOS 3 range VCORE 0.95V - 1.05V
            Voltage::Scale3 => match rcc_aclk_mhz {
                0..=44 => (0, 0),
                45..=89 => (1, 1),
                90..=134 => (2, 1),
                135..=179 => (3, 2),
                180..=224 => (4, 2),
                _ => (7, 3),
            },
        };
        let flash = unsafe { &(*FLASH::ptr()) };
        // Adjust flash wait states
        flash.acr.write(|w| unsafe {
            w.wrhighfreq().bits(progr_delay).latency().bits(wait_states)
        });
        // Spin until the new latency takes effect
        while flash.acr.read().latency().bits() != wait_states {}
    }
    /// Setup sys_ck
    /// Returns sys_ck frequency, and a pll1_p_ck
    fn sys_ck_setup(&self) -> (Hertz, Option<u32>, bool) {
        // Compare available with wanted clocks
        let srcclk = self.config.hse.unwrap_or(HSI); // Available clocks
        let sys_ck = self.config.sys_ck.unwrap_or(srcclk);
        // The requested system clock is not the immediately available
        // HSE/HSI clock. Perhaps there are other ways of obtaining
        // the requested system clock (such as `HSIDIV`) but we will
        // ignore those for now.
        if sys_ck != srcclk {
            // Therefore we must use pll1_p_ck
            let pll1_p_ck = match self.config.pll1.p_ck {
                Some(p_ck) => {
                    assert!(p_ck == sys_ck,
                            "Error: Cannot set pll1_p_ck independently as it must be used to generate sys_ck");
                    Some(p_ck)
                }
                None => Some(sys_ck),
            };
            (Hertz(sys_ck), pll1_p_ck, true)
        } else {
            // sys_ck is derived directly from a source clock
            // (HSE/HSI). pll1_p_ck can be as requested
            (Hertz(sys_ck), self.config.pll1.p_ck, false)
        }
    }
    /// Setup traceclk
    /// Returns a pll1_r_ck
    fn traceclk_setup(
        &self,
        sys_use_pll1_p: bool,
        pll1_p_ck: Option<u32>,
    ) -> Option<u32> {
        let pll1_r_ck = match (sys_use_pll1_p, self.config.pll1.r_ck) {
            // pll1_p_ck selected as system clock but pll1_r_ck not
            // set. The traceclk mux is synchronous with the system
            // clock mux, but has pll1_r_ck as an input. In order to
            // keep traceclk running, we force a pll1_r_ck.
            (true, None) => Some(pll1_p_ck.unwrap() / 2),
            // Either pll1 not selected as system clock, free choice
            // of pll1_r_ck. Or pll1 is selected, assume user has set
            // a suitable pll1_r_ck frequency.
            _ => self.config.pll1.r_ck,
        };
        pll1_r_ck
    }
    /// Freeze the core clocks, returning a Core Clocks Distribution
    /// and Reset (CCDR) object.
    ///
    /// `syscfg` is required to enable the I/O compensation cell.
    pub fn freeze(self, vos: Voltage, syscfg: &SYSCFG) -> Ccdr {
        let rcc = &self.rb;
        // We do not reset RCC here. This routine must assert when
        // the previous state of the RCC peripheral is unacceptable.
        // sys_ck from PLL if needed, else HSE or HSI
        let (sys_ck, pll1_p_ck, sys_use_pll1_p) = self.sys_ck_setup();
        // Configure traceclk from PLL if needed
        let pll1_r_ck = self.traceclk_setup(sys_use_pll1_p, pll1_p_ck);
        // Configure PLL1
        let pll1_config = PllConfig {
            p_ck: pll1_p_ck,
            q_ck: self.config.pll1.q_ck,
            r_ck: pll1_r_ck,
        };
        let (pll1_p_ck, pll1_q_ck, pll1_r_ck) =
            self.pll1_setup(rcc, &pll1_config);
        // Configure PLL2
        let (pll2_p_ck, pll2_q_ck, pll2_r_ck) =
            self.pll2_setup(rcc, &self.config.pll2);
        // Configure PLL3
        let (pll3_p_ck, pll3_q_ck, pll3_r_ck) =
            self.pll3_setup(rcc, &self.config.pll3);
        // hsi_ck = HSI. This routine does not support HSIDIV != 1. To
        // do so it would need to ensure all PLLxON bits are clear
        // before changing the value of HSIDIV
        let hsi = HSI;
        assert!(rcc.cr.read().hsion().is_on(), "HSI oscillator must be on!");
        assert!(rcc.cr.read().hsidiv().is_div1());
        let csi = CSI;
        let hsi48 = HSI48;
        // per_ck from HSI by default
        // NB: `CSI` is a const, so the pattern `Some(CSI)` matches only
        // when per_ck was requested as exactly 4 MHz
        let (per_ck, ckpersel) =
            match (self.config.per_ck == self.config.hse, self.config.per_ck) {
                (true, Some(hse)) => (hse, CKPERSEL::HSE), // HSE
                (_, Some(CSI)) => (csi, CKPERSEL::CSI), // CSI
                _ => (hsi, CKPERSEL::HSI), // HSI
            };
        // D1 Core Prescaler
        // Set to 1
        let d1cpre_bits = 0;
        let d1cpre_div = 1;
        let sys_d1cpre_ck = sys_ck.0 / d1cpre_div;
        // Timer prescaler selection
        let timpre = TIMPRE::DEFAULTX2;
        // Refer to part datasheet "General operating conditions"
        // table for (rev V). We do not assert checks for earlier
        // revisions which may have lower limits.
        let (sys_d1cpre_ck_max, rcc_hclk_max, pclk_max) = match vos {
            Voltage::Scale0 => (480_000_000, 240_000_000, 120_000_000),
            Voltage::Scale1 => (400_000_000, 200_000_000, 100_000_000),
            Voltage::Scale2 => (300_000_000, 150_000_000, 75_000_000),
            _ => (200_000_000, 100_000_000, 50_000_000),
        };
        // Check resulting sys_d1cpre_ck
        assert!(sys_d1cpre_ck <= sys_d1cpre_ck_max);
        // Get ideal AHB clock
        let rcc_hclk = self.config.rcc_hclk.unwrap_or(sys_d1cpre_ck / 2);
        assert!(rcc_hclk <= rcc_hclk_max);
        // Estimate divisor
        let (hpre_bits, hpre_div) =
            match (sys_d1cpre_ck + rcc_hclk - 1) / rcc_hclk {
                0 => unreachable!(),
                1 => (HPRE::DIV1, 1),
                2 => (HPRE::DIV2, 2),
                3..=5 => (HPRE::DIV4, 4),
                6..=11 => (HPRE::DIV8, 8),
                12..=39 => (HPRE::DIV16, 16),
                40..=95 => (HPRE::DIV64, 64),
                96..=191 => (HPRE::DIV128, 128),
                192..=383 => (HPRE::DIV256, 256),
                _ => (HPRE::DIV512, 512),
            };
        // Calculate real AXI and AHB clock
        let rcc_hclk = sys_d1cpre_ck / hpre_div;
        assert!(rcc_hclk <= rcc_hclk_max);
        // Calculate ppreN dividers and real rcc_pclkN frequencies
        ppre_calculate! {
            (ppre1, ppre1_bits):
                (self, rcc_hclk, rcc_pclk1, pclk_max, rcc_timx_ker_ck, timpre),
            (ppre2, ppre2_bits):
                (self, rcc_hclk, rcc_pclk2, pclk_max, rcc_timy_ker_ck, timpre),
            (ppre3, ppre3_bits): (self, rcc_hclk, rcc_pclk3, pclk_max),
            (ppre4, ppre4_bits): (self, rcc_hclk, rcc_pclk4, pclk_max),
        }
        // Start switching clocks here! ----------------------------------------
        // Flash setup
        Self::flash_setup(sys_d1cpre_ck, vos);
        // Ensure CSI is on and stable
        rcc.cr.modify(|_, w| w.csion().on());
        while rcc.cr.read().csirdy().is_not_ready() {}
        // Ensure HSI48 is on and stable
        rcc.cr.modify(|_, w| w.hsi48on().on());
        while rcc.cr.read().hsi48rdy().is_not_ready() {}
        // HSE
        let hse_ck = match self.config.hse {
            Some(hse) => {
                // Ensure HSE is on and stable
                rcc.cr.modify(|_, w| w.hseon().on().hsebyp().not_bypassed());
                while rcc.cr.read().hserdy().is_not_ready() {}
                Some(Hertz(hse))
            }
            None => None,
        };
        // PLL
        let pllsrc = if self.config.hse.is_some() {
            PLLSRC::HSE
        } else {
            PLLSRC::HSI
        };
        rcc.pllckselr.modify(|_, w| w.pllsrc().variant(pllsrc));
        // PLL1
        if pll1_p_ck.is_some() {
            // Enable PLL and wait for it to stabilise
            rcc.cr.modify(|_, w| w.pll1on().on());
            while rcc.cr.read().pll1rdy().is_not_ready() {}
        }
        // PLL2
        if pll2_p_ck.is_some() {
            // Enable PLL and wait for it to stabilise
            rcc.cr.modify(|_, w| w.pll2on().on());
            while rcc.cr.read().pll2rdy().is_not_ready() {}
        }
        // PLL3
        if pll3_p_ck.is_some() {
            // Enable PLL and wait for it to stabilise
            rcc.cr.modify(|_, w| w.pll3on().on());
            while rcc.cr.read().pll3rdy().is_not_ready() {}
        }
        // Core Prescaler / AHB Prescaler / APB3 Prescaler
        rcc.d1cfgr.modify(|_, w| unsafe {
            w.d1cpre()
                .bits(d1cpre_bits)
                .d1ppre() // D1 contains APB3
                .bits(ppre3_bits)
                .hpre()
                .variant(hpre_bits)
        });
        // Ensure core prescaler value is valid before future lower
        // core voltage
        while rcc.d1cfgr.read().d1cpre().bits() != d1cpre_bits {}
        // APB1 / APB2 Prescaler
        rcc.d2cfgr.modify(|_, w| unsafe {
            w.d2ppre1() // D2 contains APB1
                .bits(ppre1_bits)
                .d2ppre2() // D2 also contains APB2
                .bits(ppre2_bits)
        });
        // APB4 Prescaler
        rcc.d3cfgr.modify(|_, w| unsafe {
            w.d3ppre() // D3 contains APB4
                .bits(ppre4_bits)
        });
        // Peripheral Clock (per_ck)
        rcc.d1ccipr.modify(|_, w| w.ckpersel().variant(ckpersel));
        // Set timer clocks prescaler setting
        rcc.cfgr.modify(|_, w| w.timpre().variant(timpre));
        // Select system clock source
        let swbits = match (sys_use_pll1_p, self.config.hse.is_some()) {
            (true, _) => SW::PLL1 as u8,
            (false, true) => SW::HSE as u8,
            _ => SW::HSI as u8,
        };
        rcc.cfgr.modify(|_, w| unsafe { w.sw().bits(swbits) });
        while rcc.cfgr.read().sws().bits() != swbits {}
        // IO compensation cell - Requires CSI clock and SYSCFG
        assert!(rcc.cr.read().csirdy().is_ready());
        rcc.apb4enr.modify(|_, w| w.syscfgen().enabled());
        // Enable the compensation cell, using back-bias voltage code
        // provide by the cell.
        syscfg.cccsr.modify(|_, w| {
            w.en().set_bit().cs().clear_bit().hslv().clear_bit()
        });
        while syscfg.cccsr.read().ready().bit_is_clear() {}
        // Return frozen clock configuration
        Ccdr {
            ahb1: AHB1 { _0: () },
            ahb2: AHB2 { _0: () },
            ahb3: AHB3 { _0: () },
            ahb4: AHB4 { _0: () },
            apb1l: APB1L { _0: () },
            apb1h: APB1H { _0: () },
            apb2: APB2 { _0: () },
            apb3: APB3 { _0: () },
            apb4: APB4 { _0: () },
            clocks: CoreClocks {
                hclk: Hertz(rcc_hclk),
                pclk1: Hertz(rcc_pclk1),
                pclk2: Hertz(rcc_pclk2),
                pclk3: Hertz(rcc_pclk3),
                pclk4: Hertz(rcc_pclk4),
                ppre1,
                ppre2,
                ppre3,
                ppre4,
                csi_ck: Some(Hertz(csi)),
                hsi_ck: Some(Hertz(hsi)),
                hsi48_ck: Some(Hertz(hsi48)),
                per_ck: Some(Hertz(per_ck)),
                hse_ck,
                pll1_p_ck,
                pll1_q_ck,
                pll1_r_ck,
                pll2_p_ck,
                pll2_q_ck,
                pll2_r_ck,
                pll3_p_ck,
                pll3_q_ck,
                pll3_r_ck,
                timx_ker_ck: Hertz(rcc_timx_ker_ck),
                timy_ker_ck: Hertz(rcc_timy_ker_ck),
                sys_ck,
                c_ck: Hertz(sys_d1cpre_ck),
            },
            d3ccipr: D3CCIPR { _0: () },
            rb: self.rb,
        }
    }
}
/// Frozen core clock frequencies
///
/// The existence of this value indicates that the core clock
/// configuration can no longer be changed
#[derive(Clone, Copy)]
pub struct CoreClocks {
    // Bus clocks and their APB prescaler values
    hclk: Hertz,
    pclk1: Hertz,
    pclk2: Hertz,
    pclk3: Hertz,
    pclk4: Hertz,
    ppre1: u8,
    ppre2: u8,
    ppre3: u8,
    ppre4: u8,
    // Oscillator / peripheral clocks; `None` when the source is off
    csi_ck: Option<Hertz>,
    hsi_ck: Option<Hertz>,
    hsi48_ck: Option<Hertz>,
    per_ck: Option<Hertz>,
    hse_ck: Option<Hertz>,
    // PLL outputs; `None` when the output is disabled
    pll1_p_ck: Option<Hertz>,
    pll1_q_ck: Option<Hertz>,
    pll1_r_ck: Option<Hertz>,
    pll2_p_ck: Option<Hertz>,
    pll2_q_ck: Option<Hertz>,
    pll2_r_ck: Option<Hertz>,
    pll3_p_ck: Option<Hertz>,
    pll3_q_ck: Option<Hertz>,
    pll3_r_ck: Option<Hertz>,
    // Timer kernel clocks, system clock and core clock
    timx_ker_ck: Hertz,
    timy_ker_ck: Hertz,
    sys_ck: Hertz,
    c_ck: Hertz,
}
/// Getters for pclk and ppre
///
/// Expands to a frequency getter and a prescaler getter per APB bus.
macro_rules! pclk_ppre_getter {
    ($(($pclk:ident, $ppre:ident),)+) => {
        $(
            /// Returns the frequency of the APBn
            pub fn $pclk(&self) -> Hertz {
                self.$pclk
            }
            /// Returns the prescaler of the APBn
            pub fn $ppre(&self) -> u8 {
                self.$ppre
            }
        )+
    };
}
/// Getters for optional clocks
macro_rules! optional_ck_getter {
    ($($opt_ck:ident,)+) => {
        $(
            // NB: `$opt_ck` is not expanded inside doc comments; rustdoc
            // shows the literal text below.
            /// Returns the frequency of optional clock $opt_ck
            pub fn $opt_ck(&self) -> Option<Hertz> {
                self.$opt_ck
            }
        )+
    };
}
/// Getters for pll clocks
macro_rules! pll_getter {
    ($($pll_ck:ident,)+) => {
        $(
            /// Returns the frequency of the PLLx output
            pub fn $pll_ck(&self) -> Option<Hertz> {
                self.$pll_ck
            }
        )+
    };
}
impl CoreClocks {
    // Read-only accessors for the frozen frequencies; most are produced
    // by the getter macros above.
    /// Returns the frequency of AHB1,2,3 busses
    pub fn hclk(&self) -> Hertz {
        self.hclk
    }
    /// Returns the frequency of the AXI bus
    pub fn aclk(&self) -> Hertz {
        self.hclk // Same as HCLK
    }
    pclk_ppre_getter! {
        (pclk1, ppre1),
        (pclk2, ppre2),
        (pclk3, ppre3),
        (pclk4, ppre4),
    }
    optional_ck_getter! {
        csi_ck,
        hsi_ck,
        hsi48_ck,
        per_ck,
        hse_ck,
    }
    pll_getter! {
        pll1_p_ck,
        pll1_q_ck,
        pll1_r_ck,
        pll2_p_ck,
        pll2_q_ck,
        pll2_r_ck,
        pll3_p_ck,
        pll3_q_ck,
        pll3_r_ck,
    }
    /// Returns the input frequency to the SCGU
    pub fn sys_ck(&self) -> Hertz {
        self.sys_ck
    }
    /// Returns the input frequency to the SCGU - ALIAS
    pub fn sysclk(&self) -> Hertz {
        self.sys_ck
    }
    /// Returns the CK_INT frequency for timers on APB1
    pub fn timx_ker_ck(&self) -> Hertz {
        self.timx_ker_ck
    }
    /// Returns the CK_INT frequency for timers on APB2
    pub fn timy_ker_ck(&self) -> Hertz {
        self.timy_ker_ck
    }
    /// Returns the core frequency
    pub fn c_ck(&self) -> Hertz {
        self.c_ck
    }
}
|
//! This example demonstrates using colors to stylize [`Grid`] borders.
//! Borders can be set globally with [`SpannedConfig::set_border_color_global()`]
//! or individually with [`SpannedConfig::set_border_color()`].
//!
//! * 🚩 This example requires the `color` feature.
//!
//! * [`CompactConfig`] also supports colorization when the `color` feature is enabled.
use papergrid::{
color::AnsiColor,
colors::NoColors,
config::spanned::SpannedConfig,
config::{AlignmentHorizontal, AlignmentVertical, Borders, Entity::Global, Indent, Sides},
dimension::spanned::SpannedGridDimension,
dimension::Estimate,
grid::iterable::Grid,
records::IterRecords,
};
fn main() {
    // Build the styled configuration first, then the record data.
    let cfg = generate_table_config();

    let rows = vec![
        vec!["Papergrid", "is a library", "for printing tables", "!"],
        vec!["", "Just like this", "", ""],
    ];
    let records = IterRecords::new(rows, 4, Some(2));

    // Estimate column widths / row heights for the records under `cfg`.
    let mut dimension = SpannedGridDimension::default();
    dimension.estimate(&records, &cfg);

    // Render the grid to a string and print it.
    let rendered = Grid::new(records, &dimension, &cfg, NoColors).to_string();
    println!("{rendered}");
}
/// Builds a [`SpannedConfig`] demonstrating spans, alignment, padding
/// and a global (green-background) border color.
fn generate_table_config() -> SpannedConfig {
    // Plain ASCII frame: '+' at corners/intersections, '-' horizontal,
    // '|' vertical.
    let borders = Borders {
        top: Some('-'),
        bottom: Some('-'),
        horizontal: Some('-'),
        left: Some('|'),
        right: Some('|'),
        vertical: Some('|'),
        top_left: Some('+'),
        top_right: Some('+'),
        top_intersection: Some('+'),
        bottom_left: Some('+'),
        bottom_right: Some('+'),
        bottom_intersection: Some('+'),
        left_intersection: Some('+'),
        right_intersection: Some('+'),
        intersection: Some('+'),
    };

    let mut cfg = SpannedConfig::default();
    cfg.set_borders(borders);

    // Span cell (1, 1) across three columns and cell (0, 0) across two rows.
    cfg.set_column_span((1, 1), 3);
    cfg.set_row_span((0, 0), 2);

    cfg.set_alignment_horizontal((1, 0).into(), AlignmentHorizontal::Center);
    cfg.set_alignment_vertical(Global, AlignmentVertical::Center);

    let padding = Sides::new(
        Indent::spaced(4),
        Indent::spaced(4),
        Indent::spaced(1),
        Indent::spaced(1),
    );
    cfg.set_padding((0, 0).into(), padding);

    // Color every border character with an ANSI escape pair.
    cfg.set_border_color_global(AnsiColor::new("\u{1b}[42m".into(), "\u{1b}[0m".into()));

    cfg
}
|
// (C) Copyright 2019-2020 Hewlett Packard Enterprise Development LP
/// The internal Pest parser.
///
/// The grammar lives in `dockerfile_parser.pest`; pest's derive macro
/// generates the `Rule` enum and parsing entry points from it.
#[derive(Parser)]
#[grammar = "dockerfile_parser.pest"]
pub(crate) struct DockerfileParser;
/// A Pest Pair for Dockerfile rules.
pub(crate) type Pair<'a> = pest::iterators::Pair<'a, Rule>;
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Models for the "recoverable database" resource. All optional fields
// default on deserialization and are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecoverableDatabaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<String>,
    #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective: Option<String>,
    #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub elastic_pool_name: Option<String>,
    #[serde(rename = "lastAvailableBackupDate", default, skip_serializing_if = "Option::is_none")]
    pub last_available_backup_date: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecoverableDatabase {
    // Flattened: ProxyResource fields appear at this object's top level.
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RecoverableDatabaseProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecoverableDatabaseListResult {
    pub value: Vec<RecoverableDatabase>,
}
// Models for the "restorable dropped database" resource. Date fields
// are carried as strings, exactly as received from the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedDatabaseProperties {
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<String>,
    #[serde(rename = "maxSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub max_size_bytes: Option<String>,
    #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective: Option<String>,
    #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub elastic_pool_name: Option<String>,
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(rename = "deletionDate", default, skip_serializing_if = "Option::is_none")]
    pub deletion_date: Option<String>,
    #[serde(rename = "earliestRestoreDate", default, skip_serializing_if = "Option::is_none")]
    pub earliest_restore_date: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedDatabase {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RestorableDroppedDatabaseProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorableDroppedDatabaseListResult {
    pub value: Vec<RestorableDroppedDatabase>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointProperties {
#[serde(rename = "restorePointType", default, skip_serializing_if = "Option::is_none")]
pub restore_point_type: Option<restore_point_properties::RestorePointType>,
#[serde(rename = "restorePointCreationDate", default, skip_serializing_if = "Option::is_none")]
pub restore_point_creation_date: Option<String>,
#[serde(rename = "earliestRestoreDate", default, skip_serializing_if = "Option::is_none")]
pub earliest_restore_date: Option<String>,
}
/// Enum types scoped to [`RestorePointProperties`].
pub mod restore_point_properties {
    use super::*;
    /// Restore point kind; `rename` attributes preserve the upper-case wire values.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RestorePointType {
        #[serde(rename = "DISCRETE")]
        Discrete,
        #[serde(rename = "CONTINUOUS")]
        Continuous,
    }
}
/// A database restore point resource (flattened [`ProxyResource`] plus
/// optional [`RestorePointProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePoint {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RestorePointProperties>,
}
/// List response for [`RestorePoint`]; `value` is required on deserialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointListResult {
    pub value: Vec<RestorePoint>,
}
/// Request body for a server name-availability check. Both fields are
/// required (no `default`/`skip_serializing_if`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityRequest {
    pub name: String,
    /// Serialized as `type` on the wire (`type` is a Rust keyword, hence `type_`).
    #[serde(rename = "type")]
    pub type_: check_name_availability_request::Type,
}
/// Enum types scoped to [`CheckNameAvailabilityRequest`].
pub mod check_name_availability_request {
    use super::*;
    /// Resource type being checked; the only supported wire value is
    /// `Microsoft.Sql/servers`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "Microsoft.Sql/servers")]
        MicrosoftSqlServers,
    }
}
/// Response body of a name-availability check; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub available: Option<bool>,
    /// Human-readable explanation when the name is unavailable.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Machine-readable reason (see [`check_name_availability_response::Reason`]).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<check_name_availability_response::Reason>,
}
/// Enum types scoped to [`CheckNameAvailabilityResponse`].
pub mod check_name_availability_response {
    use super::*;
    /// Why a name is unavailable; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Reason {
        Invalid,
        AlreadyExists,
    }
}
/// Properties of a server connection policy; `connection_type` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerConnectionPolicyProperties {
    #[serde(rename = "connectionType")]
    pub connection_type: server_connection_policy_properties::ConnectionType,
}
/// Enum types scoped to [`ServerConnectionPolicyProperties`].
pub mod server_connection_policy_properties {
    use super::*;
    /// Server connection type; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ConnectionType {
        Default,
        Proxy,
        Redirect,
    }
}
/// A server connection policy resource (flattened [`ProxyResource`] plus
/// optional kind, location, and properties).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerConnectionPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerConnectionPolicyProperties>,
}
/// Properties of a database. All fields are optional; scalar fields are
/// strings/ints mirroring the wire format, while constrained values use the
/// enums in [`database_properties`]. Vec-valued fields are skipped from
/// serialization when empty rather than when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub collation: Option<String>,
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(rename = "containmentState", default, skip_serializing_if = "Option::is_none")]
    pub containment_state: Option<i64>,
    #[serde(rename = "currentServiceObjectiveId", default, skip_serializing_if = "Option::is_none")]
    pub current_service_objective_id: Option<String>,
    #[serde(rename = "databaseId", default, skip_serializing_if = "Option::is_none")]
    pub database_id: Option<String>,
    #[serde(rename = "earliestRestoreDate", default, skip_serializing_if = "Option::is_none")]
    pub earliest_restore_date: Option<String>,
    /// How the database is created (copy, restore, secondary, ...); see
    /// [`database_properties::CreateMode`].
    #[serde(rename = "createMode", default, skip_serializing_if = "Option::is_none")]
    pub create_mode: Option<database_properties::CreateMode>,
    #[serde(rename = "sourceDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub source_database_id: Option<String>,
    #[serde(rename = "sourceDatabaseDeletionDate", default, skip_serializing_if = "Option::is_none")]
    pub source_database_deletion_date: Option<String>,
    #[serde(rename = "restorePointInTime", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_in_time: Option<String>,
    #[serde(
        rename = "recoveryServicesRecoveryPointResourceId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub recovery_services_recovery_point_resource_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<database_properties::Edition>,
    /// Max size in bytes, carried as a string on the wire.
    #[serde(rename = "maxSizeBytes", default, skip_serializing_if = "Option::is_none")]
    pub max_size_bytes: Option<String>,
    #[serde(rename = "requestedServiceObjectiveId", default, skip_serializing_if = "Option::is_none")]
    pub requested_service_objective_id: Option<String>,
    #[serde(rename = "requestedServiceObjectiveName", default, skip_serializing_if = "Option::is_none")]
    pub requested_service_objective_name: Option<database_properties::RequestedServiceObjectiveName>,
    #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective: Option<database_properties::ServiceLevelObjective>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub elastic_pool_name: Option<String>,
    #[serde(rename = "defaultSecondaryLocation", default, skip_serializing_if = "Option::is_none")]
    pub default_secondary_location: Option<String>,
    /// Empty Vec means "absent" for the three list fields below.
    #[serde(rename = "serviceTierAdvisors", default, skip_serializing_if = "Vec::is_empty")]
    pub service_tier_advisors: Vec<ServiceTierAdvisor>,
    #[serde(rename = "transparentDataEncryption", default, skip_serializing_if = "Vec::is_empty")]
    pub transparent_data_encryption: Vec<TransparentDataEncryption>,
    #[serde(rename = "recommendedIndex", default, skip_serializing_if = "Vec::is_empty")]
    pub recommended_index: Vec<RecommendedIndex>,
    #[serde(rename = "failoverGroupId", default, skip_serializing_if = "Option::is_none")]
    pub failover_group_id: Option<String>,
    #[serde(rename = "readScale", default, skip_serializing_if = "Option::is_none")]
    pub read_scale: Option<database_properties::ReadScale>,
    #[serde(rename = "sampleName", default, skip_serializing_if = "Option::is_none")]
    pub sample_name: Option<database_properties::SampleName>,
    #[serde(rename = "zoneRedundant", default, skip_serializing_if = "Option::is_none")]
    pub zone_redundant: Option<bool>,
}
/// Enum types scoped to [`DatabaseProperties`]. `rename` attributes preserve
/// wire spellings (e.g. `PRS1`, `DW100`) that differ from Rust CamelCase.
pub mod database_properties {
    use super::*;
    /// Database creation mode; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreateMode {
        Copy,
        Default,
        NonReadableSecondary,
        OnlineSecondary,
        PointInTimeRestore,
        Recovery,
        Restore,
        RestoreLongTermRetentionBackup,
    }
    /// Database edition/tier.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Edition {
        Web,
        Business,
        Basic,
        Standard,
        Premium,
        #[serde(rename = "PremiumRS")]
        PremiumRs,
        Free,
        Stretch,
        DataWarehouse,
        System,
        System2,
        GeneralPurpose,
        BusinessCritical,
        Hyperscale,
    }
    /// Requested service objective (performance level) name.
    /// Same variant set as [`ServiceLevelObjective`] below.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RequestedServiceObjectiveName {
        System,
        System0,
        System1,
        System2,
        System3,
        System4,
        System2L,
        System3L,
        System4L,
        Free,
        Basic,
        S0,
        S1,
        S2,
        S3,
        S4,
        S6,
        S7,
        S9,
        S12,
        P1,
        P2,
        P3,
        P4,
        P6,
        P11,
        P15,
        #[serde(rename = "PRS1")]
        Prs1,
        #[serde(rename = "PRS2")]
        Prs2,
        #[serde(rename = "PRS4")]
        Prs4,
        #[serde(rename = "PRS6")]
        Prs6,
        #[serde(rename = "DW100")]
        Dw100,
        #[serde(rename = "DW200")]
        Dw200,
        #[serde(rename = "DW300")]
        Dw300,
        #[serde(rename = "DW400")]
        Dw400,
        #[serde(rename = "DW500")]
        Dw500,
        #[serde(rename = "DW600")]
        Dw600,
        #[serde(rename = "DW1000")]
        Dw1000,
        #[serde(rename = "DW1200")]
        Dw1200,
        #[serde(rename = "DW1000c")]
        Dw1000c,
        #[serde(rename = "DW1500")]
        Dw1500,
        #[serde(rename = "DW1500c")]
        Dw1500c,
        #[serde(rename = "DW2000")]
        Dw2000,
        #[serde(rename = "DW2000c")]
        Dw2000c,
        #[serde(rename = "DW3000")]
        Dw3000,
        #[serde(rename = "DW2500c")]
        Dw2500c,
        #[serde(rename = "DW3000c")]
        Dw3000c,
        #[serde(rename = "DW6000")]
        Dw6000,
        #[serde(rename = "DW5000c")]
        Dw5000c,
        #[serde(rename = "DW6000c")]
        Dw6000c,
        #[serde(rename = "DW7500c")]
        Dw7500c,
        #[serde(rename = "DW10000c")]
        Dw10000c,
        #[serde(rename = "DW15000c")]
        Dw15000c,
        #[serde(rename = "DW30000c")]
        Dw30000c,
        #[serde(rename = "DS100")]
        Ds100,
        #[serde(rename = "DS200")]
        Ds200,
        #[serde(rename = "DS300")]
        Ds300,
        #[serde(rename = "DS400")]
        Ds400,
        #[serde(rename = "DS500")]
        Ds500,
        #[serde(rename = "DS600")]
        Ds600,
        #[serde(rename = "DS1000")]
        Ds1000,
        #[serde(rename = "DS1200")]
        Ds1200,
        #[serde(rename = "DS1500")]
        Ds1500,
        #[serde(rename = "DS2000")]
        Ds2000,
        ElasticPool,
    }
    /// Current service level objective; mirrors
    /// [`RequestedServiceObjectiveName`] variant-for-variant.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServiceLevelObjective {
        System,
        System0,
        System1,
        System2,
        System3,
        System4,
        System2L,
        System3L,
        System4L,
        Free,
        Basic,
        S0,
        S1,
        S2,
        S3,
        S4,
        S6,
        S7,
        S9,
        S12,
        P1,
        P2,
        P3,
        P4,
        P6,
        P11,
        P15,
        #[serde(rename = "PRS1")]
        Prs1,
        #[serde(rename = "PRS2")]
        Prs2,
        #[serde(rename = "PRS4")]
        Prs4,
        #[serde(rename = "PRS6")]
        Prs6,
        #[serde(rename = "DW100")]
        Dw100,
        #[serde(rename = "DW200")]
        Dw200,
        #[serde(rename = "DW300")]
        Dw300,
        #[serde(rename = "DW400")]
        Dw400,
        #[serde(rename = "DW500")]
        Dw500,
        #[serde(rename = "DW600")]
        Dw600,
        #[serde(rename = "DW1000")]
        Dw1000,
        #[serde(rename = "DW1200")]
        Dw1200,
        #[serde(rename = "DW1000c")]
        Dw1000c,
        #[serde(rename = "DW1500")]
        Dw1500,
        #[serde(rename = "DW1500c")]
        Dw1500c,
        #[serde(rename = "DW2000")]
        Dw2000,
        #[serde(rename = "DW2000c")]
        Dw2000c,
        #[serde(rename = "DW3000")]
        Dw3000,
        #[serde(rename = "DW2500c")]
        Dw2500c,
        #[serde(rename = "DW3000c")]
        Dw3000c,
        #[serde(rename = "DW6000")]
        Dw6000,
        #[serde(rename = "DW5000c")]
        Dw5000c,
        #[serde(rename = "DW6000c")]
        Dw6000c,
        #[serde(rename = "DW7500c")]
        Dw7500c,
        #[serde(rename = "DW10000c")]
        Dw10000c,
        #[serde(rename = "DW15000c")]
        Dw15000c,
        #[serde(rename = "DW30000c")]
        Dw30000c,
        #[serde(rename = "DS100")]
        Ds100,
        #[serde(rename = "DS200")]
        Ds200,
        #[serde(rename = "DS300")]
        Ds300,
        #[serde(rename = "DS400")]
        Ds400,
        #[serde(rename = "DS500")]
        Ds500,
        #[serde(rename = "DS600")]
        Ds600,
        #[serde(rename = "DS1000")]
        Ds1000,
        #[serde(rename = "DS1200")]
        Ds1200,
        #[serde(rename = "DS1500")]
        Ds1500,
        #[serde(rename = "DS2000")]
        Ds2000,
        ElasticPool,
    }
    /// Read scale-out state.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReadScale {
        Enabled,
        Disabled,
    }
    /// Sample schema name; only `AdventureWorksLT` is supported on the wire.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SampleName {
        #[serde(rename = "AdventureWorksLT")]
        AdventureWorksLt,
    }
}
/// A database resource; flattens [`TrackedResource`] (tracked resources carry
/// location/tags) plus optional kind and [`DatabaseProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Database {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatabaseProperties>,
}
/// PATCH payload for a database: flattened base [`Resource`], free-form tags,
/// and the same [`DatabaseProperties`] bag used on create.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseUpdate {
    #[serde(flatten)]
    pub resource: Resource,
    /// Arbitrary JSON object of resource tags.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatabaseProperties>,
}
/// List response for [`Database`]; `value` is required on deserialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseListResult {
    pub value: Vec<Database>,
}
/// Properties of an elastic pool activity (operation) record; all optional.
/// Timestamps and names are strings; counters/limits are integers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolActivityProperties {
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
    #[serde(rename = "errorSeverity", default, skip_serializing_if = "Option::is_none")]
    pub error_severity: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(rename = "operationId", default, skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<String>,
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<i32>,
    #[serde(rename = "requestedDatabaseDtuMax", default, skip_serializing_if = "Option::is_none")]
    pub requested_database_dtu_max: Option<i32>,
    #[serde(rename = "requestedDatabaseDtuMin", default, skip_serializing_if = "Option::is_none")]
    pub requested_database_dtu_min: Option<i32>,
    #[serde(rename = "requestedDtu", default, skip_serializing_if = "Option::is_none")]
    pub requested_dtu: Option<i32>,
    #[serde(rename = "requestedElasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub requested_elastic_pool_name: Option<String>,
    /// NOTE: GB limit is `i64` while the MB limit below is `i32` — mirrors the wire schema.
    #[serde(rename = "requestedStorageLimitInGB", default, skip_serializing_if = "Option::is_none")]
    pub requested_storage_limit_in_gb: Option<i64>,
    #[serde(rename = "elasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub elastic_pool_name: Option<String>,
    #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")]
    pub server_name: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    #[serde(rename = "requestedStorageLimitInMB", default, skip_serializing_if = "Option::is_none")]
    pub requested_storage_limit_in_mb: Option<i32>,
    #[serde(rename = "requestedDatabaseDtuGuarantee", default, skip_serializing_if = "Option::is_none")]
    pub requested_database_dtu_guarantee: Option<i32>,
    #[serde(rename = "requestedDatabaseDtuCap", default, skip_serializing_if = "Option::is_none")]
    pub requested_database_dtu_cap: Option<i32>,
    #[serde(rename = "requestedDtuGuarantee", default, skip_serializing_if = "Option::is_none")]
    pub requested_dtu_guarantee: Option<i32>,
}
/// An elastic pool activity resource (flattened [`ProxyResource`] plus
/// optional location and [`ElasticPoolActivityProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolActivity {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ElasticPoolActivityProperties>,
}
/// List response for [`ElasticPoolActivity`]; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolActivityListResult {
    pub value: Vec<ElasticPoolActivity>,
}
/// Properties of an activity on a database inside an elastic pool; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolDatabaseActivityProperties {
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
    #[serde(rename = "errorSeverity", default, skip_serializing_if = "Option::is_none")]
    pub error_severity: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(rename = "operationId", default, skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<String>,
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<i32>,
    #[serde(rename = "requestedElasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub requested_elastic_pool_name: Option<String>,
    #[serde(rename = "currentElasticPoolName", default, skip_serializing_if = "Option::is_none")]
    pub current_elastic_pool_name: Option<String>,
    #[serde(rename = "currentServiceObjective", default, skip_serializing_if = "Option::is_none")]
    pub current_service_objective: Option<String>,
    #[serde(rename = "requestedServiceObjective", default, skip_serializing_if = "Option::is_none")]
    pub requested_service_objective: Option<String>,
    #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")]
    pub server_name: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
}
/// An elastic pool database activity resource (flattened [`ProxyResource`]
/// plus optional location and [`ElasticPoolDatabaseActivityProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolDatabaseActivity {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ElasticPoolDatabaseActivityProperties>,
}
/// List response for [`ElasticPoolDatabaseActivity`]; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolDatabaseActivityListResult {
    pub value: Vec<ElasticPoolDatabaseActivity>,
}
/// Properties of a recommended index; all optional. Vec fields serialize only
/// when non-empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedIndexProperties {
    /// Proposed action (create/drop/rebuild); see [`recommended_index_properties::Action`].
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub action: Option<recommended_index_properties::Action>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<recommended_index_properties::State>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub created: Option<String>,
    #[serde(rename = "lastModified", default, skip_serializing_if = "Option::is_none")]
    pub last_modified: Option<String>,
    #[serde(rename = "indexType", default, skip_serializing_if = "Option::is_none")]
    pub index_type: Option<recommended_index_properties::IndexType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub columns: Vec<String>,
    #[serde(rename = "includedColumns", default, skip_serializing_if = "Vec::is_empty")]
    pub included_columns: Vec<String>,
    #[serde(rename = "indexScript", default, skip_serializing_if = "Option::is_none")]
    pub index_script: Option<String>,
    #[serde(rename = "estimatedImpact", default, skip_serializing_if = "Vec::is_empty")]
    pub estimated_impact: Vec<OperationImpact>,
    #[serde(rename = "reportedImpact", default, skip_serializing_if = "Vec::is_empty")]
    pub reported_impact: Vec<OperationImpact>,
}
/// Enum types scoped to [`RecommendedIndexProperties`].
pub mod recommended_index_properties {
    use super::*;
    /// Recommended index action.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Action {
        Create,
        Drop,
        Rebuild,
    }
    /// Recommendation lifecycle state; note the wire value "Pending Revert"
    /// contains a space, hence the `rename`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Active,
        Pending,
        Executing,
        Verifying,
        #[serde(rename = "Pending Revert")]
        PendingRevert,
        Reverting,
        Reverted,
        Ignored,
        Expired,
        Blocked,
        Success,
    }
    /// Index kind; wire values are upper-case SQL terms.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum IndexType {
        #[serde(rename = "CLUSTERED")]
        Clustered,
        #[serde(rename = "NONCLUSTERED")]
        Nonclustered,
        #[serde(rename = "COLUMNSTORE")]
        Columnstore,
        #[serde(rename = "CLUSTERED COLUMNSTORE")]
        ClusteredColumnstore,
    }
}
/// A recommended index resource (flattened [`ProxyResource`] plus optional
/// [`RecommendedIndexProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedIndex {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RecommendedIndexProperties>,
}
/// Properties of a transparent data encryption configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransparentDataEncryptionProperties {
    /// TDE status (enabled/disabled); omitted from JSON when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<transparent_data_encryption_properties::Status>,
}
/// Enum types scoped to [`TransparentDataEncryptionProperties`].
pub mod transparent_data_encryption_properties {
    use super::*;
    /// TDE configuration status.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Enabled,
        Disabled,
    }
}
/// A transparent data encryption resource (flattened [`ProxyResource`] plus
/// optional location and [`TransparentDataEncryptionProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransparentDataEncryption {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TransparentDataEncryptionProperties>,
}
/// Properties of a service tier advisor: observation window, DTU statistics,
/// and a family of recommended service-level-objective (SLO) name/id pairs.
/// All fields optional; DTU/ratio metrics are `f64`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceTierAdvisorProperties {
    #[serde(rename = "observationPeriodStart", default, skip_serializing_if = "Option::is_none")]
    pub observation_period_start: Option<String>,
    #[serde(rename = "observationPeriodEnd", default, skip_serializing_if = "Option::is_none")]
    pub observation_period_end: Option<String>,
    #[serde(rename = "activeTimeRatio", default, skip_serializing_if = "Option::is_none")]
    pub active_time_ratio: Option<f64>,
    #[serde(rename = "minDtu", default, skip_serializing_if = "Option::is_none")]
    pub min_dtu: Option<f64>,
    #[serde(rename = "avgDtu", default, skip_serializing_if = "Option::is_none")]
    pub avg_dtu: Option<f64>,
    #[serde(rename = "maxDtu", default, skip_serializing_if = "Option::is_none")]
    pub max_dtu: Option<f64>,
    #[serde(rename = "maxSizeInGB", default, skip_serializing_if = "Option::is_none")]
    pub max_size_in_gb: Option<f64>,
    #[serde(rename = "serviceLevelObjectiveUsageMetrics", default, skip_serializing_if = "Vec::is_empty")]
    pub service_level_objective_usage_metrics: Vec<SloUsageMetric>,
    #[serde(rename = "currentServiceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub current_service_level_objective: Option<String>,
    #[serde(rename = "currentServiceLevelObjectiveId", default, skip_serializing_if = "Option::is_none")]
    pub current_service_level_objective_id: Option<String>,
    // Each recommendation below comes as a name + id pair under a long wire key.
    #[serde(
        rename = "usageBasedRecommendationServiceLevelObjective",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub usage_based_recommendation_service_level_objective: Option<String>,
    #[serde(
        rename = "usageBasedRecommendationServiceLevelObjectiveId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub usage_based_recommendation_service_level_objective_id: Option<String>,
    #[serde(
        rename = "databaseSizeBasedRecommendationServiceLevelObjective",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub database_size_based_recommendation_service_level_objective: Option<String>,
    #[serde(
        rename = "databaseSizeBasedRecommendationServiceLevelObjectiveId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub database_size_based_recommendation_service_level_objective_id: Option<String>,
    #[serde(
        rename = "disasterPlanBasedRecommendationServiceLevelObjective",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub disaster_plan_based_recommendation_service_level_objective: Option<String>,
    #[serde(
        rename = "disasterPlanBasedRecommendationServiceLevelObjectiveId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub disaster_plan_based_recommendation_service_level_objective_id: Option<String>,
    #[serde(
        rename = "overallRecommendationServiceLevelObjective",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub overall_recommendation_service_level_objective: Option<String>,
    #[serde(
        rename = "overallRecommendationServiceLevelObjectiveId",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub overall_recommendation_service_level_objective_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub confidence: Option<f64>,
}
/// A service tier advisor resource (flattened [`ProxyResource`] plus optional
/// [`ServiceTierAdvisorProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceTierAdvisor {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServiceTierAdvisorProperties>,
}
/// Usage metric for one service level objective (SLO); all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SloUsageMetric {
    /// SLO name, constrained to the enum in [`slo_usage_metric`].
    #[serde(rename = "serviceLevelObjective", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective: Option<slo_usage_metric::ServiceLevelObjective>,
    #[serde(rename = "serviceLevelObjectiveId", default, skip_serializing_if = "Option::is_none")]
    pub service_level_objective_id: Option<String>,
    #[serde(rename = "inRangeTimeRatio", default, skip_serializing_if = "Option::is_none")]
    pub in_range_time_ratio: Option<f64>,
}
/// Enum types scoped to [`SloUsageMetric`]. The variant set duplicates
/// `database_properties::ServiceLevelObjective`; the duplication mirrors the
/// generated wire schema, so the two must be kept in sync.
pub mod slo_usage_metric {
    use super::*;
    /// Service level objective name; `rename`s preserve wire spellings
    /// (e.g. `PRS1`, `DW100`, `DS100`).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServiceLevelObjective {
        System,
        System0,
        System1,
        System2,
        System3,
        System4,
        System2L,
        System3L,
        System4L,
        Free,
        Basic,
        S0,
        S1,
        S2,
        S3,
        S4,
        S6,
        S7,
        S9,
        S12,
        P1,
        P2,
        P3,
        P4,
        P6,
        P11,
        P15,
        #[serde(rename = "PRS1")]
        Prs1,
        #[serde(rename = "PRS2")]
        Prs2,
        #[serde(rename = "PRS4")]
        Prs4,
        #[serde(rename = "PRS6")]
        Prs6,
        #[serde(rename = "DW100")]
        Dw100,
        #[serde(rename = "DW200")]
        Dw200,
        #[serde(rename = "DW300")]
        Dw300,
        #[serde(rename = "DW400")]
        Dw400,
        #[serde(rename = "DW500")]
        Dw500,
        #[serde(rename = "DW600")]
        Dw600,
        #[serde(rename = "DW1000")]
        Dw1000,
        #[serde(rename = "DW1200")]
        Dw1200,
        #[serde(rename = "DW1000c")]
        Dw1000c,
        #[serde(rename = "DW1500")]
        Dw1500,
        #[serde(rename = "DW1500c")]
        Dw1500c,
        #[serde(rename = "DW2000")]
        Dw2000,
        #[serde(rename = "DW2000c")]
        Dw2000c,
        #[serde(rename = "DW3000")]
        Dw3000,
        #[serde(rename = "DW2500c")]
        Dw2500c,
        #[serde(rename = "DW3000c")]
        Dw3000c,
        #[serde(rename = "DW6000")]
        Dw6000,
        #[serde(rename = "DW5000c")]
        Dw5000c,
        #[serde(rename = "DW6000c")]
        Dw6000c,
        #[serde(rename = "DW7500c")]
        Dw7500c,
        #[serde(rename = "DW10000c")]
        Dw10000c,
        #[serde(rename = "DW15000c")]
        Dw15000c,
        #[serde(rename = "DW30000c")]
        Dw30000c,
        #[serde(rename = "DS100")]
        Ds100,
        #[serde(rename = "DS200")]
        Ds200,
        #[serde(rename = "DS300")]
        Ds300,
        #[serde(rename = "DS400")]
        Ds400,
        #[serde(rename = "DS500")]
        Ds500,
        #[serde(rename = "DS600")]
        Ds600,
        #[serde(rename = "DS1000")]
        Ds1000,
        #[serde(rename = "DS1200")]
        Ds1200,
        #[serde(rename = "DS1500")]
        Ds1500,
        #[serde(rename = "DS2000")]
        Ds2000,
        ElasticPool,
    }
}
/// Estimated or reported impact of an operation on a metric; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationImpact {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "changeValueAbsolute", default, skip_serializing_if = "Option::is_none")]
    pub change_value_absolute: Option<f64>,
    #[serde(rename = "changeValueRelative", default, skip_serializing_if = "Option::is_none")]
    pub change_value_relative: Option<f64>,
}
/// List response for [`ServiceTierAdvisor`]; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceTierAdvisorListResult {
    pub value: Vec<ServiceTierAdvisor>,
}
/// Properties of an in-flight TDE scan (encrypting or decrypting).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransparentDataEncryptionActivityProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<transparent_data_encryption_activity_properties::Status>,
    /// Progress percentage; note this one is `f32`, unlike the `i32`
    /// `percentComplete` fields elsewhere in this file.
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<f32>,
}
/// Enum types scoped to [`TransparentDataEncryptionActivityProperties`].
pub mod transparent_data_encryption_activity_properties {
    use super::*;
    /// Direction of the TDE scan.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Encrypting,
        Decrypting,
    }
}
/// A TDE activity resource (flattened [`ProxyResource`] plus optional location
/// and [`TransparentDataEncryptionActivityProperties`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransparentDataEncryptionActivity {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TransparentDataEncryptionActivityProperties>,
}
/// List response for [`TransparentDataEncryptionActivity`]; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransparentDataEncryptionActivityListResult {
    pub value: Vec<TransparentDataEncryptionActivity>,
}
/// A database security alert (threat detection) policy resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseSecurityAlertPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatabaseSecurityAlertPolicyProperties>,
}
/// Properties of a database security alert policy. `state` is required;
/// everything else is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseSecurityAlertPolicyProperties {
    pub state: database_security_alert_policy_properties::State,
    /// Semicolon/comma style list carried as a single string on the wire —
    /// TODO confirm the exact separator against the service spec.
    #[serde(rename = "disabledAlerts", default, skip_serializing_if = "Option::is_none")]
    pub disabled_alerts: Option<String>,
    #[serde(rename = "emailAddresses", default, skip_serializing_if = "Option::is_none")]
    pub email_addresses: Option<String>,
    #[serde(rename = "emailAccountAdmins", default, skip_serializing_if = "Option::is_none")]
    pub email_account_admins: Option<database_security_alert_policy_properties::EmailAccountAdmins>,
    #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub storage_endpoint: Option<String>,
    /// Secret value; avoid logging this field.
    #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_access_key: Option<String>,
    #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")]
    pub retention_days: Option<i32>,
    #[serde(rename = "useServerDefault", default, skip_serializing_if = "Option::is_none")]
    pub use_server_default: Option<database_security_alert_policy_properties::UseServerDefault>,
}
/// Enum types scoped to [`DatabaseSecurityAlertPolicyProperties`].
pub mod database_security_alert_policy_properties {
    use super::*;
    /// Policy state.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        New,
        Enabled,
        Disabled,
    }
    /// Whether alert emails go to account administrators.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum EmailAccountAdmins {
        Enabled,
        Disabled,
    }
    /// Whether the server-level default policy applies.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UseServerDefault {
        Enabled,
        Disabled,
    }
}
/// Properties of a data masking policy; `data_masking_state` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingPolicyProperties {
    #[serde(rename = "dataMaskingState")]
    pub data_masking_state: data_masking_policy_properties::DataMaskingState,
    #[serde(rename = "exemptPrincipals", default, skip_serializing_if = "Option::is_none")]
    pub exempt_principals: Option<String>,
    #[serde(rename = "applicationPrincipals", default, skip_serializing_if = "Option::is_none")]
    pub application_principals: Option<String>,
    #[serde(rename = "maskingLevel", default, skip_serializing_if = "Option::is_none")]
    pub masking_level: Option<String>,
}
/// Enum types scoped to [`DataMaskingPolicyProperties`].
pub mod data_masking_policy_properties {
    use super::*;
    /// Whether data masking is in effect.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DataMaskingState {
        Disabled,
        Enabled,
    }
}
/// A data masking policy resource (flattened [`ProxyResource`] plus optional
/// properties, location, and kind).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DataMaskingPolicyProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
}
/// Properties of a data masking rule. Schema/table/column names and the
/// masking function are required; the function-specific parameters
/// (`number_from` .. `replacement_string`) are optional strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingRuleProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "aliasName", default, skip_serializing_if = "Option::is_none")]
    pub alias_name: Option<String>,
    #[serde(rename = "ruleState", default, skip_serializing_if = "Option::is_none")]
    pub rule_state: Option<data_masking_rule_properties::RuleState>,
    #[serde(rename = "schemaName")]
    pub schema_name: String,
    #[serde(rename = "tableName")]
    pub table_name: String,
    #[serde(rename = "columnName")]
    pub column_name: String,
    #[serde(rename = "maskingFunction")]
    pub masking_function: data_masking_rule_properties::MaskingFunction,
    /// Numeric bounds carried as strings on the wire.
    #[serde(rename = "numberFrom", default, skip_serializing_if = "Option::is_none")]
    pub number_from: Option<String>,
    #[serde(rename = "numberTo", default, skip_serializing_if = "Option::is_none")]
    pub number_to: Option<String>,
    #[serde(rename = "prefixSize", default, skip_serializing_if = "Option::is_none")]
    pub prefix_size: Option<String>,
    #[serde(rename = "suffixSize", default, skip_serializing_if = "Option::is_none")]
    pub suffix_size: Option<String>,
    #[serde(rename = "replacementString", default, skip_serializing_if = "Option::is_none")]
    pub replacement_string: Option<String>,
}
/// Enum types scoped to [`DataMaskingRuleProperties`].
pub mod data_masking_rule_properties {
    use super::*;
    /// Whether the rule is active.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RuleState {
        Disabled,
        Enabled,
    }
    /// Masking function; `CCN`/`SSN` keep their upper-case wire spellings.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum MaskingFunction {
        Default,
        #[serde(rename = "CCN")]
        Ccn,
        Email,
        Number,
        #[serde(rename = "SSN")]
        Ssn,
        Text,
    }
}
/// A data masking rule resource (flattened [`ProxyResource`] plus optional
/// properties, location, and kind).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingRule {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DataMaskingRuleProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
}
/// List response for [`DataMaskingRule`]. Unlike most list results in this
/// file, `value` here is defaulted and skipped when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingRuleListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataMaskingRule>,
}
/// Properties of an elastic pool; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolProperties {
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<elastic_pool_properties::State>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<elastic_pool_properties::Edition>,
    /// Total shared DTUs for the pool.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub dtu: Option<i32>,
    #[serde(rename = "databaseDtuMax", default, skip_serializing_if = "Option::is_none")]
    pub database_dtu_max: Option<i32>,
    #[serde(rename = "databaseDtuMin", default, skip_serializing_if = "Option::is_none")]
    pub database_dtu_min: Option<i32>,
    /// Wire key is `storageMB` (upper-case MB).
    #[serde(rename = "storageMB", default, skip_serializing_if = "Option::is_none")]
    pub storage_mb: Option<i32>,
    #[serde(rename = "zoneRedundant", default, skip_serializing_if = "Option::is_none")]
    pub zone_redundant: Option<bool>,
}
/// Enum types scoped to [`ElasticPoolProperties`].
pub mod elastic_pool_properties {
    use super::*;
    /// Pool lifecycle state.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Creating,
        Ready,
        Disabled,
    }
    /// Pool edition/tier.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Edition {
        Basic,
        Standard,
        Premium,
        GeneralPurpose,
        BusinessCritical,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPool {
#[serde(flatten)]
pub tracked_resource: TrackedResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ElasticPoolProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolUpdate {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ElasticPoolProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolListResult {
pub value: Vec<ElasticPool>,
}
/// Properties of a server firewall rule. Both IP endpoints are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FirewallRuleProperties {
    #[serde(rename = "startIpAddress")]
    pub start_ip_address: String,
    #[serde(rename = "endIpAddress")]
    pub end_ip_address: String,
}
/// A server firewall rule resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FirewallRule {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    // `type` is a Rust keyword, hence the `type_` field name with a rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<FirewallRuleProperties>,
}
/// Response body for a list-firewall-rules operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FirewallRuleListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<FirewallRule>,
}
/// Properties of a geo-backup policy; `state` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicyProperties {
    pub state: geo_backup_policy_properties::State,
    #[serde(rename = "storageType", default, skip_serializing_if = "Option::is_none")]
    pub storage_type: Option<String>,
}
/// Enum types used by `GeoBackupPolicyProperties`.
pub mod geo_backup_policy_properties {
    use super::*;
    /// Whether geo-backup is enabled for the database.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
    }
}
/// A geo-backup policy resource; note `properties` is required here, unlike
/// most sibling resources in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    pub properties: GeoBackupPolicyProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}
/// Response body for a list-geo-backup-policies operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoBackupPolicyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GeoBackupPolicy>,
}
/// Properties for an import operation issued through the extension endpoint;
/// extends the shared `ExportRequest` fields (storage + credentials).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportExtensionProperties {
    #[serde(flatten)]
    pub export_request: ExportRequest,
    #[serde(rename = "operationMode")]
    pub operation_mode: import_extension_properties::OperationMode,
}
/// Enum types used by `ImportExtensionProperties`.
pub mod import_extension_properties {
    use super::*;
    /// Only `Import` is a valid mode for this request type.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OperationMode {
        Import,
    }
}
/// Request body for importing a database via the extensions API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportExtensionRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    // `type` is a Rust keyword, hence the `type_` field name with a rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ImportExtensionProperties>,
}
/// Response resource describing the status of an import/export operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportExportResponse {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ImportExportResponseProperties>,
}
/// Status details of an import/export operation; all fields are
/// server-populated and optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportExportResponseProperties {
    #[serde(rename = "requestType", default, skip_serializing_if = "Option::is_none")]
    pub request_type: Option<String>,
    #[serde(rename = "requestId", default, skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
    #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")]
    pub server_name: Option<String>,
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "lastModifiedTime", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_time: Option<String>,
    #[serde(rename = "queuedTime", default, skip_serializing_if = "Option::is_none")]
    pub queued_time: Option<String>,
    #[serde(rename = "blobUri", default, skip_serializing_if = "Option::is_none")]
    pub blob_uri: Option<String>,
    #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
}
/// Request body for importing into a new database; extends the shared
/// `ExportRequest` fields and adds the target database's sizing parameters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportRequest {
    #[serde(flatten)]
    pub export_request: ExportRequest,
    #[serde(rename = "databaseName")]
    pub database_name: String,
    pub edition: import_request::Edition,
    #[serde(rename = "serviceObjectiveName")]
    pub service_objective_name: import_request::ServiceObjectiveName,
    // Carried as a string on the wire, not an integer.
    #[serde(rename = "maxSizeBytes")]
    pub max_size_bytes: String,
}
/// Enum types used by `ImportRequest`.
pub mod import_request {
    use super::*;
    /// Database edition to create during import.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Edition {
        Web,
        Business,
        Basic,
        Standard,
        Premium,
        #[serde(rename = "PremiumRS")]
        PremiumRs,
        Free,
        Stretch,
        DataWarehouse,
        System,
        System2,
        GeneralPurpose,
        BusinessCritical,
        Hyperscale,
    }
    /// Service-level objective (performance tier) names; variants are renamed
    /// where the wire value is not valid UpperCamelCase (DW/DS/PRS SKUs).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServiceObjectiveName {
        System,
        System0,
        System1,
        System2,
        System3,
        System4,
        System2L,
        System3L,
        System4L,
        Free,
        Basic,
        S0,
        S1,
        S2,
        S3,
        S4,
        S6,
        S7,
        S9,
        S12,
        P1,
        P2,
        P3,
        P4,
        P6,
        P11,
        P15,
        #[serde(rename = "PRS1")]
        Prs1,
        #[serde(rename = "PRS2")]
        Prs2,
        #[serde(rename = "PRS4")]
        Prs4,
        #[serde(rename = "PRS6")]
        Prs6,
        #[serde(rename = "DW100")]
        Dw100,
        #[serde(rename = "DW200")]
        Dw200,
        #[serde(rename = "DW300")]
        Dw300,
        #[serde(rename = "DW400")]
        Dw400,
        #[serde(rename = "DW500")]
        Dw500,
        #[serde(rename = "DW600")]
        Dw600,
        #[serde(rename = "DW1000")]
        Dw1000,
        #[serde(rename = "DW1200")]
        Dw1200,
        #[serde(rename = "DW1000c")]
        Dw1000c,
        #[serde(rename = "DW1500")]
        Dw1500,
        #[serde(rename = "DW1500c")]
        Dw1500c,
        #[serde(rename = "DW2000")]
        Dw2000,
        #[serde(rename = "DW2000c")]
        Dw2000c,
        #[serde(rename = "DW3000")]
        Dw3000,
        #[serde(rename = "DW2500c")]
        Dw2500c,
        #[serde(rename = "DW3000c")]
        Dw3000c,
        #[serde(rename = "DW6000")]
        Dw6000,
        #[serde(rename = "DW5000c")]
        Dw5000c,
        #[serde(rename = "DW6000c")]
        Dw6000c,
        #[serde(rename = "DW7500c")]
        Dw7500c,
        #[serde(rename = "DW10000c")]
        Dw10000c,
        #[serde(rename = "DW15000c")]
        Dw15000c,
        #[serde(rename = "DW30000c")]
        Dw30000c,
        #[serde(rename = "DS100")]
        Ds100,
        #[serde(rename = "DS200")]
        Ds200,
        #[serde(rename = "DS300")]
        Ds300,
        #[serde(rename = "DS400")]
        Ds400,
        #[serde(rename = "DS500")]
        Ds500,
        #[serde(rename = "DS600")]
        Ds600,
        #[serde(rename = "DS1000")]
        Ds1000,
        #[serde(rename = "DS1200")]
        Ds1200,
        #[serde(rename = "DS1500")]
        Ds1500,
        #[serde(rename = "DS2000")]
        Ds2000,
        ElasticPool,
    }
}
/// Request body for exporting a database: target storage location, storage
/// key, and server administrator credentials. Also reused (flattened) by the
/// import request types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportRequest {
    #[serde(rename = "storageKeyType")]
    pub storage_key_type: export_request::StorageKeyType,
    #[serde(rename = "storageKey")]
    pub storage_key: String,
    #[serde(rename = "storageUri")]
    pub storage_uri: String,
    #[serde(rename = "administratorLogin")]
    pub administrator_login: String,
    #[serde(rename = "administratorLoginPassword")]
    pub administrator_login_password: String,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<export_request::AuthenticationType>,
}
/// Enum types used by `ExportRequest`.
pub mod export_request {
    use super::*;
    /// Kind of key supplied in `storage_key`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum StorageKeyType {
        StorageAccessKey,
        SharedAccessKey,
    }
    /// Authentication scheme for the administrator credentials; wire values
    /// are "SQL" and "ADPassword".
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        #[serde(rename = "SQL")]
        Sql,
        #[serde(rename = "ADPassword")]
        AdPassword,
    }
}
/// A single aggregated metric data point.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricValue {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub average: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub maximum: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub minimum: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub total: Option<f64>,
}
/// A metric name with its display (localized) form.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
/// A metric time series over a start/end window at a given time grain.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Metric {
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "timeGrain", default, skip_serializing_if = "Option::is_none")]
    pub time_grain: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<metric::Unit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<MetricName>,
    #[serde(rename = "metricValues", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_values: Vec<MetricValue>,
}
/// Enum types used by `Metric`.
pub mod metric {
    use super::*;
    /// Unit of measurement; wire values are lowerCamelCase, hence the renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        #[serde(rename = "count")]
        Count,
        #[serde(rename = "bytes")]
        Bytes,
        #[serde(rename = "seconds")]
        Seconds,
        #[serde(rename = "percent")]
        Percent,
        #[serde(rename = "countPerSecond")]
        CountPerSecond,
        #[serde(rename = "bytesPerSecond")]
        BytesPerSecond,
    }
}
/// Response body for a list-metrics operation; `value` is required per spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricListResult {
    pub value: Vec<Metric>,
}
/// Retention and granularity available for a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricAvailability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub retention: Option<String>,
    #[serde(rename = "timeGrain", default, skip_serializing_if = "Option::is_none")]
    pub time_grain: Option<String>,
}
/// Definition (metadata) of a metric: name, aggregation, unit, availabilities.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricDefinition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<MetricName>,
    #[serde(rename = "primaryAggregationType", default, skip_serializing_if = "Option::is_none")]
    pub primary_aggregation_type: Option<metric_definition::PrimaryAggregationType>,
    #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")]
    pub resource_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<metric_definition::Unit>,
    #[serde(rename = "metricAvailabilities", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_availabilities: Vec<MetricAvailability>,
}
/// Enum types used by `MetricDefinition`.
pub mod metric_definition {
    use super::*;
    /// Default aggregation applied to the metric.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PrimaryAggregationType {
        None,
        Average,
        Count,
        Minimum,
        Maximum,
        Total,
    }
    /// Unit of measurement. Unlike `metric::Unit`, these serialize with
    /// UpperCamelCase wire values (no renames in the spec).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
        Bytes,
        Seconds,
        Percent,
        CountPerSecond,
        BytesPerSecond,
    }
}
/// Response body for a list-metric-definitions operation; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricDefinitionListResult {
    pub value: Vec<MetricDefinition>,
}
/// Properties of a geo-replication link between a database and its partner.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplicationLinkProperties {
    #[serde(rename = "isTerminationAllowed", default, skip_serializing_if = "Option::is_none")]
    pub is_termination_allowed: Option<bool>,
    #[serde(rename = "replicationMode", default, skip_serializing_if = "Option::is_none")]
    pub replication_mode: Option<String>,
    #[serde(rename = "partnerServer", default, skip_serializing_if = "Option::is_none")]
    pub partner_server: Option<String>,
    #[serde(rename = "partnerDatabase", default, skip_serializing_if = "Option::is_none")]
    pub partner_database: Option<String>,
    #[serde(rename = "partnerLocation", default, skip_serializing_if = "Option::is_none")]
    pub partner_location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub role: Option<replication_link_properties::Role>,
    #[serde(rename = "partnerRole", default, skip_serializing_if = "Option::is_none")]
    pub partner_role: Option<replication_link_properties::PartnerRole>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<i32>,
    #[serde(rename = "replicationState", default, skip_serializing_if = "Option::is_none")]
    pub replication_state: Option<replication_link_properties::ReplicationState>,
}
/// Enum types used by `ReplicationLinkProperties`.
pub mod replication_link_properties {
    use super::*;
    /// Role of the local database in the replication link.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Role {
        Primary,
        Secondary,
        NonReadableSecondary,
        Source,
        Copy,
    }
    /// Role of the partner database (same value set as `Role`).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PartnerRole {
        Primary,
        Secondary,
        NonReadableSecondary,
        Source,
        Copy,
    }
    /// Replication state; wire values are upper-snake-case, hence the renames.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReplicationState {
        #[serde(rename = "PENDING")]
        Pending,
        #[serde(rename = "SEEDING")]
        Seeding,
        #[serde(rename = "CATCH_UP")]
        CatchUp,
        #[serde(rename = "SUSPENDED")]
        Suspended,
    }
}
/// A replication link resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplicationLink {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    // `type` is a Rust keyword, hence the `type_` field name with a rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ReplicationLinkProperties>,
}
/// Response body for a list-replication-links operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplicationLinkListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ReplicationLink>,
}
/// Parameters for deleting (unlinking) a replication link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UnlinkParameters {
    #[serde(rename = "forcedTermination", default, skip_serializing_if = "Option::is_none")]
    pub forced_termination: Option<bool>,
}
/// Properties of a server's Azure Active Directory administrator; all fields
/// are required by the API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerAdministratorProperties {
    #[serde(rename = "administratorType")]
    pub administrator_type: server_administrator_properties::AdministratorType,
    pub login: String,
    // Security identifier and tenant id, carried as strings on the wire.
    pub sid: String,
    #[serde(rename = "tenantId")]
    pub tenant_id: String,
}
/// Enum types used by `ServerAdministratorProperties`.
pub mod server_administrator_properties {
    use super::*;
    /// Only `ActiveDirectory` is a valid administrator type.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AdministratorType {
        ActiveDirectory,
    }
}
/// A server Azure AD administrator resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerAzureAdAdministrator {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerAdministratorProperties>,
}
/// Response body for a list-server-administrators operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerAdministratorListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ServerAzureAdAdministrator>,
}
/// Properties of a server communication link; the partner server name is
/// required, the state is server-populated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerCommunicationLinkProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    #[serde(rename = "partnerServer")]
    pub partner_server: String,
}
/// A server communication link resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerCommunicationLink {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerCommunicationLinkProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
}
/// Response body for a list-server-communication-links operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerCommunicationLinkListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ServerCommunicationLink>,
}
/// A service objective (performance level) resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceObjective {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServiceObjectiveProperties>,
}
/// Properties of a service objective; all fields are server-populated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceObjectiveProperties {
    #[serde(rename = "serviceObjectiveName", default, skip_serializing_if = "Option::is_none")]
    pub service_objective_name: Option<String>,
    #[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,
    #[serde(rename = "isSystem", default, skip_serializing_if = "Option::is_none")]
    pub is_system: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
/// Response body for a list-service-objectives operation; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceObjectiveListResult {
    pub value: Vec<ServiceObjective>,
}
/// A server usage metric (current value vs. limit for a named quota).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerUsage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")]
    pub resource_name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "nextResetTime", default, skip_serializing_if = "Option::is_none")]
    pub next_reset_time: Option<String>,
}
/// Response body for a list-server-usages operation; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerUsageListResult {
    pub value: Vec<ServerUsage>,
}
/// A database usage metric; structurally identical to `ServerUsage` but kept
/// as a distinct type to match the API's schema.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseUsage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "resourceName", default, skip_serializing_if = "Option::is_none")]
    pub resource_name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "nextResetTime", default, skip_serializing_if = "Option::is_none")]
    pub next_reset_time: Option<String>,
}
/// Response body for a list-database-usages operation; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseUsageListResult {
    pub value: Vec<DatabaseUsage>,
}
/// Capability tree root: what a given Azure location supports, drilling down
/// through server versions, editions, and service-level objectives. Each
/// level carries the same optional `status`/`reason` pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LocationCapabilities {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "supportedServerVersions", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_server_versions: Vec<ServerVersionCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<location_capabilities::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `LocationCapabilities`.
pub mod location_capabilities {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Capabilities of a particular server version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerVersionCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "supportedEditions", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_editions: Vec<EditionCapability>,
    #[serde(rename = "supportedElasticPoolEditions", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_elastic_pool_editions: Vec<ElasticPoolEditionCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<server_version_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ServerVersionCapability`.
pub mod server_version_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Capabilities of a database edition.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EditionCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "supportedServiceLevelObjectives", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_service_level_objectives: Vec<ServiceLevelObjectiveCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<edition_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `EditionCapability`.
pub mod edition_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Capabilities of an elastic pool edition.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolEditionCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "supportedElasticPoolDtus", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_elastic_pool_dtus: Vec<ElasticPoolDtuCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<elastic_pool_edition_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ElasticPoolEditionCapability`.
pub mod elastic_pool_edition_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Capabilities of a single service-level objective: supported max sizes and
/// its performance level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceLevelObjectiveCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "supportedMaxSizes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_max_sizes: Vec<MaxSizeCapability>,
    #[serde(rename = "performanceLevel", default, skip_serializing_if = "Option::is_none")]
    pub performance_level: Option<PerformanceLevelCapability>,
    #[serde(rename = "includedMaxSize", default, skip_serializing_if = "Option::is_none")]
    pub included_max_size: Option<MaxSizeCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<service_level_objective_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ServiceLevelObjectiveCapability`.
pub mod service_level_objective_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Capabilities of an elastic pool DTU level: database counts and per-database
/// size/DTU limits supported at that level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolDtuCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i32>,
    #[serde(rename = "maxDatabaseCount", default, skip_serializing_if = "Option::is_none")]
    pub max_database_count: Option<i32>,
    #[serde(rename = "includedMaxSize", default, skip_serializing_if = "Option::is_none")]
    pub included_max_size: Option<MaxSizeCapability>,
    #[serde(rename = "supportedMaxSizes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_max_sizes: Vec<MaxSizeCapability>,
    #[serde(rename = "supportedPerDatabaseMaxSizes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_per_database_max_sizes: Vec<MaxSizeCapability>,
    #[serde(rename = "supportedPerDatabaseMaxDtus", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_per_database_max_dtus: Vec<ElasticPoolPerDatabaseMaxDtuCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<elastic_pool_dtu_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ElasticPoolDtuCapability`.
pub mod elastic_pool_dtu_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// A supported maximum database size (limit + unit).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MaxSizeCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<max_size_capability::Unit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<max_size_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `MaxSizeCapability`.
pub mod max_size_capability {
    use super::*;
    /// Storage size unit for the `limit` value.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Megabytes,
        Gigabytes,
        Terabytes,
        Petabytes,
    }
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// A performance level (value + unit, e.g. DTU count).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PerformanceLevelCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<performance_level_capability::Unit>,
}
/// Enum types used by `PerformanceLevelCapability`.
pub mod performance_level_capability {
    use super::*;
    /// Unit of the performance level; only "DTU" in this API version.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        #[serde(rename = "DTU")]
        Dtu,
    }
}
/// A supported per-database maximum DTU, with the minimum DTUs allowed under it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolPerDatabaseMaxDtuCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i32>,
    #[serde(rename = "supportedPerDatabaseMinDtus", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_per_database_min_dtus: Vec<ElasticPoolPerDatabaseMinDtuCapability>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<elastic_pool_per_database_max_dtu_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ElasticPoolPerDatabaseMaxDtuCapability`.
pub mod elastic_pool_per_database_max_dtu_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// A supported per-database minimum DTU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ElasticPoolPerDatabaseMinDtuCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<elastic_pool_per_database_min_dtu_capability::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
}
/// Enum types used by `ElasticPoolPerDatabaseMinDtuCapability`.
pub mod elastic_pool_per_database_min_dtu_capability {
    use super::*;
    /// Availability status of the capability.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Visible,
        Available,
        Default,
        Disabled,
    }
}
/// Properties of a database blob auditing policy; `state` is required,
/// storage/target settings are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseBlobAuditingPolicyProperties {
    pub state: database_blob_auditing_policy_properties::State,
    #[serde(rename = "storageEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub storage_endpoint: Option<String>,
    #[serde(rename = "storageAccountAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_access_key: Option<String>,
    #[serde(rename = "retentionDays", default, skip_serializing_if = "Option::is_none")]
    pub retention_days: Option<i32>,
    #[serde(rename = "auditActionsAndGroups", default, skip_serializing_if = "Vec::is_empty")]
    pub audit_actions_and_groups: Vec<String>,
    #[serde(rename = "storageAccountSubscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub storage_account_subscription_id: Option<String>,
    #[serde(rename = "isStorageSecondaryKeyInUse", default, skip_serializing_if = "Option::is_none")]
    pub is_storage_secondary_key_in_use: Option<bool>,
    #[serde(rename = "isAzureMonitorTargetEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_azure_monitor_target_enabled: Option<bool>,
    #[serde(rename = "queueDelayMs", default, skip_serializing_if = "Option::is_none")]
    pub queue_delay_ms: Option<i32>,
}
/// Enum types used by `DatabaseBlobAuditingPolicyProperties`.
pub mod database_blob_auditing_policy_properties {
    use super::*;
    /// Whether blob auditing is enabled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Enabled,
        Disabled,
    }
}
/// A database blob auditing policy resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseBlobAuditingPolicy {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatabaseBlobAuditingPolicyProperties>,
}
/// Paged response body for listing blob auditing policies; `next_link`
/// carries the continuation URL when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabaseBlobAuditingPolicyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatabaseBlobAuditingPolicy>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Paged response body for listing encryption protectors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProtectorListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<EncryptionProtector>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a server encryption protector; the key type is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProtectorProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subregion: Option<String>,
    #[serde(rename = "serverKeyName", default, skip_serializing_if = "Option::is_none")]
    pub server_key_name: Option<String>,
    #[serde(rename = "serverKeyType")]
    pub server_key_type: encryption_protector_properties::ServerKeyType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub thumbprint: Option<String>,
}
/// Enum types used by `EncryptionProtectorProperties`.
pub mod encryption_protector_properties {
    use super::*;
    /// Whether the protector key is managed by the service or held in Key Vault.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServerKeyType {
        ServiceManaged,
        AzureKeyVault,
    }
}
/// A server encryption protector resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProtector {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<EncryptionProtectorProperties>,
}
/// Properties of a failover group: endpoints, replication role/state, partner
/// servers, and member databases. The read-write endpoint and partner list
/// are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupProperties {
    #[serde(rename = "readWriteEndpoint")]
    pub read_write_endpoint: FailoverGroupReadWriteEndpoint,
    #[serde(rename = "readOnlyEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub read_only_endpoint: Option<FailoverGroupReadOnlyEndpoint>,
    #[serde(rename = "replicationRole", default, skip_serializing_if = "Option::is_none")]
    pub replication_role: Option<failover_group_properties::ReplicationRole>,
    #[serde(rename = "replicationState", default, skip_serializing_if = "Option::is_none")]
    pub replication_state: Option<String>,
    #[serde(rename = "partnerServers")]
    pub partner_servers: Vec<PartnerInfo>,
    // Resource ids of the databases in the failover group.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub databases: Vec<String>,
}
/// Enum types used by `FailoverGroupProperties`.
pub mod failover_group_properties {
    use super::*;
    /// Role of the local server within the failover group.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReplicationRole {
        Primary,
        Secondary,
    }
}
/// Read-write endpoint configuration of a failover group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupReadWriteEndpoint {
    #[serde(rename = "failoverPolicy")]
    pub failover_policy: failover_group_read_write_endpoint::FailoverPolicy,
    // Grace period before data-loss failover; only meaningful when the
    // policy is `Automatic`.
    #[serde(
        rename = "failoverWithDataLossGracePeriodMinutes",
        default,
        skip_serializing_if = "Option::is_none"
    )]
    pub failover_with_data_loss_grace_period_minutes: Option<i32>,
}
/// Enum types used by `FailoverGroupReadWriteEndpoint`.
pub mod failover_group_read_write_endpoint {
    use super::*;
    /// Failover policy of the read-write endpoint.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum FailoverPolicy {
        Manual,
        Automatic,
    }
}
/// Read-only endpoint configuration of a failover group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupReadOnlyEndpoint {
    #[serde(rename = "failoverPolicy", default, skip_serializing_if = "Option::is_none")]
    pub failover_policy: Option<failover_group_read_only_endpoint::FailoverPolicy>,
}
/// Enum types used by `FailoverGroupReadOnlyEndpoint`.
pub mod failover_group_read_only_endpoint {
    use super::*;
    /// Failover policy of the read-only endpoint.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum FailoverPolicy {
        Disabled,
        Enabled,
    }
}
/// A partner server in a failover group; only the resource `id` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PartnerInfo {
    pub id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "replicationRole", default, skip_serializing_if = "Option::is_none")]
    pub replication_role: Option<partner_info::ReplicationRole>,
}
/// Enum types used by `PartnerInfo`.
pub mod partner_info {
    use super::*;
    /// Role of the partner server within the failover group.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReplicationRole {
        Primary,
        Secondary,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroup {
#[serde(flatten)]
pub proxy_resource: ProxyResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<FailoverGroupProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<FailoverGroupUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupUpdateProperties {
#[serde(rename = "readWriteEndpoint", default, skip_serializing_if = "Option::is_none")]
pub read_write_endpoint: Option<FailoverGroupReadWriteEndpoint>,
#[serde(rename = "readOnlyEndpoint", default, skip_serializing_if = "Option::is_none")]
pub read_only_endpoint: Option<FailoverGroupReadOnlyEndpoint>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub databases: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FailoverGroupListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<FailoverGroup>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// One page of managed instances with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedInstanceListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ManagedInstance>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a managed instance. All fields are optional and omitted
/// from JSON when `None`; camelCase wire names come from `serde(rename)`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedInstanceProperties {
    #[serde(rename = "managedInstanceCreateMode", default, skip_serializing_if = "Option::is_none")]
    pub managed_instance_create_mode: Option<managed_instance_properties::ManagedInstanceCreateMode>,
    #[serde(rename = "fullyQualifiedDomainName", default, skip_serializing_if = "Option::is_none")]
    pub fully_qualified_domain_name: Option<String>,
    #[serde(rename = "administratorLogin", default, skip_serializing_if = "Option::is_none")]
    pub administrator_login: Option<String>,
    // NOTE(review): plain String holding a credential — handle with care when logging.
    #[serde(rename = "administratorLoginPassword", default, skip_serializing_if = "Option::is_none")]
    pub administrator_login_password: Option<String>,
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    #[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
    pub license_type: Option<managed_instance_properties::LicenseType>,
    #[serde(rename = "vCores", default, skip_serializing_if = "Option::is_none")]
    pub v_cores: Option<i32>,
    #[serde(rename = "storageSizeInGB", default, skip_serializing_if = "Option::is_none")]
    pub storage_size_in_gb: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub collation: Option<String>,
    #[serde(rename = "dnsZone", default, skip_serializing_if = "Option::is_none")]
    pub dns_zone: Option<String>,
    #[serde(rename = "dnsZonePartner", default, skip_serializing_if = "Option::is_none")]
    pub dns_zone_partner: Option<String>,
    #[serde(rename = "publicDataEndpointEnabled", default, skip_serializing_if = "Option::is_none")]
    pub public_data_endpoint_enabled: Option<bool>,
    #[serde(rename = "sourceManagedInstanceId", default, skip_serializing_if = "Option::is_none")]
    pub source_managed_instance_id: Option<String>,
    #[serde(rename = "restorePointInTime", default, skip_serializing_if = "Option::is_none")]
    pub restore_point_in_time: Option<String>,
    #[serde(rename = "proxyOverride", default, skip_serializing_if = "Option::is_none")]
    pub proxy_override: Option<managed_instance_properties::ProxyOverride>,
    #[serde(rename = "timezoneId", default, skip_serializing_if = "Option::is_none")]
    pub timezone_id: Option<String>,
    #[serde(rename = "instancePoolId", default, skip_serializing_if = "Option::is_none")]
    pub instance_pool_id: Option<String>,
    #[serde(rename = "maintenanceConfigurationId", default, skip_serializing_if = "Option::is_none")]
    pub maintenance_configuration_id: Option<String>,
    #[serde(rename = "minimalTlsVersion", default, skip_serializing_if = "Option::is_none")]
    pub minimal_tls_version: Option<String>,
}
/// Enums scoped to [`ManagedInstanceProperties`].
pub mod managed_instance_properties {
    use super::*;
    /// How the instance is created: fresh, or restored to a point in time.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ManagedInstanceCreateMode {
        Default,
        PointInTimeRestore,
    }
    /// Licensing model for the instance.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LicenseType {
        LicenseIncluded,
        BasePrice,
    }
    /// Connection-type override for the instance.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProxyOverride {
        Proxy,
        Redirect,
        Default,
    }
}
/// A managed instance resource: flattened [`TrackedResource`] (id/name/type,
/// required location, tags) plus optional identity, SKU, and properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedInstance {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<ResourceIdentity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ManagedInstanceProperties>,
}
/// Patch payload for a managed instance: everything optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedInstanceUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ManagedInstanceProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// One page of API operations with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A single API operation: name, display metadata, origin, and free-form
/// `properties` (kept as raw JSON since the schema is not fixed here).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<OperationDisplay>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<operation::Origin>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
}
/// Enums scoped to [`Operation`].
pub mod operation {
    use super::*;
    /// Who initiated the operation; serialized lowercase on the wire.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Origin {
        #[serde(rename = "user")]
        User,
        #[serde(rename = "system")]
        System,
    }
}
/// Human-readable metadata for an [`Operation`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplay {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// One page of server keys with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerKeyListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ServerKey>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a server key; only `server_key_type` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerKeyProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subregion: Option<String>,
    #[serde(rename = "serverKeyType")]
    pub server_key_type: server_key_properties::ServerKeyType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub thumbprint: Option<String>,
    #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")]
    pub creation_date: Option<String>,
}
/// Enums scoped to [`ServerKeyProperties`].
pub mod server_key_properties {
    use super::*;
    /// Kind of server key: managed by the service or held in a key vault.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServerKeyType {
        ServiceManaged,
        AzureKeyVault,
    }
}
/// A server key resource: flattened [`ProxyResource`] plus optional kind,
/// location, and [`ServerKeyProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerKey {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerKeyProperties>,
}
/// One page of servers with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Server>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a server; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerProperties {
    #[serde(rename = "administratorLogin", default, skip_serializing_if = "Option::is_none")]
    pub administrator_login: Option<String>,
    // NOTE(review): plain String holding a credential — handle with care when logging.
    #[serde(rename = "administratorLoginPassword", default, skip_serializing_if = "Option::is_none")]
    pub administrator_login_password: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    #[serde(rename = "fullyQualifiedDomainName", default, skip_serializing_if = "Option::is_none")]
    pub fully_qualified_domain_name: Option<String>,
}
/// A server resource: flattened [`TrackedResource`] plus optional identity,
/// kind, and [`ServerProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Server {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<ResourceIdentity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerProperties>,
}
/// Patch payload for a server: everything optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ServerProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// Properties of a sync agent; all fields optional. Timestamps
/// (`last_alive_time`, `expiry_time`) are kept as strings as received.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "syncDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub sync_database_id: Option<String>,
    #[serde(rename = "lastAliveTime", default, skip_serializing_if = "Option::is_none")]
    pub last_alive_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<sync_agent_properties::State>,
    #[serde(rename = "isUpToDate", default, skip_serializing_if = "Option::is_none")]
    pub is_up_to_date: Option<bool>,
    #[serde(rename = "expiryTime", default, skip_serializing_if = "Option::is_none")]
    pub expiry_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// Enums scoped to [`SyncAgentProperties`].
pub mod sync_agent_properties {
    use super::*;
    /// Connectivity state of the sync agent.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Online,
        Offline,
        NeverConnected,
    }
}
/// A sync agent resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgent {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncAgentProperties>,
}
/// One page of sync agents with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncAgent>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Wrapper carrying a (re)generated sync agent key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentKeyProperties {
    #[serde(rename = "syncAgentKey", default, skip_serializing_if = "Option::is_none")]
    pub sync_agent_key: Option<String>,
}
/// One page of databases linked to a sync agent.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentLinkedDatabaseListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncAgentLinkedDatabase>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a database linked to a sync agent; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentLinkedDatabaseProperties {
    #[serde(rename = "databaseType", default, skip_serializing_if = "Option::is_none")]
    pub database_type: Option<sync_agent_linked_database_properties::DatabaseType>,
    #[serde(rename = "databaseId", default, skip_serializing_if = "Option::is_none")]
    pub database_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")]
    pub server_name: Option<String>,
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
}
/// Enums scoped to [`SyncAgentLinkedDatabaseProperties`].
pub mod sync_agent_linked_database_properties {
    use super::*;
    /// Kind of the linked database.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DatabaseType {
        AzureSqlDatabase,
        SqlServerDatabase,
    }
}
/// A linked-database resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncAgentLinkedDatabase {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncAgentLinkedDatabaseProperties>,
}
/// One page of sync-database id entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncDatabaseIdListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncDatabaseIdProperties>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A single sync-database id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncDatabaseIdProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// One page of full-schema property entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncFullSchemaPropertiesListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncFullSchemaProperties>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Full schema snapshot: the tables plus the time the snapshot was last updated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncFullSchemaProperties {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tables: Vec<SyncFullSchemaTable>,
    #[serde(rename = "lastUpdateTime", default, skip_serializing_if = "Option::is_none")]
    pub last_update_time: Option<String>,
}
/// A table in the full schema: its columns, name, quoted name, and any error id/flag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncFullSchemaTable {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub columns: Vec<SyncFullSchemaTableColumn>,
    #[serde(rename = "errorId", default, skip_serializing_if = "Option::is_none")]
    pub error_id: Option<String>,
    #[serde(rename = "hasError", default, skip_serializing_if = "Option::is_none")]
    pub has_error: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "quotedName", default, skip_serializing_if = "Option::is_none")]
    pub quoted_name: Option<String>,
}
/// A column in a full-schema table; all metadata fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncFullSchemaTableColumn {
    #[serde(rename = "dataSize", default, skip_serializing_if = "Option::is_none")]
    pub data_size: Option<String>,
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
    #[serde(rename = "errorId", default, skip_serializing_if = "Option::is_none")]
    pub error_id: Option<String>,
    #[serde(rename = "hasError", default, skip_serializing_if = "Option::is_none")]
    pub has_error: Option<bool>,
    #[serde(rename = "isPrimaryKey", default, skip_serializing_if = "Option::is_none")]
    pub is_primary_key: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "quotedName", default, skip_serializing_if = "Option::is_none")]
    pub quoted_name: Option<String>,
}
/// One page of sync-group log entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupLogListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncGroupLogProperties>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A single sync-group log entry. `type` is a Rust keyword, hence the
/// `type_` field name with a serde rename.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupLogProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<sync_group_log_properties::Type>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub source: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub details: Option<String>,
    #[serde(rename = "tracingId", default, skip_serializing_if = "Option::is_none")]
    pub tracing_id: Option<String>,
    #[serde(rename = "operationStatus", default, skip_serializing_if = "Option::is_none")]
    pub operation_status: Option<String>,
}
/// Enums scoped to [`SyncGroupLogProperties`].
pub mod sync_group_log_properties {
    use super::*;
    /// Severity/category of a log entry.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        All,
        Error,
        Warning,
        Success,
    }
}
/// Properties of a sync group; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<i32>,
    #[serde(rename = "lastSyncTime", default, skip_serializing_if = "Option::is_none")]
    pub last_sync_time: Option<String>,
    #[serde(rename = "conflictResolutionPolicy", default, skip_serializing_if = "Option::is_none")]
    pub conflict_resolution_policy: Option<sync_group_properties::ConflictResolutionPolicy>,
    #[serde(rename = "syncDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub sync_database_id: Option<String>,
    #[serde(rename = "hubDatabaseUserName", default, skip_serializing_if = "Option::is_none")]
    pub hub_database_user_name: Option<String>,
    // NOTE(review): plain String holding a credential — handle with care when logging.
    #[serde(rename = "hubDatabasePassword", default, skip_serializing_if = "Option::is_none")]
    pub hub_database_password: Option<String>,
    #[serde(rename = "syncState", default, skip_serializing_if = "Option::is_none")]
    pub sync_state: Option<sync_group_properties::SyncState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<SyncGroupSchema>,
}
/// Enums scoped to [`SyncGroupProperties`].
pub mod sync_group_properties {
    use super::*;
    /// Which side wins on a sync conflict.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ConflictResolutionPolicy {
        HubWin,
        MemberWin,
    }
    /// Overall sync state of the group.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SyncState {
        NotReady,
        Error,
        Warning,
        Progressing,
        Good,
    }
}
/// Schema configuration for a sync group.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupSchema {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tables: Vec<SyncGroupSchemaTable>,
    #[serde(rename = "masterSyncMemberName", default, skip_serializing_if = "Option::is_none")]
    pub master_sync_member_name: Option<String>,
}
/// A table selected for sync, identified by its quoted name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupSchemaTable {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub columns: Vec<SyncGroupSchemaTableColumn>,
    #[serde(rename = "quotedName", default, skip_serializing_if = "Option::is_none")]
    pub quoted_name: Option<String>,
}
/// A column selected for sync within a schema table.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupSchemaTableColumn {
    #[serde(rename = "quotedName", default, skip_serializing_if = "Option::is_none")]
    pub quoted_name: Option<String>,
    #[serde(rename = "dataSize", default, skip_serializing_if = "Option::is_none")]
    pub data_size: Option<String>,
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
}
/// A sync group resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroup {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncGroupProperties>,
}
/// One page of sync groups with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncGroupListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncGroup>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a sync member; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncMemberProperties {
    #[serde(rename = "databaseType", default, skip_serializing_if = "Option::is_none")]
    pub database_type: Option<sync_member_properties::DatabaseType>,
    #[serde(rename = "syncAgentId", default, skip_serializing_if = "Option::is_none")]
    pub sync_agent_id: Option<String>,
    #[serde(rename = "sqlServerDatabaseId", default, skip_serializing_if = "Option::is_none")]
    pub sql_server_database_id: Option<String>,
    #[serde(rename = "serverName", default, skip_serializing_if = "Option::is_none")]
    pub server_name: Option<String>,
    #[serde(rename = "databaseName", default, skip_serializing_if = "Option::is_none")]
    pub database_name: Option<String>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
    // NOTE(review): plain String holding a credential — handle with care when logging.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "syncDirection", default, skip_serializing_if = "Option::is_none")]
    pub sync_direction: Option<sync_member_properties::SyncDirection>,
    #[serde(rename = "syncState", default, skip_serializing_if = "Option::is_none")]
    pub sync_state: Option<sync_member_properties::SyncState>,
}
/// Enums scoped to [`SyncMemberProperties`].
pub mod sync_member_properties {
    use super::*;
    /// Kind of the member database.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum DatabaseType {
        AzureSqlDatabase,
        SqlServerDatabase,
    }
    /// Direction data flows between hub and member.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SyncDirection {
        Bidirectional,
        OneWayMemberToHub,
        OneWayHubToMember,
    }
    /// Detailed sync/provisioning state of the member.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SyncState {
        SyncInProgress,
        SyncSucceeded,
        SyncFailed,
        DisabledTombstoneCleanup,
        DisabledBackupRestore,
        SyncSucceededWithWarnings,
        SyncCancelling,
        SyncCancelled,
        UnProvisioned,
        Provisioning,
        Provisioned,
        ProvisionFailed,
        DeProvisioning,
        DeProvisioned,
        DeProvisionFailed,
        Reprovisioning,
        ReprovisionFailed,
        UnReprovisioned,
    }
}
/// A sync member resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncMember {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SyncMemberProperties>,
}
/// One page of sync members with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyncMemberListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SyncMember>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// One page of subscription usage entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionUsageListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SubscriptionUsage>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A usage metric: display name, current value, limit, and unit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionUsageProperties {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
}
/// A subscription usage resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionUsage {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubscriptionUsageProperties>,
}
/// One page of virtual clusters with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualClusterListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualCluster>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a virtual cluster: subnet, family, and its child resource ids.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualClusterProperties {
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    #[serde(rename = "childResources", default, skip_serializing_if = "Vec::is_empty")]
    pub child_resources: Vec<String>,
}
/// Base resource: id, name, and type (all optional, all read as-is).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
/// A [`Resource`] that additionally has a required `location` and optional tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    pub location: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// A virtual cluster resource: flattened [`TrackedResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualCluster {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualClusterProperties>,
}
/// Patch payload for a virtual cluster: everything optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualClusterUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualClusterProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// Properties of a virtual network rule; `virtual_network_subnet_id` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkRuleProperties {
    #[serde(rename = "virtualNetworkSubnetId")]
    pub virtual_network_subnet_id: String,
    #[serde(rename = "ignoreMissingVnetServiceEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub ignore_missing_vnet_service_endpoint: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<virtual_network_rule_properties::State>,
}
/// Enums scoped to [`VirtualNetworkRuleProperties`].
pub mod virtual_network_rule_properties {
    use super::*;
    /// Lifecycle state of the rule.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Initializing,
        InProgress,
        Ready,
        Deleting,
        Unknown,
    }
}
/// A virtual network rule resource: flattened [`ProxyResource`] plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkRule {
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<VirtualNetworkRuleProperties>,
}
/// One page of virtual network rules with an optional `nextLink` to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkRuleListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualNetworkRule>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A [`Resource`] with no extra fields of its own (no location/tags).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
    #[serde(flatten)]
    pub resource: Resource,
}
/// Identity attached to a resource: principal id, identity type, and tenant id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceIdentity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<resource_identity::Type>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
}
/// Enums scoped to [`ResourceIdentity`].
pub mod resource_identity {
    use super::*;
    /// Identity type; the combined variant serializes as the single wire
    /// string "SystemAssigned,UserAssigned".
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        None,
        SystemAssigned,
        UserAssigned,
        #[serde(rename = "SystemAssigned,UserAssigned")]
        SystemAssignedUserAssigned,
    }
}
/// A SKU: required `name`, optional tier/size/family/capacity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub size: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<i32>,
}
|
/// Computes `n!` (the factorial of `n`).
///
/// Returns 1 for `n == 0` (the empty product), matching the recursive
/// base case of the original implementation. Iterative form avoids one
/// stack frame per level of recursion.
///
/// Note: the result overflows `usize` for large `n` (n >= 21 on 64-bit,
/// n >= 13 on 32-bit targets) — this panics in debug builds and wraps in
/// release builds, exactly as the recursive version did.
fn factorial(n: usize) -> usize {
    // `(1..=n).product()` yields 1 for an empty range (n == 0).
    (1..=n).product()
}
/// Entry point: computes and prints the factorial of 13.
fn main() {
    // Compute first, then report — output is identical to the original.
    let result = factorial(13);
    println!("factorial(13): {}", result);
}
|
use std::fs::File;
use std::io;
use std::fs;
use std::io::Read;
use std::io::ErrorKind;
use std::error::Error;
// Main function can return a Result<T, E> if necessary
// Box<dyn Error> is called a "trait object"
// Can read this to mean "any kind of error"
fn main() -> Result<(), Box<dyn Error>> {
    // thread 'main' panicked at 'crash and burn', src/main.rs:2:5
    // note: Run with `RUST_BACKTRACE=1` environment variable to display a backtrace.
    // panic!("crash and burn");
    let v = vec![1, 2, 3];
    // thread 'main' panicked at 'index out of bounds:
    // the len is 3 but the index is 99
    // in other languages like C, it will attempt to give you exactly
    // what you asked for in this situation, even though it isn't
    // what you want
    // This is called a "buffer overread"
    // 12: error_handling::main
    // at src/main.rs:14
    // v[99];
    // **Recoverable Results with Result**
    // We know File::open returns a Result b/c of docs and compiler will
    // also tell us
    // FROM COMPILER
    // note: expected type `u32`
    // found type `std::result::Result<std::fs::File, std::io::Error>`
    // This tells us the return type of the File::open function is a
    // Result<T, E>
    // The generic parameter T has been filled in here with the type of the
    // success value, std::fs::File
    // The type of E used in the error value is std::io::Error
    // let f: u32 = File::open("hello.txt");
    // let f = File::open("hello.txt");
    // thread 'main' panicked at 'There was a problem opening the file Os
    // code: 2, kind: NotFound, message: "No such file or directory" }', src/main.rs:41:13
    // let f = match f {
    //     Ok(file) => file,
    //     Err(error) => {
    //         panic!("There was a problem opening the file {:?}", error);
    //     }
    // };
    // **Matching on Different Errors**
    // Code above will `panic!` no matter why `File::open` failed
    // let f = match f {
    //     Ok(file) => file,
    //     // This returns an io::Error which is a struct provided by std lib
    //     Err(error) => match error.kind() {
    //         ErrorKind::NotFound => match File::create("hello.txt") {
    //             Ok(fc) => fc,
    //             Err(e) => panic!("Tried to create file but there was a problem: {:?}", e),
    //         },
    //         other_error => panic!("There was a problem opening the file: {:?}", other_error),
    //     }
    // };
    // shortcut instead of using match
    // If the Result value is the Ok variant, unwrap will return the value inside the Ok
    // If the Result is the Error variant, unwrap will call the panic! macro for us
    // let z = File::open("hello.txt").unwrap();
    // **expect**
    // Can use expect in the same way as unwrap
    // We can control the error message in this case
    // If we use unwrap in multiple places, it can take more time to figure out
    // exactly which unwrap is causing the panic
    // let x = File::open("hello.txt").expect("Failed to open hello.txt");
    // **Propagating errors**
    // When you're writing a function whose implementation calls something
    // that might fail, instead of handling the error within this function,
    // you can return the error to the calling code so that it can decide what to do
    // This is known as "propagating" the error and gives more control
    // to the calling code, where there might be more information or
    // logic that dictates how to handle the error
    // Reads a username from a file
    // If the file doesn't exist or can't be read,
    // this function will return those errors to the code that
    // called this function
    // This function is returning a value of type Result<T, E>
    // Generic T is fulfilled with String
    // E is fulfilled with io::Error
    fn read_username_from_file() -> Result<String, io::Error> {
        let f = File::open("hello.txt");
        let mut f = match f {
            Ok(file) => file,
            // Return here b/c there is nothing else we can do
            Err(e) => return Err(e)
        };
        let mut s = String::new();
        // Don't need to `return` here because it is the last expression
        match f.read_to_string(&mut s) {
            Ok(_) => Ok(s),
            Err(e) => Err(e)
        }
    }
    // **Shortcut for Propagating Errors: the ? Operator**
    // If the value of the Result is an `Ok`, the value inside the `Ok`
    // will get returned from this expression
    // If the value is an `Err`, the `Err` will be returned from the whole
    // function as if we had used the `return` keyword so the error value
    // gets propagated to the calling code
    // If an error occurs, the ? will return early out of the whole function
    fn read_username_from_file_improved() -> Result<String, io::Error> {
        let mut f = File::open("hello.txt")?;
        let mut s = String::new();
        f.read_to_string(&mut s)?;
        Ok(s)
    }
    // `?` calls chain, so open + read collapse to one line
    fn read_username_from_file_improved_again() -> Result<String, io::Error> {
        let mut s = String::new();
        File::open("hello.txt")?.read_to_string(&mut s)?;
        Ok(s)
    }
    // Rust provides a convenience function called fs::read_to_string
    // that will open the file, create a new `String`, read the contents of the file,
    // put the contents into that `String`, and then return it
    fn read_username_from_file_final() -> Result<String, io::Error> {
        fs::read_to_string("hello.txt")
    }
    // **The ? Operator Can Only Be Used in Functions That Return Result**
    // The ? operator can only be used in functions that have a return type of Result
    // (this `main` returns Result, so the line below compiles; in a `main`
    // returning `()` the compiler would reject it with:)
    // "the `?` operator can only be used in a function that returns `Result`
    // or `Option` (or another type that implements `std::ops::Try`)"
    let f = File::open("hello.txt")?;
    Ok(())
}
|
//! Rejections
//!
//! Part of the power of the [`Filter`](../trait.Filter.html) system is being able to
//! reject a request from a filter chain. This allows for filters to be
//! combined with `or`, so that if one side of the chain finds that a request
//! doesn't fulfill its requirements, the other side can try to process
//! the request.
//!
//! Many of the built-in [`filters`](../filters) will automatically reject
//! the request with an appropriate rejection. However, you can also build
//! new custom [`Filter`](../trait.Filter.html)s and still want other routes to be
//! matchable in the case a predicate doesn't hold.
//!
//! # Example
//!
//! ```
//! use warp::Filter;
//!
//! // Filter on `/:id`, but reject with 400 if the `id` is `0`.
//! let route = warp::path::param()
//! .and_then(|id: u32| {
//! if id == 0 {
//! Err(warp::reject())
//! } else {
//! Ok("something since id is valid")
//! }
//! });
//! ```
use std::error::Error as StdError;
use http;
use serde;
use serde_json;
use ::never::Never;
pub(crate) use self::sealed::{CombineRejection, Reject};
/// Error cause for a rejection.
pub type Cause = Box<StdError + Send + Sync>;
/// Rejects a request with a default `400 Bad Request`.
#[inline]
pub fn reject() -> Rejection {
    bad_request()
}
/// Rejects a request with `400 Bad Request`.
#[inline]
pub fn bad_request() -> Rejection {
    Reason::BAD_REQUEST.into()
}
/// Rejects a request with `403 Forbidden`
#[inline]
pub fn forbidden() -> Rejection {
    Reason::FORBIDDEN.into()
}
/// Rejects a request with `404 Not Found`.
#[inline]
pub fn not_found() -> Rejection {
    // NOT_FOUND is represented by the empty bit set (see `Reason`).
    Reason::empty().into()
}
// 405 Method Not Allowed
#[inline]
pub(crate) fn method_not_allowed() -> Rejection {
    Reason::METHOD_NOT_ALLOWED.into()
}
// 411 Length Required
#[inline]
pub(crate) fn length_required() -> Rejection {
    Reason::LENGTH_REQUIRED.into()
}
// 413 Payload Too Large
#[inline]
pub(crate) fn payload_too_large() -> Rejection {
    Reason::PAYLOAD_TOO_LARGE.into()
}
// 415 Unsupported Media Type
//
// Used by the body filters if the request payload content-type doesn't match
// what can be deserialized.
#[inline]
pub(crate) fn unsupported_media_type() -> Rejection {
    Reason::UNSUPPORTED_MEDIA_TYPE.into()
}
/// Rejects a request with `500 Internal Server Error`.
#[inline]
pub fn server_error() -> Rejection {
    Reason::SERVER_ERROR.into()
}
/// Rejection of a request by a [`Filter`](::Filter).
#[derive(Debug)]
pub struct Rejection {
    // Bit set of every reason accumulated while combining rejections.
    reason: Reason,
    // Optional underlying error attached via `Rejection::with`.
    cause: Option<Cause>,
}
bitflags! {
    // One bit per rejection reason so combined rejections can carry several
    // reasons at once; status precedence is resolved in `Reject::status`.
    struct Reason: u8 {
        // NOT_FOUND = 0 (the empty set — see `not_found()`)
        const BAD_REQUEST            = 0b00000001;
        const METHOD_NOT_ALLOWED     = 0b00000010;
        const LENGTH_REQUIRED        = 0b00000100;
        const PAYLOAD_TOO_LARGE      = 0b00001000;
        const UNSUPPORTED_MEDIA_TYPE = 0b00010000;
        const FORBIDDEN              = 0b00100000;
        // SERVER_ERROR has to be the last reason, to avoid shadowing it when combining rejections
        const SERVER_ERROR           = 0b10000000;
    }
}
impl Rejection {
/// Return the HTTP status code that this rejection represents.
pub fn status(&self) -> http::StatusCode {
Reject::status(self)
}
/// Add given `err` into `Rejection`.
pub fn with<E>(self, err: E) -> Self
where
E: Into<Cause>,
{
let cause = Some(err.into());
Self {
cause,
.. self
}
}
/// Returns a json response for this rejection.
pub fn json(&self) -> ::reply::Response {
use http::header::{CONTENT_TYPE, HeaderValue};
use hyper::Body;
let code = self.status();
let mut res = http::Response::default();
*res.status_mut() = code;
res.headers_mut().insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
*res.body_mut() = match serde_json::to_string(&self) {
Ok(body) => Body::from(body),
Err(_) => Body::from("{}"),
};
res
}
/// Returns an error cause.
pub fn cause(&self) -> Option<&Cause> {
if let Some(ref err) = self.cause {
return Some(&err)
}
None
}
/// Turn into cause of type `T`.
pub fn into_cause<T>(self) -> Result<Box<T>, Self>
where
T: StdError + Send + Sync + 'static
{
let err = match self.cause {
Some(err) => err,
None => return Err(self)
};
match err.downcast::<T>() {
Ok(err) => Ok(err),
Err(other) => Err(Rejection {
reason: self.reason,
cause: Some(other)
})
}
}
}
#[doc(hidden)]
impl From<Reason> for Rejection {
    // A bare reason becomes a rejection with no attached cause.
    #[inline]
    fn from(reason: Reason) -> Rejection {
        Rejection {
            reason,
            cause: None,
        }
    }
}
impl From<Never> for Rejection {
    // `Never` is uninhabited, so this conversion can never actually run;
    // the empty match convinces the compiler of that.
    #[inline]
    fn from(never: Never) -> Rejection {
        match never {}
    }
}
// `Never` has no values, so every method body is an unreachable empty match;
// the impl exists only to satisfy trait bounds in filter combinators.
impl Reject for Never {
    fn status(&self) -> http::StatusCode {
        match *self {}
    }
    fn into_response(self) -> ::reply::Response {
        match self {}
    }
    fn cause(&self) -> Option<&Cause> {
        None
    }
}
impl Reject for Rejection {
    // Resolve a (possibly combined) reason set to a single status code.
    // The branch ORDER is significant: it encodes precedence when several
    // reason bits are set (server error wins, then forbidden, etc.), so do
    // not reorder these checks.
    fn status(&self) -> http::StatusCode {
        if self.reason.contains(Reason::SERVER_ERROR) {
            http::StatusCode::INTERNAL_SERVER_ERROR
        } else if self.reason.contains(Reason::FORBIDDEN) {
            http::StatusCode::FORBIDDEN
        } else if self.reason.contains(Reason::UNSUPPORTED_MEDIA_TYPE) {
            http::StatusCode::UNSUPPORTED_MEDIA_TYPE
        } else if self.reason.contains(Reason::LENGTH_REQUIRED) {
            http::StatusCode::LENGTH_REQUIRED
        } else if self.reason.contains(Reason::PAYLOAD_TOO_LARGE) {
            http::StatusCode::PAYLOAD_TOO_LARGE
        } else if self.reason.contains(Reason::BAD_REQUEST) {
            http::StatusCode::BAD_REQUEST
        } else if self.reason.contains(Reason::METHOD_NOT_ALLOWED) {
            http::StatusCode::METHOD_NOT_ALLOWED
        } else {
            // The empty reason set means 404 (see `not_found()`).
            debug_assert!(self.reason.is_empty());
            http::StatusCode::NOT_FOUND
        }
    }
    // Plain-text response: body and content-type are only set when a cause
    // was attached; otherwise the body stays empty with no content-type.
    fn into_response(self) -> ::reply::Response {
        use http::header::{CONTENT_TYPE, HeaderValue};
        use hyper::Body;
        let code = self.status();
        let mut res = http::Response::default();
        *res.status_mut() = code;
        match self.cause {
            Some(err) => {
                let bytes = format!("{}", err);
                res.headers_mut().insert(CONTENT_TYPE, HeaderValue::from_static("text/plain"));
                *res.body_mut() = Body::from(bytes);
            },
            None => {}
        }
        res
    }
    #[inline]
    fn cause(&self) -> Option<&Cause> {
        Rejection::cause(&self)
    }
}
// Serializes as `{}` when there is no cause, otherwise as
// `{"description": ..., "message": ...}` — these exact keys are relied upon
// by the tests below, so changing them is a breaking change.
impl serde::Serialize for Rejection {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer
    {
        use serde::ser::SerializeMap;
        let mut map = serializer.serialize_map(None)?;
        let err = match self.cause {
            Some(ref err) => err,
            // No cause: emit an empty map.
            None => return map.end()
        };
        map.serialize_key("description").and_then(|_| map.serialize_value(err.description()))?;
        map.serialize_key("message").and_then(|_| map.serialize_value(&err.to_string()))?;
        map.end()
    }
}
// Sealed traits: exported to the crate via `pub(crate) use` above, but not
// implementable by downstream users.
mod sealed {
    use ::never::Never;
    use super::{Cause, Rejection};
    /// Internal behavior shared by all rejection types.
    pub trait Reject: ::std::fmt::Debug + Send {
        fn status(&self) -> ::http::StatusCode;
        fn into_response(self) -> ::reply::Response;
        fn cause(&self) -> Option<&Cause>;
    }
    // Compile-time check that `Reject` stays object safe.
    fn _assert_object_safe() {
        fn _assert(_: &Reject) {}
    }
    /// Merging of rejections from both sides of an `or`/`and` combinator.
    pub trait CombineRejection<E>: Send + Sized {
        type Rejection: Reject + From<Self> + From<E>;
        fn combine(self, other: E) -> Self::Rejection;
    }
    impl CombineRejection<Rejection> for Rejection {
        type Rejection = Rejection;
        fn combine(self, other: Rejection) -> Self::Rejection {
            // Reasons accumulate; only one cause can be kept, so the side
            // whose reason bits compare numerically greater wins. Because
            // SERVER_ERROR is the highest bit, a server error's cause always
            // takes precedence over any other rejection's cause.
            let reason = self.reason | other.reason;
            let cause = if self.reason > other.reason {
                self.cause
            } else {
                other.cause
            };
            Rejection {
                reason,
                cause
            }
        }
    }
    // `Never` is uninhabited, so the three impls below can never execute;
    // they exist only to make the type system accept infallible filters.
    impl CombineRejection<Never> for Rejection {
        type Rejection = Rejection;
        fn combine(self, other: Never) -> Self::Rejection {
            match other {}
        }
    }
    impl CombineRejection<Rejection> for Never {
        type Rejection = Rejection;
        fn combine(self, _: Rejection) -> Self::Rejection {
            match self {}
        }
    }
    impl CombineRejection<Never> for Never {
        type Rejection = Never;
        fn combine(self, _: Never) -> Self::Rejection {
            match self {}
        }
    }
}
#[cfg(test)]
mod tests {
    use http::header::{CONTENT_TYPE};
    use super::*;
    use http::StatusCode;
    // Each public/internal constructor maps to the expected status code.
    #[test]
    fn rejection_status() {
        assert_eq!(bad_request().status(), StatusCode::BAD_REQUEST);
        assert_eq!(forbidden().status(), StatusCode::FORBIDDEN);
        assert_eq!(not_found().status(), StatusCode::NOT_FOUND);
        assert_eq!(method_not_allowed().status(), StatusCode::METHOD_NOT_ALLOWED);
        assert_eq!(length_required().status(), StatusCode::LENGTH_REQUIRED);
        assert_eq!(payload_too_large().status(), StatusCode::PAYLOAD_TOO_LARGE);
        assert_eq!(unsupported_media_type().status(), StatusCode::UNSUPPORTED_MEDIA_TYPE);
        assert_eq!(server_error().status(), StatusCode::INTERNAL_SERVER_ERROR);
    }
    // Combining keeps both reason bits; the higher-precedence side
    // (SERVER_ERROR) contributes the cause.
    #[test]
    fn combine_rejections() {
        let left = bad_request().with("left");
        let right = server_error().with("right");
        let reject = left.combine(right);
        assert_eq!(Reason::BAD_REQUEST | Reason::SERVER_ERROR, reject.reason);
        match reject.cause {
            Some(err) => assert_eq!("right", err.description()),
            err => unreachable!("{:?}", err)
        }
    }
    // The winning (higher) side has no cause, so none is kept.
    #[test]
    fn combine_rejection_causes_with_some_left_and_none_right() {
        let left = bad_request().with("left");
        let right = server_error();
        match left.combine(right).cause {
            None => {},
            err => unreachable!("{:?}", err)
        }
    }
    #[test]
    fn combine_rejection_causes_with_none_left_and_some_right() {
        let left = bad_request();
        let right = server_error().with("right");
        match left.combine(right).cause {
            Some(err) => assert_eq!("right", err.description()),
            err => unreachable!("{:?}", err)
        }
    }
    // Without a cause: empty body and no content-type header.
    #[test]
    fn into_response_with_none_cause() {
        let resp = bad_request().into_response();
        assert_eq!(400, resp.status());
        assert!(resp.headers().get(CONTENT_TYPE).is_none());
        assert_eq!("", response_body_string(resp))
    }
    // With a cause: plain-text body carrying the cause's Display output.
    #[test]
    fn into_response_with_some_cause() {
        let resp = server_error().with("boom").into_response();
        assert_eq!(500, resp.status());
        assert_eq!("text/plain", resp.headers().get(CONTENT_TYPE).unwrap());
        assert_eq!("boom", response_body_string(resp))
    }
    #[test]
    fn into_json_with_none_cause() {
        let resp = bad_request().json();
        assert_eq!(400, resp.status());
        assert_eq!("application/json", resp.headers().get(CONTENT_TYPE).unwrap());
        assert_eq!("{}", response_body_string(resp))
    }
    #[test]
    fn into_json_with_some_cause() {
        let resp = bad_request().with("boom").json();
        assert_eq!(400, resp.status());
        assert_eq!("application/json", resp.headers().get(CONTENT_TYPE).unwrap());
        let expected = "{\"description\":\"boom\",\"message\":\"boom\"}";
        assert_eq!(expected, response_body_string(resp))
    }
    // Drains a hyper Body synchronously; the body futures here are already
    // ready, so a single poll is expected to yield the full chunk.
    fn response_body_string(resp: ::reply::Response) -> String {
        use futures::{Future, Stream, Async};
        let (_, body) = resp.into_parts();
        match body.concat2().poll() {
            Ok(Async::Ready(chunk)) => {
                String::from_utf8_lossy(&chunk).to_string()
            },
            err => unreachable!("{:?}", err)
        }
    }
}
|
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate chan;
extern crate chan_signal;
extern crate dirs;
extern crate notify;
extern crate time;
extern crate toml;
use std::thread;
use std::time::Duration;
use std::sync::mpsc::{channel, Sender, Receiver, TryRecvError};
use std::fs;
use std::path::{Path, PathBuf};
use std::io::Read;
use time::get_time;
use notify::{RecommendedWatcher, Watcher, RecursiveMode, DebouncedEvent};
use chan_signal::Signal;
mod bar;
mod block;
mod module;
mod blocks;
mod util;
mod wm;
use bar::Bar;
use module::Module;
use block::Block;
use blocks::*;
use util::{Align, WindowManagers, run_command, run_i32, run_bg};
/// Top-level TOML configuration: one `[bar]` table plus any number of
/// `[[module]]` tables.
#[derive(Debug, Deserialize, PartialEq, Clone)]
struct Config {
    bar: CBar,
    module: Option<Vec<CModule>>,
}
/// `[bar]` table: global bar appearance and behavior.
#[derive(Debug, Deserialize, PartialEq, Clone)]
struct CBar {
    // Seconds between bar refreshes.
    update_interval: u64,
    separator: Option<String>,
    background: Option<String>,
    background_opacity: Option<u32>,
    foreground: Option<String>,
    foreground_opacity: Option<u32>,
    // Window manager name; "bspwm" enables event-driven updates.
    wm: Option<String>,
    // Blocks attached directly to the bar (outside any module).
    block: Option<Vec<CBlock>>,
}
/// `[[module]]` table: a group of blocks with its own alignment/colors.
/// Colors fall back to the bar's when unset (see `setup`).
#[derive(Debug, Deserialize, PartialEq, Clone)]
struct CModule {
    // "left" / "center" / "right" (anything else -> Align::None).
    align: Option<String>,
    separator: Option<String>,
    background: Option<String>,
    background_opacity: Option<u32>,
    foreground: Option<String>,
    foreground_opacity: Option<u32>,
    block: Option<Vec<CBlock>>,
}
/// `[[bar.block]]` / `[[module.block]]` table: one status block.
/// Which fields are meaningful depends on `kind` (see `build_block`).
#[derive(Debug, Deserialize, PartialEq, Clone)]
struct CBlock {
    // Block type: battery, date, music, wifi, workspaces, title, custom.
    kind: String,
    icon: Option<String>,
    // Multi-state icons (e.g. battery levels); build_block reads exactly 3.
    icons: Option<Vec<String>>,
    icon_align: Option<String>,
    active_icon: Option<String>,
    device: Option<String>,
    command: Option<String>,
    format: Option<String>,
    max_chars: Option<usize>,
    monitor_battery: Option<bool>,
    click_icons: Option<Vec<Vec<String>>>,
    click_commands: Option<Vec<Vec<String>>>,
}
fn create_config() -> PathBuf {
let config = match dirs::config_dir() {
Some(path) => path,
None => panic!("Couldn't get config directory!"),
};
let folder = format!("{}/rebar", config.display());
let file = format!("{}/config.toml", folder);
let conf_dir = Path::new(folder.as_str());
let conf_file = Path::new(file.as_str());
// Create config if it doesn't exist
if !conf_dir.exists() {
match fs::create_dir(conf_dir) {
Ok(_) => {},
Err(e) => panic!("Couldn't create config directory! Error: {}", e),
}
}
if !conf_file.exists() {
match fs::File::create(conf_file) {
Ok(_) => {},
Err(e) => panic!("Couldn't create config file! Error: {}", e),
}
}
conf_file.to_path_buf()
}
/// Read and deserialize the TOML config, panicking with context on failure.
fn parse_config() -> Config {
    let path = create_config();
    // `fs::read_to_string` replaces the manual open + read_to_string dance
    // and sizes the buffer from file metadata.
    let conf_text = fs::read_to_string(&path).unwrap_or_else(|e| {
        panic!("Could not read config file! Error: {}", e);
    });
    toml::from_str(conf_text.as_str()).unwrap_or_else(|e| {
        panic!("Could not parse config file! Error: {}", e);
    })
}
/// Map an alignment name ("left" / "center" / "right") to its `Align`
/// variant; any other value yields `Align::None`.
fn align<T: Into<String>>(align_string: T) -> Align {
    let name: String = align_string.into();
    if name == "left" {
        Align::Left
    } else if name == "center" {
        Align::Center
    } else if name == "right" {
        Align::Right
    } else {
        Align::None
    }
}
/// Construct a boxed `Block` from one `[[...block]]` config entry.
///
/// Panics when `kind` is unknown or a field required by that kind is
/// missing/inconsistent, so configuration errors surface at startup.
fn build_block(block: &CBlock) -> Box<Block> {
    return match block.kind.as_ref() {
        "battery" => {
            // `monitor_battery` defaults to false when absent.
            let mut battery = if let Some(ref monitor) = block.monitor_battery {
                Battery::new(*monitor)
            } else {
                Battery::new(false)
            };
            // Add icon(s); a single `icon` takes precedence over `icons`.
            if let Some(ref icon_align) = block.icon_align {
                if let Some(ref icon) = block.icon {
                    battery.add_icon(icon.as_str(), align(icon_align.to_owned()));
                } else if let Some(ref icons) = block.icons {
                    // NOTE(review): indexes icons[0..=2] — this panics if the
                    // config lists fewer than three icons; consider validating
                    // the length with a clearer error message.
                    battery.add_icons([
                        icons[0].as_str(),
                        icons[1].as_str(),
                        icons[2].as_str(),
                    ], align(icon_align.to_owned()));
                }
            }
            Box::new(battery)
        },
        "date" => {
            // Date needs a format
            if let Some(ref format) = block.format {
                let mut date = Date::new(format);
                if let Some(ref icon_align) = block.icon_align {
                    if let Some(ref icon) = block.icon {
                        date.add_icon(icon, align(icon_align.to_owned()));
                    }
                }
                Box::new(date)
            } else {
                panic!("Block 'date' requires field 'format'!");
            }
        },
        "music" => {
            let mut music = Music::new();
            if let Some(ref icon_align) = block.icon_align {
                if let Some(ref icon) = block.icon {
                    music.add_icon(icon, align(icon_align.to_owned()));
                }
            }
            if let Some(ref command) = block.command {
                music.set_command(command);
            }
            // click_icons/click_commands must be parallel, both outer and
            // inner vectors, since each icon maps to its command.
            if let Some(ref icons) = block.click_icons {
                if let Some(ref commands) = block.click_commands {
                    if icons.len() != commands.len() {
                        panic!("Block 'music' needs 'click_icons' and 'click_commands'\
                        to be the same length.");
                    }
                    for (i, icon) in icons.iter().enumerate() {
                        if icon.len() != commands[i].len() {
                            panic!("Block 'music' needs 'click_icons' and 'click_commands'\
                            to be the same length.");
                        }
                    }
                    music.clickable_icons(icons, commands);
                } else {
                    panic!("Block 'music' has 'click_icons' but no 'click_commands'!");
                }
            }
            Box::new(music)
        },
        "wifi" => {
            let mut wifi = Wifi::new();
            if let Some(ref icon_align) = block.icon_align {
                if let Some(ref icon) = block.icon {
                    wifi.add_icon(icon, align(icon_align.to_owned()));
                } else if let Some(ref icons) = block.icons {
                    // NOTE(review): same 3-icon indexing assumption as above.
                    wifi.add_icons([
                        icons[0].as_str(),
                        icons[1].as_str(),
                        icons[2].as_str(),
                    ], align(icon_align.to_owned()));
                }
            }
            if let Some(ref device) = block.device {
                wifi.set_device(device);
            }
            Box::new(wifi)
        },
        "workspaces" => {
            let mut wsp = Wsp::new();
            // Both icons are mandatory for workspaces.
            if let (&Some(ref icon), &Some(ref active_icon)) = (&block.icon, &block.active_icon) {
                wsp.set_icon(icon.as_str());
                wsp.set_active_icon(active_icon.as_str());
            } else {
                panic!("Block 'workspaces' requires fields 'icon' and 'active_icon'!");
            }
            Box::new(wsp)
        },
        "title" => {
            // Window title is truncated to max_chars (default 50).
            let mut max_chars: usize = 50;
            if let Some(ref max) = block.max_chars {
                max_chars = *max;
            }
            Box::new(Title::new(max_chars))
        },
        "custom" => {
            let mut custom = Custom::new();
            if let Some(ref command) = block.command {
                custom.set_command(command.to_owned());
            }
            Box::new(custom)
        },
        _ => panic!("Unrecognized kind \"{}\"", block.kind),
    }
}
fn build_module(cmodule: &CModule) -> Module {
let mut module = Module::new(align(match cmodule.align {
Some(ref x) => x,
None => "none",
}));
if let Some(ref sep) = cmodule.separator {
module.add_separator(sep.as_str());
}
if let Some(ref bg) = cmodule.background {
module.set_background(bg);
}
if let Some(ref bgo) = cmodule.background_opacity {
module.set_background_opacity(*bgo);
}
if let Some(ref fg) = cmodule.foreground {
module.set_foreground(fg);
}
if let Some(ref fgo) = cmodule.foreground_opacity {
module.set_foreground_opacity(*fgo);
}
if let Some(ref blocks) = cmodule.block {
for block in blocks {
module.add(build_block(&block));
}
}
module
}
/// Assemble the full `Bar` from the parsed config: global appearance,
/// bar-level blocks, then every module (inheriting bar colors when a module
/// does not set its own).
fn setup(config: &Config) -> Bar {
    // Set up bar
    let mut bar = Bar::new(config.bar.update_interval);
    if let Some(ref sep) = config.bar.separator {
        bar.set_separator(sep);
    }
    if let Some(ref bg) = config.bar.background {
        bar.set_background(bg);
    }
    if let Some(ref bgo) = config.bar.background_opacity {
        bar.set_background_opacity(*bgo);
    }
    if let Some(ref fg) = config.bar.foreground {
        bar.set_foreground(fg);
    }
    if let Some(ref fgo) = config.bar.foreground_opacity {
        bar.set_foreground_opacity(*fgo);
    }
    // Add blocks
    if let Some(ref blocks) = config.bar.block {
        for block in blocks {
            bar.add_block(build_block(&block));
        }
    }
    // Set up and add modules
    if let Some(ref modules) = config.module {
        for cmodule in modules {
            let mut module = build_module(cmodule);
            // If the modules do not have their own colors, inherit from bar.
            // Note: the opacity is inherited together with the color — a
            // module with no background also inherits the bar's opacity.
            match cmodule.background {
                None => {
                    if let Some(ref bg) = config.bar.background {
                        module.set_background(bg);
                    }
                    if let Some(ref bgo) = config.bar.background_opacity {
                        module.set_background_opacity(*bgo);
                    }
                },
                _ => {}
            }
            match cmodule.foreground {
                None => {
                    if let Some(ref fg) = config.bar.foreground {
                        module.set_foreground(fg);
                    }
                    if let Some(ref fgo) = config.bar.foreground_opacity {
                        module.set_foreground_opacity(*fgo);
                    }
                },
                _ => {}
            }
            bar.add_module(module);
        }
    }
    bar
}
/// Render the bar every `update_interval` seconds until the config watcher
/// reports a change (or its channel disconnects), then return so the caller
/// can reload the configuration.
fn display(bar: &mut Bar, rx: &Receiver<DebouncedEvent>) {
    loop {
        bar.run();
        thread::sleep(Duration::from_secs(bar.update_interval));
        // `Empty` means nothing happened — keep looping. Any event, or a
        // dead watcher, ends the loop.
        if let Err(TryRecvError::Empty) = rx.try_recv() {
            continue;
        }
        break;
    }
}
/// Event-driven update loop: mirror WM events into a temp file via a
/// background shell pipeline and re-render whenever the file grows, plus a
/// periodic refresh every `update_interval` seconds.
fn subscribe(bar: &mut Bar, wsp: WindowManagers, rx: &Receiver<DebouncedEvent>) {
    match wsp {
        // Just bspwm for now
        _ => run_bg("bspc subscribe | tee /tmp/rebar_subscribe &> /dev/null"),
    };
    let initial = get_time().sec;
    let mut previous = 0;
    // Detect new WM events by polling the line count of the tee'd file.
    // NOTE(review): this assumes /tmp/rebar_subscribe is writable and that
    // `bspc` exists on PATH — verified nowhere; the pipeline is cleaned up
    // by `cleanup()`.
    let mut file_length = run_i32("cat /tmp/rebar_subscribe | wc -l");
    loop {
        let len = run_i32("cat /tmp/rebar_subscribe | wc -l");
        let elapsed = get_time().sec - initial;
        // Update on WM action and every `self.update_interval` seconds.
        // `previous` guards against re-running several times within the
        // same second (the loop ticks every 100 ms).
        if len != file_length {
            file_length = len;
            bar.run();
        } else if elapsed != previous && elapsed as u64 % bar.update_interval == 0 {
            previous = elapsed;
            bar.run();
        }
        thread::sleep(Duration::from_millis(100));
        // Exit when the config watcher fires or disconnects, so the caller
        // can reload the config.
        match rx.try_recv() {
            Ok(_) | Err(TryRecvError::Disconnected) => break,
            Err(TryRecvError::Empty) => {},
        }
    }
}
/// Worker-thread entry point: watch the config file and (re)start the bar
/// loop each time it changes.
///
/// `_sdone` is held (not used) so the channel closes when this thread ends,
/// unblocking `rdone.recv()` in `main`.
fn run(_sdone: chan::Sender<()>) {
    let (tx, rx): (Sender<DebouncedEvent>, Receiver<DebouncedEvent>) = channel();
    // Monitor config for changes (2 s debounce).
    let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2))
        .unwrap_or_else(|e| panic!("Error watching config: {}", e));
    let _ = watcher.watch(create_config(), RecursiveMode::NonRecursive);
    loop {
        // Re-parse on every iteration so config edits take effect after the
        // inner display/subscribe loop breaks.
        let config = parse_config();
        let mut bar = setup(&config);
        // TODO: Subprocess lemonbar
        // Run: bspwm gets the event-driven loop, everything else polls.
        if let Some(ref wm) = config.bar.wm {
            match wm.as_ref() {
                "bspwm" => subscribe(&mut bar, WindowManagers::Bspwm, &rx),
                _ => display(&mut bar, &rx),
            }
        }
        // When display loop breaks, cleanup and start again
        cleanup();
    }
}
/// Tear down the background `bspc subscribe` pipeline and its temp file
/// (best effort — errors from the shell commands are ignored).
fn cleanup() {
    run_command("killall bspc &> /dev/null");
    run_command("rm /tmp/rebar_subscribe");
}
/// Spawn the bar loop on a worker thread and block until either a signal
/// arrives or the worker finishes, cleaning up external processes on exit.
fn main() {
    // Listen for SIGINT and SIGTERM
    let signal = chan_signal::notify(&[Signal::INT, Signal::TERM]);
    // Zero-capacity sync channel: closes when `run`'s sender is dropped.
    let (sdone, rdone) = chan::sync(0);
    // Run
    let _ = thread::Builder::new().name("main".to_string()).spawn(move || run(sdone));
    // Cleanup on kill
    chan_select! {
        signal.recv() => {
            cleanup();
        },
        rdone.recv() => {
            println!("Done!");
        }
    }
}
|
// NOTE: svd2rust-generated register accessors for RCC_SPI2S1CKSELR — do not
// hand-edit; regenerate from the SVD instead.
#[doc = "Register `RCC_SPI2S1CKSELR` reader"]
pub type R = crate::R<RCC_SPI2S1CKSELR_SPEC>;
#[doc = "Register `RCC_SPI2S1CKSELR` writer"]
pub type W = crate::W<RCC_SPI2S1CKSELR_SPEC>;
#[doc = "Field `SPI1SRC` reader - SPI1SRC"]
pub type SPI1SRC_R = crate::FieldReader;
#[doc = "Field `SPI1SRC` writer - SPI1SRC"]
pub type SPI1SRC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
impl R {
    #[doc = "Bits 0:2 - SPI1SRC"]
    #[inline(always)]
    pub fn spi1src(&self) -> SPI1SRC_R {
        // Mask the low three bits (field width 3, offset 0).
        SPI1SRC_R::new((self.bits & 7) as u8)
    }
}
impl W {
    #[doc = "Bits 0:2 - SPI1SRC"]
    #[inline(always)]
    #[must_use]
    pub fn spi1src(&mut self) -> SPI1SRC_W<RCC_SPI2S1CKSELR_SPEC, 0> {
        SPI1SRC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "This register is used to control the selection of the kernel clock for the SPI/I2S1. Note that changing the clock source on-the-fly is allowed, and will not generate any timing violation, however the user has to ensure that both the previous and the new clock sources are present during the switching, and for the whole transition time. Refer to Section: Clock enabling delays.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rcc_spi2s1ckselr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`rcc_spi2s1ckselr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct RCC_SPI2S1CKSELR_SPEC;
impl crate::RegisterSpec for RCC_SPI2S1CKSELR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`rcc_spi2s1ckselr::R`](R) reader structure"]
impl crate::Readable for RCC_SPI2S1CKSELR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`rcc_spi2s1ckselr::W`](W) writer structure"]
impl crate::Writable for RCC_SPI2S1CKSELR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets RCC_SPI2S1CKSELR to value 0"]
impl crate::Resettable for RCC_SPI2S1CKSELR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::path::Path;
use tar::{Builder, Archive};
use walkdir::WalkDir;
use libflate::gzip::{Encoder, Decoder};
use std::io::{Write, Read};
/// Gzip-compress `archive`, returning the encoded bytes.
///
/// The previous version used `Write::write`, which is allowed to perform a
/// short write — any unwritten tail was silently dropped, producing a
/// truncated stream (the old "doesn't work" note). `write_all` guarantees
/// the whole buffer is consumed. Debug printlns removed.
pub fn compress(archive: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut encoder = Encoder::new(Vec::new())?;
    encoder.write_all(archive)?;
    // `finish` flushes the gzip trailer and hands back the inner Vec.
    encoder.finish().into_result()
}
/// Gzip-decompress `encoded_data` into a freshly allocated buffer.
pub fn decompress(encoded_data: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut output = Vec::new();
    Decoder::new(encoded_data)?.read_to_end(&mut output)?;
    Ok(output)
}
/// Build an in-memory (uncompressed) tar archive of every regular file under
/// `root`, walking recursively.
///
/// Walk errors are now propagated as `io::Error` via `?` (walkdir's error
/// converts into `io::Error`) instead of panicking through `unwrap`, and the
/// intermediate `Vec<io::Result<()>>` is gone — failures short-circuit.
pub fn archive(root: &str) -> std::io::Result<Vec<u8>> {
    let mut builder = Builder::new(Vec::new());
    for entry in WalkDir::new(root) {
        let path = entry?.into_path();
        // Directories and other non-files are skipped; tar directory entries
        // were never emitted by the original either.
        if path.is_file() {
            builder.append_path(path)?;
        }
    }
    builder.into_inner()
}
/// Extract the tar stream `archive_data` into the directory `root`.
///
/// The previous version only printed each entry's path and never used
/// `root`, so nothing was written to disk; `unpack` performs the actual
/// extraction (creating files/directories under `root`).
pub fn dearchive<P: AsRef<Path>>(archive_data: &[u8], root: P) -> std::io::Result<()> {
    let mut archive = Archive::new(archive_data);
    archive.unpack(root)?;
    Ok(())
}
/// Placeholder for pulling remote configs — not yet implemented (see the
/// design notes below).
pub fn download() {}
//thoughts for the future: when copying over remote configs, an option for a complete copy will be available, with risks
//this must be consented to, and when it occurs the configs from the old system will be purged
//despite this, the typical remote config pull will simply pull peer information, permissions, and applications,
//invalidating all pair keys and regenerating RSA keys
|
use registry_pol::v1::{RegistryValueType, RegistryValue, parse};
static EMPTY_DATA: &[u8] = include_bytes!("../../test-data/Empty.pol");
static MACHINE_REGISTRY_DATA: &[u8] = include_bytes!("../../test-data/Machine-Registry.pol");
static USER_REGISTRY_DATA: &[u8] = include_bytes!("../../test-data/User-Registry.pol");
// An empty .pol file parses to an empty value list.
#[test]
fn empty() {
    assert_eq!(parse(EMPTY_DATA), Ok(vec![]));
}
// Machine-scope fixture: a single REG_DWORD policy value.
#[test]
fn machine() {
    assert_eq!(parse(MACHINE_REGISTRY_DATA),
               Ok(vec![RegistryValue {
                   key: r"Software\Microsoft\Windows\CurrentVersion\Policies\Explorer".to_string(),
                   value: Some("NoDriveTypeAutorun".to_string()),
                   data_type: Some(RegistryValueType::REG_DWORD),
                   data: Some(vec![0x9E, 0x00, 0x00, 0x00]),
               }]));
}
// User-scope fixture: a "**del." tombstone entry stored as REG_SZ.
#[test]
fn user() {
    assert_eq!(parse(USER_REGISTRY_DATA),
               Ok(vec![RegistryValue {
                   key: r"Software\Microsoft\Windows\CurrentVersion\Policies\ActiveDesktop".to_string(),
                   value: Some("**del.NoChangingWallPaper".to_string()),
                   data_type: Some(RegistryValueType::REG_SZ),
                   data: Some(vec![0x20, 0x00, 0x00, 0x00]),
               }]));
}
// Fuzz-ish smoke test: parsing arbitrary bytes must never panic.
quickcheck! {
    fn random(xs: Vec<u8>) -> bool {
        // Not requiring is_err() because that's gonna give lots of false negatives
        // (first trivial non-error case is at 8 bytes, next at 24, at 26 there are 65`534, etc.)
        let _ = parse(&xs);
        true
    }
}
|
use std::{fs, env};
use http::{Request, Uri};
use isahc;
use isahc::ResponseExt;
use std::path::Path;
/// One Advent-of-Code solution: implementors parse the raw input into
/// `Vec<T>` and compute an `i32` answer; `start` drives the whole flow.
pub trait AocImplementation<T> {
    /// Download (if needed) and read the day's input, run the solution, and
    /// print the answer. Panics if the input file cannot be read.
    fn start(&self, day: i32) {
        download_input_file(day);
        let contents = fs::read_to_string(get_day_filename(day)).expect("Failed to read input file");
        let parsed = self.process_input(&contents);
        let answer = self.execute(parsed);
        match answer {
            Some(a) => println!("Puzzle answer: {}", a),
            None => eprintln!("Failed to calculate answer")
        }
    }
    /// Parse the raw puzzle input into per-item values.
    fn process_input(&self, input: &str) -> Vec<T>;
    /// Solve the puzzle; `None` signals that no answer could be computed.
    fn execute(&self, input: Vec<T>) -> Option<i32>;
}
/// Relative path of the puzzle-input file for `day` (e.g. "day3/input.txt").
fn get_day_filename(day: i32) -> String {
    let mut name = String::from("day");
    name.push_str(&day.to_string());
    name.push_str("/input.txt");
    name
}
// Ensures the input file exists, and downloads it if not
fn download_input_file(day: i32) {
let filename = get_day_filename(day);
if Path::new(&filename).exists() {
return
}
println!("Input file does not exist, downloading before running");
let cookie = env::var("AOC_SESSION_COOKIE").expect("AOC_SESSION_COOKIE env variable was not set");
let url: Uri = format!("https://adventofcode.com/2019/day/{}/input", day).parse().unwrap();
let request = Request::builder()
.uri(url)
.method("GET")
.header("cookie", format!("session={}", cookie))
.body(()).unwrap();
let content = isahc::send(request).unwrap().text().unwrap();
fs::write(&filename, content.trim()).unwrap();
} |
use crate::database::values::dsl::ExprDb;
use super::super::SQLiteDatabase;
use nu_engine::CallExt;
use nu_protocol::{
ast::Call,
engine::{Command, EngineState, Stack},
Category, Example, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape,
Type, Value,
};
use sqlparser::ast::{Expr, OrderByExpr, Statement};
/// `order-by` database command: appends ORDER BY expressions to a query (or
/// to a window expression's partition ordering).
#[derive(Clone)]
pub struct OrderByDb;
impl Command for OrderByDb {
    fn name(&self) -> &str {
        "order-by"
    }
    fn usage(&self) -> &str {
        "Orders by query"
    }
    // database in -> database out; `--ascending` adds ASC, `--nulls-first`
    // adds NULLS FIRST to every expression given in this call.
    fn signature(&self) -> Signature {
        Signature::build(self.name())
            .switch("ascending", "Order by ascending values", Some('a'))
            .switch("nulls-first", "Show nulls first in order", Some('n'))
            .rest(
                "select",
                SyntaxShape::Any,
                "Select expression(s) on the table",
            )
            .input_type(Type::Custom("database".into()))
            .output_type(Type::Custom("database".into()))
            .category(Category::Custom("database".into()))
    }
    fn search_terms(&self) -> Vec<&str> {
        vec!["database"]
    }
    fn examples(&self) -> Vec<Example> {
        vec![
            Example {
                description: "orders query by a column",
                example: r#"open db.sqlite
| from table table_a
| select a
| order-by a
| describe"#,
                result: Some(Value::Record {
                    cols: vec!["connection".into(), "query".into()],
                    vals: vec![
                        Value::String {
                            val: "db.sqlite".into(),
                            span: Span::test_data(),
                        },
                        Value::String {
                            val: "SELECT a FROM table_a ORDER BY a".into(),
                            span: Span::test_data(),
                        },
                    ],
                    span: Span::test_data(),
                }),
            },
            Example {
                description: "orders query by column a ascending and by column b",
                example: r#"open db.sqlite
| from table table_a
| select a
| order-by a --ascending
| order-by b
| describe"#,
                result: Some(Value::Record {
                    cols: vec!["connection".into(), "query".into()],
                    vals: vec![
                        Value::String {
                            val: "db.sqlite".into(),
                            span: Span::test_data(),
                        },
                        Value::String {
                            val: "SELECT a FROM table_a ORDER BY a ASC, b".into(),
                            span: Span::test_data(),
                        },
                    ],
                    span: Span::test_data(),
                }),
            },
        ]
    }
    fn run(
        &self,
        engine_state: &EngineState,
        stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<PipelineData, ShellError> {
        let asc = call.has_flag("ascending");
        let nulls_first = call.has_flag("nulls-first");
        // Collect the rest args into a single list value so ExprDb can
        // extract one SQL expression per element.
        let expressions: Vec<Value> = call.rest(engine_state, stack, 0)?;
        let expressions = Value::List {
            vals: expressions,
            span: call.head,
        };
        let expressions = ExprDb::extract_exprs(expressions)?;
        let expressions: Vec<OrderByExpr> = expressions
            .into_iter()
            .map(|expr| OrderByExpr {
                expr,
                // Flags map to Some(true) when set, None (SQL default) when not.
                asc: if asc { Some(asc) } else { None },
                nulls_first: if nulls_first { Some(nulls_first) } else { None },
            })
            .collect();
        let value = input.into_value(call.head);
        // Pipeline input may be a window expression or a full connection;
        // anything else is a conversion error.
        if let Ok(expr) = ExprDb::try_from_value(&value) {
            update_expressions(expr, expressions, call)
        } else if let Ok(db) = SQLiteDatabase::try_from_value(value.clone()) {
            update_connection(db, expressions, call)
        } else {
            Err(ShellError::CantConvert(
                "expression or query".into(),
                value.get_type().to_string(),
                value.span()?,
                None,
            ))
        }
    }
}
/// Appends `expressions` to the ORDER BY list of the OVER clause of a
/// window-function expression and returns the expression as pipeline data.
///
/// Errors when the expression is not a function, or the function has no
/// OVER (partition) clause to attach the ordering to.
fn update_expressions(
    mut expr: ExprDb,
    mut expressions: Vec<OrderByExpr>,
    call: &Call,
) -> Result<PipelineData, ShellError> {
    match expr.as_mut() {
        Expr::Function(function) => match &mut function.over {
            Some(over) => over.order_by.append(&mut expressions),
            None => {
                return Err(ShellError::GenericError(
                    // Typo fix in user-facing message: "doesnt" -> "doesn't".
                    "Expression doesn't define a partition to order".into(),
                    "Expected an expression with partition".into(),
                    Some(call.head),
                    None,
                    Vec::new(),
                ))
            }
        },
        s => {
            return Err(ShellError::GenericError(
                "Expression doesn't define a function".into(),
                format!("Expected an expression with a function. Got {}", s),
                Some(call.head),
                None,
                Vec::new(),
            ))
        }
    };
    Ok(expr.into_value(call.head).into_pipeline_data())
}
/// Appends `expressions` to the ORDER BY list of the query carried by a
/// database connection and returns the connection as pipeline data.
///
/// Errors when the connection has no statement, or the statement is not a
/// query.
fn update_connection(
    mut db: SQLiteDatabase,
    mut expressions: Vec<OrderByExpr>,
    call: &Call,
) -> Result<PipelineData, ShellError> {
    match db.statement.as_mut() {
        Some(statement) => match statement {
            Statement::Query(query) => {
                query.order_by.append(&mut expressions);
            }
            s => {
                return Err(ShellError::GenericError(
                    // Typo fix in user-facing message: "doesnt" -> "doesn't".
                    "Connection doesn't define a query".into(),
                    format!("Expected a connection with query. Got {}", s),
                    Some(call.head),
                    None,
                    Vec::new(),
                ))
            }
        },
        None => {
            return Err(ShellError::GenericError(
                "Connection without statement".into(),
                "The connection needs a statement defined".into(),
                Some(call.head),
                None,
                Vec::new(),
            ))
        }
    };
    Ok(db.into_value(call.head).into_pipeline_data())
}
#[cfg(test)]
mod test {
    use super::super::super::expressions::{FieldExpr, OrExpr};
    use super::super::{FromDb, ProjectionDb, WhereDb};
    use super::*;
    use crate::database::test_database::test_database;
    #[test]
    fn test_examples() {
        // Runs every example declared in `examples()` of each listed command
        // against the shared in-memory test database harness.
        test_database(vec![
            Box::new(OrderByDb {}),
            Box::new(ProjectionDb {}),
            Box::new(FromDb {}),
            Box::new(WhereDb {}),
            Box::new(FieldExpr {}),
            Box::new(OrExpr {}),
        ])
    }
}
|
use serde_json::builder::ObjectBuilder;
use std::default::Default;
use ::model::{ChannelId, RoleId};
/// Builder for the JSON body of a guild-member edit request.
/// Wraps a `serde_json::builder::ObjectBuilder`; each setter consumes the
/// builder and returns a new one with the field added.
pub struct EditMember(pub ObjectBuilder);
impl EditMember {
    /// Sets whether the member is server-deafened ("deaf" field).
    pub fn deafen(self, deafen: bool) -> Self {
        EditMember(self.0.insert("deaf", deafen))
    }
    /// Sets whether the member is server-muted ("mute" field).
    pub fn mute(self, mute: bool) -> Self {
        EditMember(self.0.insert("mute", mute))
    }
    /// Sets the member's nickname ("nick" field).
    pub fn nickname(self, nickname: &str) -> Self {
        EditMember(self.0.insert("nick", nickname))
    }
    /// Replaces the member's role list with the given role ids.
    pub fn roles(self, roles: &[RoleId]) -> Self {
        EditMember(self.0
            .insert_array("roles",
                |a| roles.iter().fold(a, |a, id| a.push(id.0))))
    }
    /// Moves the member to the given voice channel ("channel_id" field).
    pub fn voice_channel<C: Into<ChannelId>>(self, channel_id: C) -> Self {
        EditMember(self.0.insert("channel_id", channel_id.into().0))
    }
}
impl Default for EditMember {
    // Starts from an empty JSON object; only explicitly-set fields are sent.
    fn default() -> EditMember {
        EditMember(ObjectBuilder::new())
    }
}
|
#![cfg_attr(feature="clippy", feature(plugin))]
#![cfg_attr(feature="clippy", plugin(clippy))]
#![feature(plugin)]
#![plugin(rocket_codegen)]
extern crate rocket;
extern crate hyper;
use std::io::Read;
use std::sync::RwLock;
use rocket::State;
use hyper::Client;
use hyper::client::Response;
/// Shared application state: the known TLDs (stored lowercase), guarded by
/// an RwLock so the `/update` route can swap the list at runtime.
struct TLD {
    tld: RwLock<Vec<String>>,
}
// Landing page. Grammar fixes in the user-facing blurb:
// "ie." -> "i.e.", stray "a" removed ("is a registered" -> "is registered").
#[get("/")]
fn index() -> &'static str {
    "A simple service to check if a given domain ending, i.e. .com, .org, .latin, is registered by ICANN as a Top Level Domain (TLD for short)"
}
// Placeholder favicon route. Typo fix: "preety" -> "pretty".
#[get("/favicon.ico")]
fn favicon() -> &'static str {
    "Am I pretty?"
}
// Looks up a single TLD. The comparison is case-insensitive because both
// the stored list (on update) and the query (here) are lowercased.
#[get("/<tld>")]
fn check_tld(tld: &str, state: State<TLD>) -> &'static str {
    let tld = &tld.to_lowercase();
    let found = state.tld.read().unwrap().iter().any(|x| x == tld);
    if found { "FOUND" } else { "NOT FOUND" }
}
// Re-fetches the IANA list and atomically replaces the shared state.
#[get("/update")]
fn update(state: State<TLD>) -> &'static str {
    match get_update_from_url() {
        Some(new_state) => {
            *state.tld.write().unwrap() = new_state;
            "Update successful"
        }
        None => "Update Failed",
    }
}
fn get_update_from_url() -> Option<Vec<String>> {
let client = Client::new();
match client
.get("http://data.iana.org/TLD/tlds-alpha-by-domain.txt")
.send() {
Ok(response) => {
match parse_update_to_vec(response) {
Some(res) => Some(res),
None => None,
}
}
Err(_) => None,
}
}
// Reads the whole HTTP response body as UTF-8 text and splits it into
// lowercased lines; None if the body cannot be read.
// NOTE(review): the IANA file starts with a "# ..." comment line which ends
// up in the list too; it can never match a real TLD query, so it is benign.
fn parse_update_to_vec(mut response: Response) -> Option<Vec<String>> {
    let mut buf = String::new();
    match response.read_to_string(&mut buf) {
        Ok(_) => Some(string_to_vec(&buf)),
        Err(_) => None,
    }
}
// Splits the response body into one entry per line, normalised to
// lowercase so lookups can be case-insensitive.
fn string_to_vec(response: &str) -> Vec<String> {
    let mut tlds = Vec::new();
    for line in response.lines() {
        tlds.push(line.to_lowercase());
    }
    tlds
}
fn main() {
    // Refuse to start without an initial TLD list; once running, `/update`
    // can refresh the list without a restart.
    match get_update_from_url() {
        Some(new_state) => {
            rocket::ignite()
                .manage(TLD { tld: RwLock::new(new_state) })
                .mount("/", routes![index, favicon, check_tld, update])
                .launch();
        }
        None => panic!("Geting TLD list from IANA failed!"),
    }
}
|
use super::construct::saca;
#[cfg(feature = "pack")]
use super::packed_sa::PackedSuffixArray;
use super::utils::{lcp, trunc};
#[cfg(feature = "pack")]
use std::io::{Read, Result, Write};
use std::ops::Range;
#[cfg(feature = "pack")]
use std::path::Path;
/// Suffix array for searching byte strings.
///
/// Holds a borrowed byte string `s` plus the sorted start positions of all
/// of its suffixes including the empty suffix, so `sa.len() == s.len() + 1`.
#[derive(Clone)]
pub struct SuffixArray<'s> {
    s: &'s [u8],
    sa: Vec<u32>,
}
impl<'s> SuffixArray<'s> {
    // Construct new suffix array for a byte string.
    pub fn new(s: &'s [u8]) -> Self {
        // One extra slot for the empty suffix.
        let mut sa = vec![0; s.len() + 1];
        saca(s, &mut sa[..]);
        SuffixArray { s, sa }
    }
// Construct suffix array in place.
pub fn set(&mut self, s: &'s [u8]) {
self.sa.resize(s.len() + 1, 0);
saca(s, &mut self.sa[..]);
}
    // Release the unused memory of suffix array.
    // Useful after `set` shrank the array below its previous capacity.
    pub fn fit(&mut self) {
        self.sa.shrink_to_fit()
    }
    /// Length of the underlying byte string.
    pub fn len(&self) -> usize {
        self.s.len()
    }
    /// Test if the underlying byte string is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Take out the suffix array and its corresponding byte string.
    pub fn into_parts(self) -> (&'s [u8], Vec<u32>) {
        (self.s, self.sa)
    }
    /// Compose existed suffix array and its corresponding byte string
    /// together, and checks the integrity.
    ///
    /// Returns `None` when `sa` is not a valid suffix array of `s`.
    pub fn from_parts(s: &'s [u8], sa: Vec<u32>) -> Option<Self> {
        let compose = SuffixArray { s, sa };
        if compose.check_integrity() {
            Some(compose)
        } else {
            None
        }
    }
    /// Compose existed suffix array and its corresponding byte string
    /// together without integrity check.
    ///
    /// # Safety
    ///
    /// The caller must guarantee `sa` is the valid suffix array of `s`;
    /// otherwise later queries may panic or return wrong results.
    pub unsafe fn unchecked_from_parts(s: &'s [u8], sa: Vec<u32>) -> Self {
        SuffixArray { s, sa }
    }
fn check_integrity(&self) -> bool {
if self.s.len() + 1 != self.sa.len() {
return false;
}
for i in 1..self.sa.len() {
let x = &self.s[self.sa[i - 1] as usize..];
let y = &self.s[self.sa[i] as usize..];
if x >= y {
return false;
}
}
true
}
    /// Test if contains given sub-string.
    pub fn contains(&self, sub: &[u8]) -> bool {
        // Binary search over suffixes; each suffix is truncated to
        // `sub.len()` bytes (via `trunc`) so any occurrence compares equal.
        self.sa
            .binary_search_by_key(&sub, |&i| {
                trunc(&self.s[i as usize..], sub.len())
            })
            .is_ok()
    }
    /// Search for all the unsorted overlapping occurrence of given sub-string.
    ///
    /// Returns the positions in suffix-array order, not text order.
    pub fn search_all(&self, sub: &[u8]) -> &[u32] {
        // Lower bound: first suffix that is >= `sub` lexicographically.
        let mut i = 0;
        let mut k = self.sa.len();
        while i < k {
            let m = i + (k - i) / 2;
            if sub > &self.s[self.sa[m] as usize..] {
                i = m + 1;
            } else {
                k = m;
            }
        }
        // Upper bound: first suffix past the run that starts with `sub`.
        let mut j = i;
        k = self.sa.len();
        while j < k {
            let m = j + (k - j) / 2;
            if self.s[self.sa[m] as usize..].starts_with(sub) {
                j = m + 1;
            } else {
                k = m;
            }
        }
        // Every suffix in [i, j) begins with `sub`.
        &self.sa[i..j]
    }
    /// Search for one sub-string that has the longest common prefix of the
    /// given pattern.
    ///
    /// Returns the match as a byte range into the underlying string.
    pub fn search_lcp(&self, pat: &[u8]) -> Range<usize> {
        let point =
            self.sa.binary_search_by(|&i| self.s[i as usize..].cmp(pat));
        match point {
            // Exact hit: a whole suffix equals `pat`.
            Ok(i) => {
                let j = self.sa[i] as usize;
                j..self.s.len()
            }
            Err(i) => {
                if i > 0 && i < self.sa.len() {
                    // `pat` sorts between two suffixes; the longer common
                    // prefix is with one of its two neighbours.
                    let j = self.sa[i - 1] as usize;
                    let k = self.sa[i] as usize;
                    let a = lcp(pat, &self.s[j..]);
                    let b = lcp(pat, &self.s[k..]);
                    if a > b {
                        j..j + a
                    } else {
                        k..k + b
                    }
                } else if i == self.sa.len() {
                    // `pat` sorts after every suffix: compare with the last.
                    let j = self.sa[i - 1] as usize;
                    let a = lcp(pat, &self.s[j..]);
                    j..j + a
                } else {
                    // i == 0: nothing precedes `pat`; report an empty match.
                    self.s.len()..self.s.len()
                }
            }
        }
    }
/// Write the suffix array (without the byte string).
#[cfg(feature = "pack")]
pub fn dump<W: Write>(&self, file: W) -> Result<()> {
let psa = PackedSuffixArray::from_sa(&self.sa[..]);
psa.dump(file)
}
/// Create a file and write the suffix array (without the byte string).
#[cfg(feature = "pack")]
pub fn dump_file<P: AsRef<Path>>(&self, name: P) -> Result<()> {
use std::fs::File;
use std::io::BufWriter;
let file = BufWriter::new(File::create(name)?);
let psa = PackedSuffixArray::from_sa(&self.sa[..]);
psa.dump(file)
}
/// Dump the suffix array as bytes (without the byte string).
#[cfg(feature = "pack")]
pub fn dump_bytes(&self) -> Result<Vec<u8>> {
let psa = PackedSuffixArray::from_sa(&self.sa[..]);
psa.dump_bytes()
}
/// Read suffix array without integrity check.
#[cfg(feature = "pack")]
pub unsafe fn unchecked_load<R: Read>(
s: &'s [u8],
file: R,
) -> Result<Self> {
let psa = PackedSuffixArray::load(file)?;
let sa = psa.into_sa();
Ok(Self::unchecked_from_parts(s, sa))
}
/// Read suffix array.
#[cfg(feature = "pack")]
pub fn load<R: Read>(s: &'s [u8], file: R) -> Result<Self> {
use std::io::{Error, ErrorKind};
let sa = unsafe { Self::unchecked_load(s, file)? };
if !sa.check_integrity() {
Err(Error::new(
ErrorKind::InvalidData,
"inconsistent suffix array",
))
} else {
Ok(sa)
}
}
/// Read suffix array from a file without integrity check.
#[cfg(feature = "pack")]
pub unsafe fn unchecked_load_file<P: AsRef<Path>>(
s: &'s [u8],
name: P,
) -> Result<Self> {
use std::fs::File;
use std::io::BufReader;
let file = BufReader::new(File::open(name)?);
Self::unchecked_load(s, file)
}
/// Read suffix array from a file.
#[cfg(feature = "pack")]
pub fn load_file<P: AsRef<Path>>(s: &'s [u8], name: P) -> Result<Self> {
use std::io::{Error, ErrorKind};
let sa = unsafe { Self::unchecked_load_file(s, name)? };
if !sa.check_integrity() {
Err(Error::new(
ErrorKind::InvalidData,
"inconsistent suffix array",
))
} else {
Ok(sa)
}
}
/// Load suffix array from bytes without integrity check.
#[cfg(feature = "pack")]
pub unsafe fn unchecked_load_bytes(
s: &'s [u8],
bytes: &[u8],
) -> Result<Self> {
let psa = PackedSuffixArray::load_bytes(bytes)?;
let sa = psa.into_sa();
Ok(Self::unchecked_from_parts(s, sa))
}
/// Load suffix array from bytes.
#[cfg(feature = "pack")]
pub fn load_bytes(s: &'s [u8], bytes: &[u8]) -> Result<Self> {
use std::io::{Error, ErrorKind};
let sa = unsafe { Self::unchecked_load_bytes(s, bytes)? };
if !sa.check_integrity() {
Err(Error::new(
ErrorKind::InvalidData,
"inconsistent suffix array",
))
} else {
Ok(sa)
}
}
}
// Extracts the raw suffix array, discarding the borrowed string.
impl<'s> From<SuffixArray<'s>> for Vec<u32> {
    fn from(sa: SuffixArray<'s>) -> Vec<u32> {
        sa.sa
    }
}
// Borrows the underlying byte string.
impl<'s> AsRef<[u8]> for SuffixArray<'s> {
    fn as_ref(&self) -> &[u8] {
        self.s
    }
}
|
use crate::error;
use crate::sparse_set::{OldComponent, Pack};
use crate::storage::EntityId;
use crate::type_id::TypeId;
use crate::view::ViewMut;
use alloc::vec::Vec;
use core::any::type_name;
/// Maps a tuple of component types to the tuple type returned when those
/// components are removed (implemented for tuples by `impl_removable!`).
pub trait Removable {
    type Out;
}
/// Removes component from entities.
pub trait Remove<T: Removable> {
    /// Removes component in `entity`, if the entity had them, they will be returned.
    ///
    /// Multiple components can be removed at the same time using a tuple.
    ///
    /// `T` has to be a tuple even for a single type.
    /// In this case use (T,).
    ///
    /// The compiler has trouble inferring the return types.
    /// You'll often have to use the full path `Remove::<type>::try_remove`.
    /// ### Example
    /// ```
    /// use shipyard::{EntitiesViewMut, OldComponent, Remove, ViewMut, World};
    ///
    /// let world = World::new();
    ///
    /// world.run(|mut entities: EntitiesViewMut, mut usizes: ViewMut<usize>, mut u32s: ViewMut<u32>| {
    /// let entity1 = entities.add_entity((&mut usizes, &mut u32s), (0usize, 1u32));
    /// let old = Remove::<(usize, u32)>::try_remove((&mut usizes, &mut u32s), entity1).unwrap();
    /// assert_eq!(old, (Some(OldComponent::Owned(0)), Some(OldComponent::Owned(1))));
    /// });
    /// ```
    /// When using packed storages you have to pass all storages packed with it,
    /// even if you don't remove any component from it.
    /// ### Example
    /// ```
    /// use shipyard::{EntitiesViewMut, OldComponent, Remove, TightPack, ViewMut, World};
    ///
    /// let world = World::new();
    ///
    /// world.run(|mut entities: EntitiesViewMut, mut usizes: ViewMut<usize>, mut u32s: ViewMut<u32>| {
    /// (&mut usizes, &mut u32s).tight_pack();
    /// let entity1 = entities.add_entity((&mut usizes, &mut u32s), (0usize, 1u32));
    /// let old = Remove::<(usize,)>::try_remove((&mut usizes, &mut u32s), entity1).unwrap();
    /// assert_eq!(old, (Some(OldComponent::Owned(0)),));
    /// });
    /// ```
    ///
    /// # Errors
    ///
    /// Errors when a storage packed with a removed component is missing
    /// from the passed tuple.
    fn try_remove(self, entity: EntityId) -> Result<T::Out, error::Remove>;
    /// Removes component in `entity`, if the entity had them, they will be returned.
    ///
    /// Multiple components can be removed at the same time using a tuple.
    ///
    /// `T` has to be a tuple even for a single type.
    /// In this case use (T,).
    ///
    /// The compiler has trouble inferring the return types.
    /// You'll often have to use the full path `Remove::<type>::remove`.
    ///
    /// Unwraps errors.
    /// ### Example
    /// ```
    /// use shipyard::{EntitiesViewMut, OldComponent, Remove, ViewMut, World};
    ///
    /// let world = World::new();
    ///
    /// world.run(|mut entities: EntitiesViewMut, mut usizes: ViewMut<usize>, mut u32s: ViewMut<u32>| {
    /// let entity1 = entities.add_entity((&mut usizes, &mut u32s), (0usize, 1u32));
    /// let old = Remove::<(usize, u32)>::remove((&mut usizes, &mut u32s), entity1);
    /// assert_eq!(old, (Some(OldComponent::Owned(0)), Some(OldComponent::Owned(1))));
    /// });
    /// ```
    /// When using packed storages you have to pass all storages packed with it,
    /// even if you don't remove any component from it.
    /// ### Example
    /// ```
    /// use shipyard::{EntitiesViewMut, OldComponent, Remove, TightPack, ViewMut, World};
    ///
    /// let world = World::new();
    ///
    /// world.run(|mut entities: EntitiesViewMut, mut usizes: ViewMut<usize>, mut u32s: ViewMut<u32>| {
    /// (&mut usizes, &mut u32s).tight_pack();
    /// let entity1 = entities.add_entity((&mut usizes, &mut u32s), (0usize, 1u32));
    /// let old = Remove::<(usize,)>::remove((&mut usizes, &mut u32s), entity1);
    /// assert_eq!(old, (Some(OldComponent::Owned(0)),));
    /// });
    /// ```
    #[cfg(feature = "panic")]
    #[cfg_attr(docsrs, doc(cfg(feature = "panic")))]
    fn remove(self, entity: EntityId) -> T::Out;
}
// Implements `Removable` for every tuple arity, mapping each component `T`
// to an `Option<OldComponent<T>>` slot in the output tuple.
macro_rules! impl_removable {
    ($(($type: ident, $index: tt))+) => {
        impl<$($type),+> Removable for ($($type,)+) {
            type Out = ($(Option<OldComponent<$type>>,)+);
        }
    }
}
// Implements `Remove` for a tuple of `&mut ViewMut<_>`: the first group is
// removed from, the "additional" group only rides along so packed storages
// stay consistent.
macro_rules! impl_remove {
    // add is short for additional
    ($(($type: ident, $index: tt))+; $(($add_type: ident, $add_index: tt))*) => {
        impl<$($type: 'static,)+ $($add_type: 'static),*> Remove<($($type,)*)> for ($(&mut ViewMut<'_, $type>,)+ $(&mut ViewMut<'_, $add_type>,)*) {
            fn try_remove(self, entity: EntityId) -> Result<<($($type,)+) as Removable>::Out, error::Remove> {
                // non packed storages should not pay the price of pack
                if $(core::mem::discriminant(&self.$index.metadata.pack) != core::mem::discriminant(&Pack::NoPack) || !self.$index.metadata.observer_types.is_empty())||+ {
                    let mut types = [$(TypeId::of::<$type>()),+];
                    types.sort_unstable();
                    let mut add_types = [$(TypeId::of::<$add_type>()),*];
                    add_types.sort_unstable();
                    let mut should_unpack = Vec::with_capacity(types.len() + add_types.len());
                    $(
                        // Each removed storage must see every storage packed
                        // with it in the passed tuple; otherwise bail out.
                        if self.$index.metadata.has_all_storages(&types, &add_types) {
                            match &self.$index.metadata.pack {
                                Pack::Tight(pack) => {
                                    should_unpack.extend_from_slice(&pack.types);
                                    should_unpack.extend_from_slice(&self.$index.metadata.observer_types);
                                }
                                Pack::Loose(pack) => {
                                    should_unpack.extend_from_slice(&pack.tight_types);
                                    should_unpack.extend_from_slice(&self.$index.metadata.observer_types);
                                }
                                Pack::Update(_) => should_unpack.extend_from_slice(&self.$index.metadata.observer_types),
                                Pack::NoPack => should_unpack.extend_from_slice(&self.$index.metadata.observer_types),
                            }
                        } else {
                            return Err(error::Remove::MissingPackStorage(type_name::<$type>()));
                        }
                    )+
                    $(
                        // Additional storages affected by a removed type's
                        // pack must be unpacked for this entity first.
                        if should_unpack.contains(&TypeId::of::<$add_type>()) {
                            self.$add_index.unpack(entity);
                        }
                    )*
                }
                Ok(($(
                    self.$index.actual_remove(entity),
                )+))
            }
            #[cfg(feature = "panic")]
            fn remove(self, entity: EntityId) -> <($($type,)+) as Removable>::Out {
                Remove::<($($type,)+)>::try_remove(self, entity).unwrap()
            }
        }
    }
}
// Recursively expands `impl_remove`/`impl_removable` for every prefix of the
// type list, threading the not-yet-consumed types through as "additional"
// storages; the final arm terminates the recursion.
macro_rules! remove {
    (($type1: ident, $index1: tt) $(($type: ident, $index: tt))*;; ($queue_type1: ident, $queue_index1: tt) $(($queue_type: ident, $queue_index: tt))*) => {
        impl_remove![($type1, $index1) $(($type, $index))*;];
        impl_removable![($type1, $index1) $(($type, $index))*];
        remove![($type1, $index1); $(($type, $index))* ($queue_type1, $queue_index1); $(($queue_type, $queue_index))*];
    };
    // add is short for additional
    ($(($type: ident, $index: tt))+; ($add_type1: ident, $add_index1: tt) $(($add_type: ident, $add_index: tt))*; $(($queue_type: ident, $queue_index: tt))*) => {
        impl_remove![$(($type, $index))+; ($add_type1, $add_index1) $(($add_type, $add_index))*];
        remove![$(($type, $index))+ ($add_type1, $add_index1); $(($add_type, $add_index))*; $(($queue_type, $queue_index))*];
    };
    ($(($type: ident, $index: tt))+;;) => {
        impl_remove![$(($type, $index))+;];
        impl_removable![$(($type, $index))+];
    }
}
// Generate implementations for tuples up to 10 components (A..J).
remove![(A, 0);; (B, 1) (C, 2) (D, 3) (E, 4) (F, 5) (G, 6) (H, 7) (I, 8) (J, 9)];
|
use diesel::connection::AnsiTransactionManager;
use diesel::pg::Pg;
use diesel::Connection;
use stq_db::diesel_repo::*;
use repos::*;
/// Abstract factory producing repositories bound to a borrowed Diesel
/// Postgres connection; cloneable and shareable across threads.
pub trait ReposFactory<C: Connection<Backend = Pg, TransactionManager = AnsiTransactionManager> + 'static>:
    Clone + Send + Sync + 'static
{
    // Builds a pages repository scoped to the lifetime of `db_conn`.
    fn create_pages_repo<'a>(&self, db_conn: &'a C) -> Box<PagesRepo + 'a>;
}
/// Stateless default factory.
#[derive(Clone, Default)]
pub struct ReposFactoryImpl;
impl<C: Connection<Backend = Pg, TransactionManager = AnsiTransactionManager> + 'static> ReposFactory<C> for ReposFactoryImpl {
    fn create_pages_repo<'a>(&self, db_conn: &'a C) -> Box<PagesRepo + 'a> {
        Box::new(DieselRepoImpl::new(db_conn)) as Box<PagesRepo>
    }
}
|
#![no_std]
use core::panic::PanicInfo;
use raspi_pico_sdk::*;
// Entry point called from the Pico SDK startup code (no Rust runtime).
// Blinks the on-board LED (GPIO 25) at 2 Hz forever.
#[no_mangle]
unsafe fn main() {
    let led_pin = 25;
    wrapped_gpio_init(led_pin);
    // true = configure the pin as an output.
    wrapped_gpio_set_dir(led_pin, true);
    loop {
        wrapped_gpio_put(led_pin, true);
        wrapped_sleep_ms(250);
        wrapped_gpio_put(led_pin, false);
        wrapped_sleep_ms(250);
    }
}
// Required in no_std: halt by spinning — there is nothing to unwind to.
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}
|
use chrono::prelude::*;
/// Day-of-year distance (whole days from Jan 1) of Easter Sunday for year
/// `y`, shifted by `offset` days; returned as a single-element mask vector.
pub fn easter(y: isize, offset: isize) -> Vec<isize> {
    // Anonymous Gregorian computus (Meeus/Jones/Butcher). Every
    // intermediate value is non-negative for A.D. years, so truncating
    // integer division is exactly the floor() the algorithm calls for —
    // the original's f32 round-trips are replaced with exact arithmetic.
    let a = y % 19;
    let b = y / 100;
    let c = y % 100;
    let d = b / 4;
    let e = b % 4;
    let f = (b + 8) / 25;
    let g = (b - f + 1) / 3;
    let h = (19 * a + b - d - g + 15) % 30;
    let i = c / 4;
    let k = c % 4;
    let l = (32 + 2 * e + 2 * i - h - k) % 7;
    let m = (a + 11 * h + 22 * l) / 451;
    let month = ((h + l - 7 * m + 114) / 31) as u32;
    let day = ((h + l - 7 * m + 114) % 31) + 1;
    // Both stamps are midnight UTC, so the difference is an exact number
    // of days; the ceiling form preserves parity with the original.
    let date = Utc
        .ymd(y as i32, month, (day + offset) as u32)
        .and_hms(0, 0, 0);
    let year_start = Utc.ymd(y as i32, 1, 1).and_hms(0, 0, 0);
    vec![((date.timestamp() - year_start.timestamp() + 86_399) / 86_400) as isize]
}
#[cfg(test)]
mod test_easter_masks {
    use super::*;
    #[test]
    fn easter_mask() {
        // Known day-of-year offsets for Easter Sunday (days from Jan 1).
        let expected = [(1997, 88), (1998, 101), (1999, 93), (2000, 113)];
        for &(year, day) in &expected {
            assert_eq!(easter(year, 0)[0], day);
        }
    }
}
|
//! Cmd arguments
// Imports
use std::path::PathBuf;
/// Arguments
///
/// Parsed command-line invocation; one variant per subcommand.
#[derive(Clone, Debug)]
pub enum Args {
    /// Serialize to C
    SerializeC {
        /// Input file
        input_file: PathBuf,
    },
}
/// Parses arguments
///
/// Errors on an unknown (or missing) subcommand.
pub fn parse() -> Result<Args, anyhow::Error> {
    const SERIALIZE_C_SUBCMD: &str = "serialize-c";
    const INPUT_FILE_STR: &str = "input-file";
    // Serialize to C subcommand
    // NOTE(review): `.help(...)` overrides the full help text; `.about(...)`
    // is the usual way to describe a subcommand — confirm which was meant.
    let serialize_c_subcmd = clap::SubCommand::with_name(SERIALIZE_C_SUBCMD)
        .help("Serialize the json base stats to C")
        .arg(
            clap::Arg::with_name(INPUT_FILE_STR)
                .takes_value(true)
                .required(true)
                .index(1),
        );
    let matches = clap::App::new("poke-base-stats")
        .subcommands([serialize_c_subcmd])
        .get_matches();
    let args = match matches.subcommand() {
        (SERIALIZE_C_SUBCMD, Some(matches)) => {
            // `required(true)` makes a missing value unreachable here.
            let input_file = matches
                .value_of_os(INPUT_FILE_STR)
                .map(PathBuf::from)
                .expect("Required argument missing");
            Args::SerializeC { input_file }
        },
        (subcmd, _) => anyhow::bail!("Unknown subcommand {}", subcmd),
    };
    Ok(args)
}
|
use std::io::{stdin, Read, StdinLock};
use std::str::FromStr;
/// Whitespace-delimited token reader over a locked stdin handle.
#[allow(dead_code)]
struct Scanner<'a> {
    cin: StdinLock<'a>,
}
#[allow(dead_code)]
impl<'a> Scanner<'a> {
    fn new(cin: StdinLock<'a>) -> Scanner<'a> {
        Scanner { cin: cin }
    }
    /// Reads the next whitespace-separated token and parses it;
    /// `None` when parsing fails (including exhausted input).
    fn read<T: FromStr>(&mut self) -> Option<T> {
        // Byte-at-a-time scan: skip leading whitespace, then collect until
        // the next whitespace byte.
        let token = self.cin.by_ref().bytes().map(|c| c.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect::<String>();
        token.parse::<T>().ok()
    }
    /// Like `read`, but panics when the token cannot be parsed.
    fn input<T: FromStr>(&mut self) -> T {
        self.read().unwrap()
    }
    /// Reads `len` tokens into a vector.
    fn vec<T: FromStr>(&mut self, len: usize) -> Vec<T> {
        (0..len).map(|_| self.input()).collect()
    }
    /// Reads a `row` x `col` matrix of tokens.
    fn mat<T: FromStr>(&mut self, row: usize, col: usize) -> Vec<Vec<T>> {
        (0..row).map(|_| self.vec(col)).collect()
    }
}
use std::collections::BTreeMap;
// For each element, prints how many equal pairs remain in the array after
// removing one copy of that element.
//
// Fixes: the original left `k` unused in the summation loop (compiler
// warning) and guarded both C(v,2) and C(x-1,2) with redundant `> 0`
// checks — the binomial terms are already 0 in those cases.
fn main() {
    let cin = stdin();
    let cin = cin.lock();
    let mut sc = Scanner::new(cin);
    let n: i64 = sc.input();
    let arr: Vec<i64> = sc.vec(n as usize);
    // Occurrence count per distinct value.
    let mut counts: BTreeMap<i64, i64> = BTreeMap::new();
    for &a in &arr {
        *counts.entry(a).or_insert(0) += 1;
    }
    // Total number of equal pairs: sum of C(count, 2) over all values.
    let total: i64 = counts.values().map(|&c| c * (c - 1) / 2).sum();
    for a in &arr {
        let c = counts[a];
        // Removing one copy of `a` replaces C(c, 2) with C(c - 1, 2).
        let ans = total - c * (c - 1) / 2 + (c - 1) * (c - 2) / 2;
        println!("{}", ans);
    }
}
|
// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! The node header.
use crate::trie_constants;
use codec::{Decode, Encode, Input, Output};
use sp_std::iter::once;
/// A node header
// Variant payloads carry the nibble count of the node's partial key.
#[derive(Copy, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub(crate) enum NodeHeader {
    Null,
    // (has_value, nibble_count)
    Branch(bool, usize),
    Leaf(usize),
}
/// NodeHeader without content
/// (the node's shape alone, with no nibble count attached).
pub(crate) enum NodeKind {
    Leaf,
    BranchNoValue,
    BranchWithValue,
}
impl Encode for NodeHeader {
    fn encode_to<T: Output>(&self, output: &mut T) {
        match self {
            NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE),
            // Each prefix mask supplies the kind bits; the shared helper
            // appends the (possibly multi-byte) nibble-count encoding.
            NodeHeader::Branch(true, nibble_count) =>
                encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output),
            NodeHeader::Branch(false, nibble_count) =>
                encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output),
            NodeHeader::Leaf(nibble_count) =>
                encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output),
        }
    }
}
impl codec::EncodeLike for NodeHeader {}
impl Decode for NodeHeader {
    fn decode<I: Input>(input: &mut I) -> Result<Self, codec::Error> {
        let i = input.read_byte()?;
        if i == trie_constants::EMPTY_TRIE {
            return Ok(NodeHeader::Null)
        }
        // The two high bits of the first byte discriminate the node kind;
        // the remaining bits start the size encoding.
        match i & (0b11 << 6) {
            trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)),
            trie_constants::BRANCH_WITHOUT_MASK =>
                Ok(NodeHeader::Branch(false, decode_size(i, input)?)),
            trie_constants::BRANCH_WITH_MASK =>
                Ok(NodeHeader::Branch(true, decode_size(i, input)?)),
            // do not allow any special encoding
            _ => Err("Unallowed encoding".into()),
        }
    }
}
/// Returns an iterator over encoded bytes for node header and size.
/// Size encoding allows unlimited, length inefficient, representation, but
/// is bounded to 16 bit maximum value to avoid possible DOS.
pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator<Item = u8> {
    let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size);
    // Sizes up to 62 fit next to the prefix bits in the first byte;
    // 63 flags a continuation with the remainder in the following bytes.
    let l1 = sp_std::cmp::min(62, size);
    let (first_byte, mut rem) =
        if size == l1 { (once(prefix + l1 as u8), 0) } else { (once(prefix + 63), size - l1) };
    let next_bytes = move || {
        if rem > 0 {
            if rem < 256 {
                // Final continuation byte stores `rem - 1` (0..=254).
                let result = rem - 1;
                rem = 0;
                Some(result as u8)
            } else {
                // 255 marks "another continuation byte follows".
                rem = rem.saturating_sub(255);
                Some(255)
            }
        } else {
            None
        }
    };
    first_byte.chain(sp_std::iter::from_fn(next_bytes))
}
/// Encodes size and prefix to a stream output.
fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut impl Output) {
    // Forward every byte produced by the shared header/size iterator.
    size_and_prefix_iterator(size, prefix).for_each(|b| out.push_byte(b));
}
/// Decode size only from stream input and header byte.
fn decode_size(first: u8, input: &mut impl Input) -> Result<usize, codec::Error> {
    // The low six bits of the header byte carry the size when it fits;
    // 63 marks a multi-byte length encoding. (The original spelled the
    // mask as `255u8 >> 2`, which is the same 0b0011_1111.)
    let small = (first & 0b0011_1111) as usize;
    if small < 63 {
        return Ok(small)
    }
    let mut size = small - 1;
    while size <= trie_constants::NIBBLE_SIZE_BOUND {
        let byte = input.read_byte()? as usize;
        if byte < 255 {
            // A byte below 255 terminates the encoding.
            return Ok(size + byte + 1)
        }
        size += 255;
    }
    // Clamp runaway encodings to the DOS-protection bound.
    Ok(trie_constants::NIBBLE_SIZE_BOUND)
}
|
/*
* Copyright (c) 2021, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
// Rust Library
use std::ffi::CStr;
use std::mem;
use std::os::raw::c_char;
use std::slice;
mod bal_map;
pub use bal_map::map::BalMapInt;
// String comparison of two C-string type descriptors.
//
// Bug fix: the original compared the raw pointer values, which is only true
// when both arguments alias the same allocation — two equal strings in
// different buffers compared unequal. Compare the string contents instead.
#[no_mangle]
pub extern "C" fn is_same_type(src_type: *const c_char, dest_type: *const c_char) -> bool {
    if src_type.is_null() || dest_type.is_null() {
        // Preserve the old pointer semantics for nulls: null == null.
        return src_type == dest_type;
    }
    // SAFETY: both pointers are non-null and assumed by the FFI contract to
    // reference valid NUL-terminated strings.
    unsafe { CStr::from_ptr(src_type) == CStr::from_ptr(dest_type) }
}
/// Prints a 64-bit signed integer to stdout.
#[no_mangle]
pub extern "C" fn print64(num64: i64) {
    println!("{}", num64);
}
/// Prints a 32-bit signed integer to stdout.
#[no_mangle]
pub extern "C" fn print32(num32: i32) {
    println!("{}", num32);
}
/// Prints a 16-bit signed integer to stdout.
#[no_mangle]
pub extern "C" fn print16(num16: i16) {
    println!("{}", num16);
}
/// Prints an 8-bit signed integer to stdout.
#[no_mangle]
pub extern "C" fn print8(num8: i8) {
    println!("{}", num8);
}
/// Prints a 64-bit unsigned integer to stdout.
#[no_mangle]
pub extern "C" fn printu64(num64: u64) {
    println!("{}", num64);
}
/// Prints a 32-bit unsigned integer to stdout.
#[no_mangle]
pub extern "C" fn printu32(num32: u32) {
    println!("{}", num32);
}
/// Prints a 16-bit unsigned integer to stdout.
#[no_mangle]
pub extern "C" fn printu16(num16: u16) {
    println!("{}", num16);
}
/// Prints an 8-bit unsigned integer to stdout.
#[no_mangle]
pub extern "C" fn printu8(num8: u8) {
    println!("{}", num8);
}
/// Prints a 64-bit float to stdout.
#[no_mangle]
pub extern "C" fn printf64(num64: f64) {
    println!("{}", num64);
}
/// Prints a 32-bit float to stdout.
#[no_mangle]
pub extern "C" fn printf32(num32: f32) {
    println!("{}", num32);
}
// Allocates a Ballerina int-array (a Vec of element pointers) and leaks it
// to the caller, which owns the returned pointer from here on.
// A negative requested size falls back to a default capacity of 8.
#[no_mangle]
pub extern "C" fn new_int_array(size: i32) -> *mut Vec<*mut i32> {
    const DEFAULT_CAPACITY: usize = 8;
    // Expression form replaces the original mutable temporary; the final
    // `as *mut Vec<*mut i32>` cast was a no-op and is dropped.
    let capacity = if size < 0 { DEFAULT_CAPACITY } else { size as usize };
    Box::into_raw(Box::new(Vec::with_capacity(capacity)))
}
// Stores `ref_ptr` at index `n`, growing the array with null pointers as
// needed. Ownership of the array stays with the caller.
#[no_mangle]
pub extern "C" fn int_array_store(arr_ptr: *mut Vec<*mut i32>, n: i32, ref_ptr: *mut i32) {
    // SAFETY: the FFI contract guarantees `arr_ptr` came from
    // `new_int_array` and is still live. Reborrowing with `&mut *` replaces
    // the original Box::from_raw + mem::forget dance, which would have
    // double-freed on an early return or panic between the two calls.
    let arr = unsafe { &mut *arr_ptr };
    let index = n as usize;
    if arr.len() <= index {
        // Pad intermediate slots with nulls (same bit pattern as `0 as *mut i32`).
        arr.resize(index + 1, std::ptr::null_mut());
    }
    arr[index] = ref_ptr;
}
// Loads the element pointer stored at index `n`.
// Panics if `n` is out of bounds for the array.
#[no_mangle]
pub extern "C" fn int_array_load(arr_ptr: *mut Vec<*mut i32>, n: i32) -> *mut i32 {
    // SAFETY: the FFI contract guarantees `arr_ptr` is a live array from
    // `new_int_array`. A shared reborrow replaces the original
    // Box::from_raw + mem::forget pattern (unsound on panic paths).
    let arr = unsafe { &*arr_ptr };
    arr[n as usize]
}
// Ballerina Map implementation
/// Allocates a new int-valued map and leaks it to the caller.
#[no_mangle]
pub extern "C" fn map_new_int() -> *mut BalMapInt {
    Box::into_raw(Box::new(BalMapInt::new()))
}
/// Reclaims and drops a map created by `map_new_int`; null is a no-op.
#[no_mangle]
pub extern "C" fn map_deint_int(ptr: *mut BalMapInt) {
    if ptr.is_null() {
        return;
    }
    // Re-boxing runs the destructor when the Box goes out of scope.
    unsafe {
        Box::from_raw(ptr);
    }
}
/// Inserts the value behind `member_ptr` under the C-string `key`.
/// Panics on null pointers or a non-UTF-8 key.
#[no_mangle]
pub extern "C" fn map_store_int(ptr: *mut BalMapInt, key: *const c_char, member_ptr: *const i32) {
    // Load BalMap from pointer
    let bal_map = unsafe {
        assert!(!ptr.is_null());
        &mut *ptr
    };
    // Load Key C string
    let key = unsafe {
        assert!(!key.is_null());
        CStr::from_ptr(key)
    };
    let key_str = key.to_str().unwrap();
    // Load member value
    let member = unsafe {
        assert!(!member_ptr.is_null());
        slice::from_raw_parts(member_ptr, 1)
    };
    // Insert new field
    bal_map.insert(key_str, member[0]);
    // Print length to test functionality
    println!("length={}", bal_map.length());
}
|
extern crate libc;
#[cfg(test)]
#[macro_use]
extern crate lazy_static;
#[cfg(test)]
extern crate rand;
use std::io::{Read, Write};
use std::net::TcpListener;
use std::ffi::CString;
mod channel;
/// This is a opaque rust equivalent for comedi_t inside libcomedi.h
#[allow(non_camel_case_types)]
enum comedi_t {}
// Raw FFI bindings into libcomedi's digital and analog I/O API.
#[link(name = "comedi")]
extern "C" {
    fn comedi_open(interface_name: *const libc::c_char) -> *const comedi_t;
    fn comedi_dio_config(it: *const comedi_t, subd: libc::c_uint, chan: libc::c_uint, dir: libc::c_uint) -> libc::c_int;
    fn comedi_dio_write(it: *const comedi_t, subd: libc::c_uint, chan: libc::c_uint, bit: libc::c_uint) -> libc::c_int;
    fn comedi_dio_read(it: *const comedi_t, subd: libc::c_uint, chan: libc::c_uint, bit: *mut libc::c_uint) -> libc::c_int;
    fn comedi_data_write(it: *const comedi_t, subd: libc::c_uint, chan: libc::c_uint, range: libc::c_uint, aref: libc::c_uint, data: libc::c_uint) -> libc::c_int;
}
/// Commands decodable from the elevator's 4-byte wire frames
/// (see `Command::decode` for the byte layout).
enum Command {
    Reserved,
    WriteMotorDirection(ElevatorDirection),
    WriteOrderButtonLight(ButtonType, u8, bool),
    WriteFloorIndicator(u8),
    WriteDoorOpenLight(bool),
    WriteStopButtonLight(bool),
    ReadOrderButton(ButtonType, u8),
    ReadFloorSensor,
    ReadStopButton,
    ReadObstructionSwitch,
}
impl Command {
    /// Decodes a 4-byte frame: byte 0 selects the command, bytes 1-3 are
    /// its arguments. Panics on a short frame or unknown command code.
    fn decode(data: &[u8]) -> Self {
        assert_eq!(data.len(), 4);
        match data[0] {
            0 => Command::Reserved,
            1 => Command::WriteMotorDirection(ElevatorDirection::decode(data[1])),
            2 => Command::WriteOrderButtonLight(ButtonType::decode(data[1]), data[2], data[3] != 0),
            3 => Command::WriteFloorIndicator(data[1]),
            4 => Command::WriteDoorOpenLight(data[1] != 0),
            5 => Command::WriteStopButtonLight(data[1] != 0),
            6 => Command::ReadOrderButton(ButtonType::decode(data[1]), data[2]),
            7 => Command::ReadFloorSensor,
            8 => Command::ReadStopButton,
            9 => Command::ReadObstructionSwitch,
            x => panic!("Not a valid command code: {}", x),
        }
    }
}
/// Motor travel direction.
pub enum ElevatorDirection{
    Up,
    Down,
    Stop,
}
impl ElevatorDirection {
    /// Decodes the wire byte: 0 = Stop, 1 = Up, 255 = Down.
    /// Panics on any other value.
    fn decode(data: u8) -> Self {
        match data {
            0 => ElevatorDirection::Stop,
            1 => ElevatorDirection::Up,
            255 => ElevatorDirection::Down,
            x => panic!("Not a valid direction code: {}", x),
        }
    }
}
/// Kind of call button addressed by a command.
#[derive(Debug, PartialEq, Clone, Copy)]
enum ButtonType {
    HallUp,
    HallDown,
    Cab,
}
impl ButtonType {
    /// Decodes the wire byte: 0 = HallUp, 1 = HallDown, 2 = Cab.
    /// Panics on any other value.
    fn decode(data: u8) -> Self {
        match data {
            0 => ButtonType::HallUp,
            1 => ButtonType::HallDown,
            2 => ButtonType::Cab,
            x => panic!("Not a valid ButtonType code: {}", x),
        }
    }
}
/// Handle to an open comedi device driving the elevator hardware.
pub struct ElevatorInterface(*const comedi_t);
// SAFETY(review): the raw device pointer is only ever handed to libcomedi
// calls; this assumes those calls tolerate cross-thread use — confirm
// against libcomedi's thread-safety guarantees before relying on Send.
unsafe impl Send for ElevatorInterface {}
impl ElevatorInterface {
    /// Raw analog value written to the motor output to drive at full speed.
    const MOTOR_SPEED: u32 = 2800;
    /// Number of floors served by this installation.
    const N_FLOORS: u8 = 4;

    // NOTE(review): throughout this impl, `channel::X >> 8` is passed as the
    // comedi subdevice and `channel::X & 0xff` as the channel number, so the
    // `channel` constants appear to pack both into one word -- confirm
    // against the `channel` module (not visible here).

    /// Open the comedi device at `interface_name` (e.g. "/dev/comedi0") and
    /// configure the I/O direction of the digital channels on all four ports.
    ///
    /// Returns `Err(())` when the device cannot be opened or any
    /// `comedi_dio_config` call reports a non-zero status (assumes 0 means
    /// success -- confirm with the comedilib docs).
    fn open(interface_name: &str) -> Result<Self, ()> {
        unsafe {
            // The CString temporary lives until the end of this statement,
            // so the pointer stays valid for the duration of comedi_open.
            let comedi = comedi_open(CString::new(interface_name).unwrap().as_ptr());
            if comedi.is_null() {
                Err(())
            } else {
                let mut status = 0;
                // Eight channels per port; ports 3 and 4 sit at channel
                // offsets 8..16 and 16..24 on their subdevices.
                for i in 0..8 {
                    status |= comedi_dio_config(comedi, channel::PORT_1_SUBDEVICE, i, channel::PORT_1_DIRECTION);
                    status |= comedi_dio_config(comedi, channel::PORT_2_SUBDEVICE, i, channel::PORT_2_DIRECTION);
                    status |= comedi_dio_config(comedi, channel::PORT_3_SUBDEVICE, i+8, channel::PORT_3_DIRECTION);
                    status |= comedi_dio_config(comedi, channel::PORT_4_SUBDEVICE, i+16, channel::PORT_4_DIRECTION);
                }
                if status == 0 {
                    Ok(ElevatorInterface(comedi))
                } else {
                    Err(())
                }
            }
        }
    }

    /// Set the motor direction. `Up`/`Down` write the direction bit and then
    /// drive the analog speed output at `MOTOR_SPEED`; `Stop` writes zero
    /// speed and leaves the direction bit unchanged.
    fn set_direction(&self, dir: ElevatorDirection) {
        unsafe {
            match dir {
                ElevatorDirection::Up => {
                    // Direction bit 0 = up.
                    comedi_dio_write(self.0, channel::MOTORDIR >> 8, channel::MOTORDIR & 0xff, 0);
                    comedi_data_write(self.0, channel::MOTOR >> 8, channel::MOTOR & 0xff, 0, 0, Self::MOTOR_SPEED);
                },
                ElevatorDirection::Down => {
                    // Direction bit 1 = down.
                    comedi_dio_write(self.0, channel::MOTORDIR >> 8, channel::MOTORDIR & 0xff, 1);
                    comedi_data_write(self.0, channel::MOTOR >> 8, channel::MOTOR & 0xff, 0, 0, Self::MOTOR_SPEED);
                },
                ElevatorDirection::Stop => {
                    comedi_data_write(self.0, channel::MOTOR >> 8, channel::MOTOR & 0xff, 0, 0, 0);
                },
            }
        }
    }

    /// Poll the four floor sensors in order and return the first active
    /// floor (0-based), or `None` when the cab is between floors.
    fn read_floor_sensor(&self) -> Option<u8> {
        unsafe {
            let mut data: libc::c_uint = 0;
            comedi_dio_read(self.0, channel::SENSOR_FLOOR0 >> 8, channel::SENSOR_FLOOR0 & 0xff, &mut data);
            if data != 0 {
                return Some(0);
            }
            comedi_dio_read(self.0, channel::SENSOR_FLOOR1 >> 8, channel::SENSOR_FLOOR1 & 0xff, &mut data);
            if data != 0 {
                return Some(1);
            }
            comedi_dio_read(self.0, channel::SENSOR_FLOOR2 >> 8, channel::SENSOR_FLOOR2 & 0xff, &mut data);
            if data != 0 {
                return Some(2);
            }
            comedi_dio_read(self.0, channel::SENSOR_FLOOR3 >> 8, channel::SENSOR_FLOOR3 & 0xff, &mut data);
            if data != 0 {
                return Some(3);
            }
            None
        }
    }

    /// Turn an order-button lamp on or off.
    ///
    /// `(HallDown, 0)` and `(HallUp, 3)` are deliberate no-ops (the dummy
    /// `0` arms): those buttons do not exist at the terminal floors.
    ///
    /// # Panics
    /// Panics if `floor >= N_FLOORS`, or in the catch-all arm (unreachable
    /// while N_FLOORS == 4, since all (button, 0..=3) pairs are matched).
    fn set_order_button_light(&self, button_type: ButtonType, floor: u8, on_not_off: bool) {
        assert!(floor < ElevatorInterface::N_FLOORS);
        unsafe {
            match (button_type, floor) {
                (ButtonType::HallUp, 0) => comedi_dio_write(self.0, channel::LIGHT_UP0 >> 8, channel::LIGHT_UP0 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::Cab, 0) => comedi_dio_write(self.0, channel::LIGHT_COMMAND0 >> 8, channel::LIGHT_COMMAND0 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::HallDown, 0) => 0,
                (ButtonType::HallUp, 1) => comedi_dio_write(self.0, channel::LIGHT_UP1 >> 8, channel::LIGHT_UP1 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::HallDown, 1) => comedi_dio_write(self.0, channel::LIGHT_DOWN1 >> 8, channel::LIGHT_DOWN1 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::Cab, 1) => comedi_dio_write(self.0, channel::LIGHT_COMMAND1 >> 8, channel::LIGHT_COMMAND1 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::HallUp, 2) => comedi_dio_write(self.0, channel::LIGHT_UP2 >> 8, channel::LIGHT_UP2 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::HallDown, 2) => comedi_dio_write(self.0, channel::LIGHT_DOWN2 >> 8, channel::LIGHT_DOWN2 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::Cab, 2) => comedi_dio_write(self.0, channel::LIGHT_COMMAND2 >> 8, channel::LIGHT_COMMAND2 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::HallUp, 3) => 0,
                (ButtonType::HallDown, 3) => comedi_dio_write(self.0, channel::LIGHT_DOWN3 >> 8, channel::LIGHT_DOWN3 & 0xff, on_not_off as libc::c_uint),
                (ButtonType::Cab, 3) => comedi_dio_write(self.0, channel::LIGHT_COMMAND3 >> 8, channel::LIGHT_COMMAND3 & 0xff, on_not_off as libc::c_uint),
                (b, f) => panic!("You tried to set lamp in non-existing button: {:?}:{} <button:floor>", b, f), //TODO: implement display for ButtonType
            };
        }
    }

    /// Read the pressed state of an order button.
    ///
    /// `(HallDown, 0)` and `(HallUp, 3)` leave `data` at 0 and thus always
    /// return `false` (those buttons do not exist).
    ///
    /// # Panics
    /// Panics if `floor >= 4` (NOTE(review): literal 4 here instead of
    /// `N_FLOORS`; the panic message below also says "set lamp" -- looks
    /// copy-pasted from the setter).
    fn read_order_button(&self, button_type: ButtonType, floor: u8) -> bool {
        assert!(floor < 4);
        unsafe {
            let mut data: libc::c_uint = 0;
            match (button_type, floor) {
                (ButtonType::HallUp, 0) => comedi_dio_read(self.0, channel::BUTTON_UP0 >> 8, channel::BUTTON_UP0 & 0xff, &mut data),
                (ButtonType::Cab, 0) => comedi_dio_read(self.0, channel::BUTTON_COMMAND0 >> 8, channel::BUTTON_COMMAND0 & 0xff, &mut data),
                (ButtonType::HallDown, 0) => 0,
                (ButtonType::HallUp, 1) => comedi_dio_read(self.0, channel::BUTTON_UP1 >> 8, channel::BUTTON_UP1 & 0xff, &mut data),
                (ButtonType::HallDown, 1) => comedi_dio_read(self.0, channel::BUTTON_DOWN1 >> 8, channel::BUTTON_DOWN1 & 0xff, &mut data),
                (ButtonType::Cab, 1) => comedi_dio_read(self.0, channel::BUTTON_COMMAND1 >> 8, channel::BUTTON_COMMAND1 & 0xff, &mut data),
                (ButtonType::HallUp, 2) => comedi_dio_read(self.0, channel::BUTTON_UP2 >> 8, channel::BUTTON_UP2 & 0xff, &mut data),
                (ButtonType::HallDown, 2) => comedi_dio_read(self.0, channel::BUTTON_DOWN2 >> 8, channel::BUTTON_DOWN2 & 0xff, &mut data),
                (ButtonType::Cab, 2) => comedi_dio_read(self.0, channel::BUTTON_COMMAND2 >> 8, channel::BUTTON_COMMAND2 & 0xff, &mut data),
                (ButtonType::HallUp, 3) => 0,
                (ButtonType::HallDown, 3) => comedi_dio_read(self.0, channel::BUTTON_DOWN3 >> 8, channel::BUTTON_DOWN3 & 0xff, &mut data),
                (ButtonType::Cab, 3) => comedi_dio_read(self.0, channel::BUTTON_COMMAND3 >> 8, channel::BUTTON_COMMAND3 & 0xff, &mut data),
                (b, f) => panic!("You tried to set lamp in non-existing button: {:?}:{} <button:floor>", b, f), //TODO: implement display for ButtonType
            };
            data != 0
        }
    }

    /// Turn the stop-button lamp on or off.
    fn set_stop_button_light(&self, on_not_off: bool) {
        unsafe {
            comedi_dio_write(self.0, channel::LIGHT_STOP >> 8, channel::LIGHT_STOP & 0xff, on_not_off as libc::c_uint);
        }
    }

    /// Read the pressed state of the stop button.
    fn read_stop_button(&self) -> bool {
        unsafe{
            let mut data: libc::c_uint = 0;
            comedi_dio_read(self.0, channel::STOP >> 8, channel::STOP & 0xff, &mut data);
            data != 0
        }
    }

    /// Show `floor` on the floor indicator: the number is written in binary
    /// across two lamps (IND0 = bit 1, IND1 = bit 0).
    ///
    /// # Panics
    /// Panics if `floor >= 4` (NOTE(review): literal 4 instead of N_FLOORS).
    fn set_floor_indicator(&self, floor: u8) {
        assert!(floor < 4);
        unsafe {
            comedi_dio_write(self.0, channel::LIGHT_FLOOR_IND0 >> 8, channel::LIGHT_FLOOR_IND0 & 0xff, ((floor & 1<<1) != 0) as u32);
            comedi_dio_write(self.0, channel::LIGHT_FLOOR_IND1 >> 8, channel::LIGHT_FLOOR_IND1 & 0xff, ((floor & 1<<0) != 0) as u32);
        }
    }

    /// Turn the door-open lamp on or off.
    fn set_door_light(&self, on_not_off: bool) {
        unsafe {
            comedi_dio_write(self.0, channel::LIGHT_DOOR_OPEN >> 8, channel::LIGHT_DOOR_OPEN & 0xff, on_not_off as libc::c_uint);
        }
    }

    /// Read the state of the door obstruction switch.
    fn read_obstruction_sensor(&self) -> bool {
        unsafe {
            let mut data: libc::c_uint = 0;
            comedi_dio_read(self.0, channel::OBSTRUCTION >> 8, channel::OBSTRUCTION & 0xff, &mut data);
            data != 0
        }
    }
}
impl Drop for ElevatorInterface {
    /// Stop the motor when the handle is dropped so the cab never keeps
    /// running after the controlling process exits or panics.
    fn drop(&mut self) {
        self.set_direction(ElevatorDirection::Stop);
    }
}
fn main() {
println!("Elevator server started");
let (mut stream, _addr) = TcpListener::bind("localhost:15657").unwrap().accept().unwrap();
let elevator = ElevatorInterface::open("/dev/comedi0").unwrap();
println!("Client connected to server");
loop {
let mut received_data = [0u8; 4];
if let Err(_) = stream.read_exact(&mut received_data) {
println!("Lost connection to client");
return;
}
let command = Command::decode(&received_data);
match command {
Command::Reserved => (),
Command::WriteMotorDirection(dir) => elevator.set_direction(dir),
Command::WriteOrderButtonLight(button, floor, state) => elevator.set_order_button_light(button, floor, state),
Command::WriteFloorIndicator(floor) => elevator.set_floor_indicator(floor),
Command::WriteDoorOpenLight(state) => elevator.set_door_light(state),
Command::WriteStopButtonLight(state) => elevator.set_stop_button_light(state),
Command::ReadOrderButton(button, floor) => {
let response_data = [6u8, elevator.read_order_button(button, floor) as u8, 0, 0];
stream.write_all(&response_data).unwrap();
},
Command::ReadFloorSensor => {
let response_data = match elevator.read_floor_sensor() {
Some(floor) => [7u8, 1, floor, 0],
None => [7u8, 0, 0, 0],
};
stream.write_all(&response_data).unwrap();
},
Command::ReadStopButton => {
let response_data = [9u8, elevator.read_stop_button() as u8, 0, 0];
stream.write_all(&response_data).unwrap();
},
Command::ReadObstructionSwitch => {
let response_data = [9u8, elevator.read_obstruction_sensor() as u8, 0, 0];
stream.write_all(&response_data).unwrap();
},
}
}
}
#[cfg(test)]
mod tests {
    use *;
    use std::sync::Mutex;
    use std::thread;
    use std::time::Duration;
    // These tests are executed on an actual elevator. To make sure only one test is run at the same time, the elevator is protected by this mutex.
    // NOTE(review): the button tests also require a human to press buttons /
    // toggle the obstruction switch, so they block until interacted with.
    lazy_static! {
        static ref ELEVATOR: Mutex<ElevatorInterface> = {
            let elevator = ElevatorInterface::open("/dev/comedi0").unwrap();
            // Clear every order-button lamp so each test starts from a dark
            // panel. The ranges mirror which buttons physically exist:
            // Cab on all floors, HallDown on 1..N, HallUp on 0..N-1.
            for f in 0..ElevatorInterface::N_FLOORS { elevator.set_order_button_light(ButtonType::Cab, f, false); }
            for f in 1..ElevatorInterface::N_FLOORS { elevator.set_order_button_light(ButtonType::HallDown, f, false); }
            for f in 0..ElevatorInterface::N_FLOORS-1 { elevator.set_order_button_light(ButtonType::HallUp, f, false); }
            elevator.set_stop_button_light(false);
            Mutex::new(elevator)
        };
    }

    /// Smoke test: opening the device and clearing the lamps succeeds.
    #[test]
    fn init_elevator() {
        ELEVATOR.lock().unwrap();
    }

    /// Drive bottom -> top -> one floor down, mirroring the floor sensor
    /// onto the floor indicator while moving.
    #[test]
    fn test_run() {
        let elevator = ELEVATOR.lock().unwrap();
        println!("The elevator will now do a run from the bottom floor to the top floor. It will stop in the floor below the top floor");
        // Busy-wait loops: poll the sensor until the target floor triggers.
        elevator.set_direction(ElevatorDirection::Down);
        while elevator.read_floor_sensor() != Some(0) {
            if let Some(floor) = elevator.read_floor_sensor() {
                elevator.set_floor_indicator(floor);
            }
        }
        elevator.set_floor_indicator(0);
        elevator.set_direction(ElevatorDirection::Up);
        while elevator.read_floor_sensor() != Some(ElevatorInterface::N_FLOORS-1) {
            if let Some(floor) = elevator.read_floor_sensor() {
                elevator.set_floor_indicator(floor);
            }
        }
        elevator.set_floor_indicator(ElevatorInterface::N_FLOORS-1);
        elevator.set_direction(ElevatorDirection::Down);
        while elevator.read_floor_sensor() != Some(ElevatorInterface::N_FLOORS-2) {}
        elevator.set_floor_indicator(ElevatorInterface::N_FLOORS-2);
        elevator.set_direction(ElevatorDirection::Stop);
    }

    /// For each cab button (in random order): blink its lamp, then wait for
    /// a human to press that button before moving on.
    #[test]
    fn test_cab_buttons() {
        let elevator = ELEVATOR.lock().unwrap();
        for i in rand::seq::sample_indices(&mut rand::thread_rng(), ElevatorInterface::N_FLOORS as usize, ElevatorInterface::N_FLOORS as usize).into_iter() {
            elevator.set_order_button_light(ButtonType::Cab, i as u8, true);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::Cab, i as u8, false);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::Cab, i as u8, true);
            while !elevator.read_order_button(ButtonType::Cab, i as u8) {}
            elevator.set_order_button_light(ButtonType::Cab, i as u8, false);
        }
    }

    /// Same as `test_cab_buttons`, but for the hall-up buttons on floors
    /// 0..N-1 (there is no hall-up button at the top floor).
    #[test]
    fn test_hall_up_buttons() {
        let elevator = ELEVATOR.lock().unwrap();
        for i in rand::seq::sample_indices(&mut rand::thread_rng(), ElevatorInterface::N_FLOORS as usize - 1, ElevatorInterface::N_FLOORS as usize - 1).into_iter() {
            elevator.set_order_button_light(ButtonType::HallUp, i as u8, true);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::HallUp, i as u8, false);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::HallUp, i as u8, true);
            while !elevator.read_order_button(ButtonType::HallUp, i as u8) {}
            elevator.set_order_button_light(ButtonType::HallUp, i as u8, false);
        }
    }

    /// Same pattern for the hall-down buttons; the `+ 1` offset maps the
    /// sampled indices onto floors 1..N (no hall-down at the bottom floor).
    #[test]
    fn test_hall_down_buttons() {
        let elevator = ELEVATOR.lock().unwrap();
        for i in rand::seq::sample_indices(&mut rand::thread_rng(), ElevatorInterface::N_FLOORS as usize - 1, ElevatorInterface::N_FLOORS as usize - 1).into_iter() {
            elevator.set_order_button_light(ButtonType::HallDown, i as u8 + 1, true);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::HallDown, i as u8 + 1, false);
            thread::sleep(Duration::new(0, 200000000));
            elevator.set_order_button_light(ButtonType::HallDown, i as u8 + 1, true);
            while !elevator.read_order_button(ButtonType::HallDown, i as u8 + 1) {}
            elevator.set_order_button_light(ButtonType::HallDown, i as u8 + 1, false);
        }
    }

    /// Blink the stop-button lamp, then wait for a human to press it.
    #[test]
    fn test_stop_button() {
        let elevator = ELEVATOR.lock().unwrap();
        elevator.set_stop_button_light(true);
        thread::sleep(Duration::new(0, 200000000));
        elevator.set_stop_button_light(false);
        thread::sleep(Duration::new(0, 200000000));
        elevator.set_stop_button_light(true);
        while !elevator.read_stop_button() {}
        elevator.set_stop_button_light(false);
    }

    /// Blink the door lamp four times, then require the obstruction switch
    /// to be toggled on and off three times in a row.
    #[test]
    fn door_test() {
        let elevator = ELEVATOR.lock().unwrap();
        // Blink four times (loop variable intentionally unused).
        for i in 0..4 {
            elevator.set_door_light(true);
            thread::sleep(Duration::new(0, 100000000));
            elevator.set_door_light(false);
            thread::sleep(Duration::new(0, 100000000));
        }
        elevator.set_door_light(true);
        while !elevator.read_obstruction_sensor() {}
        thread::sleep(Duration::new(0, 500000000));
        elevator.set_door_light(false);
        while elevator.read_obstruction_sensor() {}
        thread::sleep(Duration::new(0, 500000000));
        elevator.set_door_light(true);
        while !elevator.read_obstruction_sensor() {}
        thread::sleep(Duration::new(0, 500000000));
        elevator.set_door_light(false);
        while elevator.read_obstruction_sensor() {}
        thread::sleep(Duration::new(0, 500000000));
        elevator.set_door_light(true);
        while !elevator.read_obstruction_sensor() {}
        thread::sleep(Duration::new(0, 500000000));
        elevator.set_door_light(false);
        while elevator.read_obstruction_sensor() {}
    }
}
|
/// An enum to represent all characters in the LatinExtendedB block.
///
/// Variants are declared in codepoint order, one per Unicode scalar value in
/// `U+0180..=U+024E`, with no gaps — conversions may rely on this ordering
/// (discriminant + 0x180 == codepoint).
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum LatinExtendedB {
    /// \u{180}: 'ƀ'
    LatinSmallLetterBWithStroke,
    /// \u{181}: 'Ɓ'
    LatinCapitalLetterBWithHook,
    /// \u{182}: 'Ƃ'
    LatinCapitalLetterBWithTopbar,
    /// \u{183}: 'ƃ'
    LatinSmallLetterBWithTopbar,
    /// \u{184}: 'Ƅ'
    LatinCapitalLetterToneSix,
    /// \u{185}: 'ƅ'
    LatinSmallLetterToneSix,
    /// \u{186}: 'Ɔ'
    LatinCapitalLetterOpenO,
    /// \u{187}: 'Ƈ'
    LatinCapitalLetterCWithHook,
    /// \u{188}: 'ƈ'
    LatinSmallLetterCWithHook,
    /// \u{189}: 'Ɖ'
    LatinCapitalLetterAfricanD,
    /// \u{18a}: 'Ɗ'
    LatinCapitalLetterDWithHook,
    /// \u{18b}: 'Ƌ'
    LatinCapitalLetterDWithTopbar,
    /// \u{18c}: 'ƌ'
    LatinSmallLetterDWithTopbar,
    /// \u{18d}: 'ƍ'
    LatinSmallLetterTurnedDelta,
    /// \u{18e}: 'Ǝ'
    LatinCapitalLetterReversedE,
    /// \u{18f}: 'Ə'
    LatinCapitalLetterSchwa,
    /// \u{190}: 'Ɛ'
    LatinCapitalLetterOpenE,
    /// \u{191}: 'Ƒ'
    LatinCapitalLetterFWithHook,
    /// \u{192}: 'ƒ'
    LatinSmallLetterFWithHook,
    /// \u{193}: 'Ɠ'
    LatinCapitalLetterGWithHook,
    /// \u{194}: 'Ɣ'
    LatinCapitalLetterGamma,
    /// \u{195}: 'ƕ'
    LatinSmallLetterHv,
    /// \u{196}: 'Ɩ'
    LatinCapitalLetterIota,
    /// \u{197}: 'Ɨ'
    LatinCapitalLetterIWithStroke,
    /// \u{198}: 'Ƙ'
    LatinCapitalLetterKWithHook,
    /// \u{199}: 'ƙ'
    LatinSmallLetterKWithHook,
    /// \u{19a}: 'ƚ'
    LatinSmallLetterLWithBar,
    /// \u{19b}: 'ƛ'
    LatinSmallLetterLambdaWithStroke,
    /// \u{19c}: 'Ɯ'
    LatinCapitalLetterTurnedM,
    /// \u{19d}: 'Ɲ'
    LatinCapitalLetterNWithLeftHook,
    /// \u{19e}: 'ƞ'
    LatinSmallLetterNWithLongRightLeg,
    /// \u{19f}: 'Ɵ'
    LatinCapitalLetterOWithMiddleTilde,
    /// \u{1a0}: 'Ơ'
    LatinCapitalLetterOWithHorn,
    /// \u{1a1}: 'ơ'
    LatinSmallLetterOWithHorn,
    /// \u{1a2}: 'Ƣ'
    LatinCapitalLetterOi,
    /// \u{1a3}: 'ƣ'
    LatinSmallLetterOi,
    /// \u{1a4}: 'Ƥ'
    LatinCapitalLetterPWithHook,
    /// \u{1a5}: 'ƥ'
    LatinSmallLetterPWithHook,
    /// \u{1a6}: 'Ʀ'
    LatinLetterYr,
    /// \u{1a7}: 'Ƨ'
    LatinCapitalLetterToneTwo,
    /// \u{1a8}: 'ƨ'
    LatinSmallLetterToneTwo,
    /// \u{1a9}: 'Ʃ'
    LatinCapitalLetterEsh,
    /// \u{1aa}: 'ƪ'
    LatinLetterReversedEshLoop,
    /// \u{1ab}: 'ƫ'
    LatinSmallLetterTWithPalatalHook,
    /// \u{1ac}: 'Ƭ'
    LatinCapitalLetterTWithHook,
    /// \u{1ad}: 'ƭ'
    LatinSmallLetterTWithHook,
    /// \u{1ae}: 'Ʈ'
    LatinCapitalLetterTWithRetroflexHook,
    /// \u{1af}: 'Ư'
    LatinCapitalLetterUWithHorn,
    /// \u{1b0}: 'ư'
    LatinSmallLetterUWithHorn,
    /// \u{1b1}: 'Ʊ'
    LatinCapitalLetterUpsilon,
    /// \u{1b2}: 'Ʋ'
    LatinCapitalLetterVWithHook,
    /// \u{1b3}: 'Ƴ'
    LatinCapitalLetterYWithHook,
    /// \u{1b4}: 'ƴ'
    LatinSmallLetterYWithHook,
    /// \u{1b5}: 'Ƶ'
    LatinCapitalLetterZWithStroke,
    /// \u{1b6}: 'ƶ'
    LatinSmallLetterZWithStroke,
    /// \u{1b7}: 'Ʒ'
    LatinCapitalLetterEzh,
    /// \u{1b8}: 'Ƹ'
    LatinCapitalLetterEzhReversed,
    /// \u{1b9}: 'ƹ'
    LatinSmallLetterEzhReversed,
    /// \u{1ba}: 'ƺ'
    LatinSmallLetterEzhWithTail,
    /// \u{1bb}: 'ƻ'
    LatinLetterTwoWithStroke,
    /// \u{1bc}: 'Ƽ'
    LatinCapitalLetterToneFive,
    /// \u{1bd}: 'ƽ'
    LatinSmallLetterToneFive,
    /// \u{1be}: 'ƾ'
    LatinLetterInvertedGlottalStopWithStroke,
    /// \u{1bf}: 'ƿ'
    LatinLetterWynn,
    /// \u{1c0}: 'ǀ'
    LatinLetterDentalClick,
    /// \u{1c1}: 'ǁ'
    LatinLetterLateralClick,
    /// \u{1c2}: 'ǂ'
    LatinLetterAlveolarClick,
    /// \u{1c3}: 'ǃ'
    LatinLetterRetroflexClick,
    /// \u{1c4}: 'DŽ'
    LatinCapitalLetterDzWithCaron,
    /// \u{1c5}: 'Dž'
    LatinCapitalLetterDWithSmallLetterZWithCaron,
    /// \u{1c6}: 'dž'
    LatinSmallLetterDzWithCaron,
    /// \u{1c7}: 'LJ'
    LatinCapitalLetterLj,
    /// \u{1c8}: 'Lj'
    LatinCapitalLetterLWithSmallLetterJ,
    /// \u{1c9}: 'lj'
    LatinSmallLetterLj,
    /// \u{1ca}: 'NJ'
    LatinCapitalLetterNj,
    /// \u{1cb}: 'Nj'
    LatinCapitalLetterNWithSmallLetterJ,
    /// \u{1cc}: 'nj'
    LatinSmallLetterNj,
    /// \u{1cd}: 'Ǎ'
    LatinCapitalLetterAWithCaron,
    /// \u{1ce}: 'ǎ'
    LatinSmallLetterAWithCaron,
    /// \u{1cf}: 'Ǐ'
    LatinCapitalLetterIWithCaron,
    /// \u{1d0}: 'ǐ'
    LatinSmallLetterIWithCaron,
    /// \u{1d1}: 'Ǒ'
    LatinCapitalLetterOWithCaron,
    /// \u{1d2}: 'ǒ'
    LatinSmallLetterOWithCaron,
    /// \u{1d3}: 'Ǔ'
    LatinCapitalLetterUWithCaron,
    /// \u{1d4}: 'ǔ'
    LatinSmallLetterUWithCaron,
    /// \u{1d5}: 'Ǖ'
    LatinCapitalLetterUWithDiaeresisAndMacron,
    /// \u{1d6}: 'ǖ'
    LatinSmallLetterUWithDiaeresisAndMacron,
    /// \u{1d7}: 'Ǘ'
    LatinCapitalLetterUWithDiaeresisAndAcute,
    /// \u{1d8}: 'ǘ'
    LatinSmallLetterUWithDiaeresisAndAcute,
    /// \u{1d9}: 'Ǚ'
    LatinCapitalLetterUWithDiaeresisAndCaron,
    /// \u{1da}: 'ǚ'
    LatinSmallLetterUWithDiaeresisAndCaron,
    /// \u{1db}: 'Ǜ'
    LatinCapitalLetterUWithDiaeresisAndGrave,
    /// \u{1dc}: 'ǜ'
    LatinSmallLetterUWithDiaeresisAndGrave,
    /// \u{1dd}: 'ǝ'
    LatinSmallLetterTurnedE,
    /// \u{1de}: 'Ǟ'
    LatinCapitalLetterAWithDiaeresisAndMacron,
    /// \u{1df}: 'ǟ'
    LatinSmallLetterAWithDiaeresisAndMacron,
    /// \u{1e0}: 'Ǡ'
    LatinCapitalLetterAWithDotAboveAndMacron,
    /// \u{1e1}: 'ǡ'
    LatinSmallLetterAWithDotAboveAndMacron,
    /// \u{1e2}: 'Ǣ'
    LatinCapitalLetterAeWithMacron,
    /// \u{1e3}: 'ǣ'
    LatinSmallLetterAeWithMacron,
    /// \u{1e4}: 'Ǥ'
    LatinCapitalLetterGWithStroke,
    /// \u{1e5}: 'ǥ'
    LatinSmallLetterGWithStroke,
    /// \u{1e6}: 'Ǧ'
    LatinCapitalLetterGWithCaron,
    /// \u{1e7}: 'ǧ'
    LatinSmallLetterGWithCaron,
    /// \u{1e8}: 'Ǩ'
    LatinCapitalLetterKWithCaron,
    /// \u{1e9}: 'ǩ'
    LatinSmallLetterKWithCaron,
    /// \u{1ea}: 'Ǫ'
    LatinCapitalLetterOWithOgonek,
    /// \u{1eb}: 'ǫ'
    LatinSmallLetterOWithOgonek,
    /// \u{1ec}: 'Ǭ'
    LatinCapitalLetterOWithOgonekAndMacron,
    /// \u{1ed}: 'ǭ'
    LatinSmallLetterOWithOgonekAndMacron,
    /// \u{1ee}: 'Ǯ'
    LatinCapitalLetterEzhWithCaron,
    /// \u{1ef}: 'ǯ'
    LatinSmallLetterEzhWithCaron,
    /// \u{1f0}: 'ǰ'
    LatinSmallLetterJWithCaron,
    /// \u{1f1}: 'DZ'
    LatinCapitalLetterDz,
    /// \u{1f2}: 'Dz'
    LatinCapitalLetterDWithSmallLetterZ,
    /// \u{1f3}: 'dz'
    LatinSmallLetterDz,
    /// \u{1f4}: 'Ǵ'
    LatinCapitalLetterGWithAcute,
    /// \u{1f5}: 'ǵ'
    LatinSmallLetterGWithAcute,
    /// \u{1f6}: 'Ƕ'
    LatinCapitalLetterHwair,
    /// \u{1f7}: 'Ƿ'
    LatinCapitalLetterWynn,
    /// \u{1f8}: 'Ǹ'
    LatinCapitalLetterNWithGrave,
    /// \u{1f9}: 'ǹ'
    LatinSmallLetterNWithGrave,
    /// \u{1fa}: 'Ǻ'
    LatinCapitalLetterAWithRingAboveAndAcute,
    /// \u{1fb}: 'ǻ'
    LatinSmallLetterAWithRingAboveAndAcute,
    /// \u{1fc}: 'Ǽ'
    LatinCapitalLetterAeWithAcute,
    /// \u{1fd}: 'ǽ'
    LatinSmallLetterAeWithAcute,
    /// \u{1fe}: 'Ǿ'
    LatinCapitalLetterOWithStrokeAndAcute,
    /// \u{1ff}: 'ǿ'
    LatinSmallLetterOWithStrokeAndAcute,
    /// \u{200}: 'Ȁ'
    LatinCapitalLetterAWithDoubleGrave,
    /// \u{201}: 'ȁ'
    LatinSmallLetterAWithDoubleGrave,
    /// \u{202}: 'Ȃ'
    LatinCapitalLetterAWithInvertedBreve,
    /// \u{203}: 'ȃ'
    LatinSmallLetterAWithInvertedBreve,
    /// \u{204}: 'Ȅ'
    LatinCapitalLetterEWithDoubleGrave,
    /// \u{205}: 'ȅ'
    LatinSmallLetterEWithDoubleGrave,
    /// \u{206}: 'Ȇ'
    LatinCapitalLetterEWithInvertedBreve,
    /// \u{207}: 'ȇ'
    LatinSmallLetterEWithInvertedBreve,
    /// \u{208}: 'Ȉ'
    LatinCapitalLetterIWithDoubleGrave,
    /// \u{209}: 'ȉ'
    LatinSmallLetterIWithDoubleGrave,
    /// \u{20a}: 'Ȋ'
    LatinCapitalLetterIWithInvertedBreve,
    /// \u{20b}: 'ȋ'
    LatinSmallLetterIWithInvertedBreve,
    /// \u{20c}: 'Ȍ'
    LatinCapitalLetterOWithDoubleGrave,
    /// \u{20d}: 'ȍ'
    LatinSmallLetterOWithDoubleGrave,
    /// \u{20e}: 'Ȏ'
    LatinCapitalLetterOWithInvertedBreve,
    /// \u{20f}: 'ȏ'
    LatinSmallLetterOWithInvertedBreve,
    /// \u{210}: 'Ȑ'
    LatinCapitalLetterRWithDoubleGrave,
    /// \u{211}: 'ȑ'
    LatinSmallLetterRWithDoubleGrave,
    /// \u{212}: 'Ȓ'
    LatinCapitalLetterRWithInvertedBreve,
    /// \u{213}: 'ȓ'
    LatinSmallLetterRWithInvertedBreve,
    /// \u{214}: 'Ȕ'
    LatinCapitalLetterUWithDoubleGrave,
    /// \u{215}: 'ȕ'
    LatinSmallLetterUWithDoubleGrave,
    /// \u{216}: 'Ȗ'
    LatinCapitalLetterUWithInvertedBreve,
    /// \u{217}: 'ȗ'
    LatinSmallLetterUWithInvertedBreve,
    /// \u{218}: 'Ș'
    LatinCapitalLetterSWithCommaBelow,
    /// \u{219}: 'ș'
    LatinSmallLetterSWithCommaBelow,
    /// \u{21a}: 'Ț'
    LatinCapitalLetterTWithCommaBelow,
    /// \u{21b}: 'ț'
    LatinSmallLetterTWithCommaBelow,
    /// \u{21c}: 'Ȝ'
    LatinCapitalLetterYogh,
    /// \u{21d}: 'ȝ'
    LatinSmallLetterYogh,
    /// \u{21e}: 'Ȟ'
    LatinCapitalLetterHWithCaron,
    /// \u{21f}: 'ȟ'
    LatinSmallLetterHWithCaron,
    /// \u{220}: 'Ƞ'
    LatinCapitalLetterNWithLongRightLeg,
    /// \u{221}: 'ȡ'
    LatinSmallLetterDWithCurl,
    /// \u{222}: 'Ȣ'
    LatinCapitalLetterOu,
    /// \u{223}: 'ȣ'
    LatinSmallLetterOu,
    /// \u{224}: 'Ȥ'
    LatinCapitalLetterZWithHook,
    /// \u{225}: 'ȥ'
    LatinSmallLetterZWithHook,
    /// \u{226}: 'Ȧ'
    LatinCapitalLetterAWithDotAbove,
    /// \u{227}: 'ȧ'
    LatinSmallLetterAWithDotAbove,
    /// \u{228}: 'Ȩ'
    LatinCapitalLetterEWithCedilla,
    /// \u{229}: 'ȩ'
    LatinSmallLetterEWithCedilla,
    /// \u{22a}: 'Ȫ'
    LatinCapitalLetterOWithDiaeresisAndMacron,
    /// \u{22b}: 'ȫ'
    LatinSmallLetterOWithDiaeresisAndMacron,
    /// \u{22c}: 'Ȭ'
    LatinCapitalLetterOWithTildeAndMacron,
    /// \u{22d}: 'ȭ'
    LatinSmallLetterOWithTildeAndMacron,
    /// \u{22e}: 'Ȯ'
    LatinCapitalLetterOWithDotAbove,
    /// \u{22f}: 'ȯ'
    LatinSmallLetterOWithDotAbove,
    /// \u{230}: 'Ȱ'
    LatinCapitalLetterOWithDotAboveAndMacron,
    /// \u{231}: 'ȱ'
    LatinSmallLetterOWithDotAboveAndMacron,
    /// \u{232}: 'Ȳ'
    LatinCapitalLetterYWithMacron,
    /// \u{233}: 'ȳ'
    LatinSmallLetterYWithMacron,
    /// \u{234}: 'ȴ'
    LatinSmallLetterLWithCurl,
    /// \u{235}: 'ȵ'
    LatinSmallLetterNWithCurl,
    /// \u{236}: 'ȶ'
    LatinSmallLetterTWithCurl,
    /// \u{237}: 'ȷ'
    LatinSmallLetterDotlessJ,
    /// \u{238}: 'ȸ'
    LatinSmallLetterDbDigraph,
    /// \u{239}: 'ȹ'
    LatinSmallLetterQpDigraph,
    /// \u{23a}: 'Ⱥ'
    LatinCapitalLetterAWithStroke,
    /// \u{23b}: 'Ȼ'
    LatinCapitalLetterCWithStroke,
    /// \u{23c}: 'ȼ'
    LatinSmallLetterCWithStroke,
    /// \u{23d}: 'Ƚ'
    LatinCapitalLetterLWithBar,
    /// \u{23e}: 'Ⱦ'
    LatinCapitalLetterTWithDiagonalStroke,
    /// \u{23f}: 'ȿ'
    LatinSmallLetterSWithSwashTail,
    /// \u{240}: 'ɀ'
    LatinSmallLetterZWithSwashTail,
    /// \u{241}: 'Ɂ'
    LatinCapitalLetterGlottalStop,
    /// \u{242}: 'ɂ'
    LatinSmallLetterGlottalStop,
    /// \u{243}: 'Ƀ'
    LatinCapitalLetterBWithStroke,
    /// \u{244}: 'Ʉ'
    LatinCapitalLetterUBar,
    /// \u{245}: 'Ʌ'
    LatinCapitalLetterTurnedV,
    /// \u{246}: 'Ɇ'
    LatinCapitalLetterEWithStroke,
    /// \u{247}: 'ɇ'
    LatinSmallLetterEWithStroke,
    /// \u{248}: 'Ɉ'
    LatinCapitalLetterJWithStroke,
    /// \u{249}: 'ɉ'
    LatinSmallLetterJWithStroke,
    /// \u{24a}: 'Ɋ'
    LatinCapitalLetterSmallQWithHookTail,
    /// \u{24b}: 'ɋ'
    LatinSmallLetterQWithHookTail,
    /// \u{24c}: 'Ɍ'
    LatinCapitalLetterRWithStroke,
    /// \u{24d}: 'ɍ'
    LatinSmallLetterRWithStroke,
    /// \u{24e}: 'Ɏ'
    LatinCapitalLetterYWithStroke,
}
impl Into<char> for LatinExtendedB {
fn into(self) -> char {
match self {
LatinExtendedB::LatinSmallLetterBWithStroke => 'ƀ',
LatinExtendedB::LatinCapitalLetterBWithHook => 'Ɓ',
LatinExtendedB::LatinCapitalLetterBWithTopbar => 'Ƃ',
LatinExtendedB::LatinSmallLetterBWithTopbar => 'ƃ',
LatinExtendedB::LatinCapitalLetterToneSix => 'Ƅ',
LatinExtendedB::LatinSmallLetterToneSix => 'ƅ',
LatinExtendedB::LatinCapitalLetterOpenO => 'Ɔ',
LatinExtendedB::LatinCapitalLetterCWithHook => 'Ƈ',
LatinExtendedB::LatinSmallLetterCWithHook => 'ƈ',
LatinExtendedB::LatinCapitalLetterAfricanD => 'Ɖ',
LatinExtendedB::LatinCapitalLetterDWithHook => 'Ɗ',
LatinExtendedB::LatinCapitalLetterDWithTopbar => 'Ƌ',
LatinExtendedB::LatinSmallLetterDWithTopbar => 'ƌ',
LatinExtendedB::LatinSmallLetterTurnedDelta => 'ƍ',
LatinExtendedB::LatinCapitalLetterReversedE => 'Ǝ',
LatinExtendedB::LatinCapitalLetterSchwa => 'Ə',
LatinExtendedB::LatinCapitalLetterOpenE => 'Ɛ',
LatinExtendedB::LatinCapitalLetterFWithHook => 'Ƒ',
LatinExtendedB::LatinSmallLetterFWithHook => 'ƒ',
LatinExtendedB::LatinCapitalLetterGWithHook => 'Ɠ',
LatinExtendedB::LatinCapitalLetterGamma => 'Ɣ',
LatinExtendedB::LatinSmallLetterHv => 'ƕ',
LatinExtendedB::LatinCapitalLetterIota => 'Ɩ',
LatinExtendedB::LatinCapitalLetterIWithStroke => 'Ɨ',
LatinExtendedB::LatinCapitalLetterKWithHook => 'Ƙ',
LatinExtendedB::LatinSmallLetterKWithHook => 'ƙ',
LatinExtendedB::LatinSmallLetterLWithBar => 'ƚ',
LatinExtendedB::LatinSmallLetterLambdaWithStroke => 'ƛ',
LatinExtendedB::LatinCapitalLetterTurnedM => 'Ɯ',
LatinExtendedB::LatinCapitalLetterNWithLeftHook => 'Ɲ',
LatinExtendedB::LatinSmallLetterNWithLongRightLeg => 'ƞ',
LatinExtendedB::LatinCapitalLetterOWithMiddleTilde => 'Ɵ',
LatinExtendedB::LatinCapitalLetterOWithHorn => 'Ơ',
LatinExtendedB::LatinSmallLetterOWithHorn => 'ơ',
LatinExtendedB::LatinCapitalLetterOi => 'Ƣ',
LatinExtendedB::LatinSmallLetterOi => 'ƣ',
LatinExtendedB::LatinCapitalLetterPWithHook => 'Ƥ',
LatinExtendedB::LatinSmallLetterPWithHook => 'ƥ',
LatinExtendedB::LatinLetterYr => 'Ʀ',
LatinExtendedB::LatinCapitalLetterToneTwo => 'Ƨ',
LatinExtendedB::LatinSmallLetterToneTwo => 'ƨ',
LatinExtendedB::LatinCapitalLetterEsh => 'Ʃ',
LatinExtendedB::LatinLetterReversedEshLoop => 'ƪ',
LatinExtendedB::LatinSmallLetterTWithPalatalHook => 'ƫ',
LatinExtendedB::LatinCapitalLetterTWithHook => 'Ƭ',
LatinExtendedB::LatinSmallLetterTWithHook => 'ƭ',
LatinExtendedB::LatinCapitalLetterTWithRetroflexHook => 'Ʈ',
LatinExtendedB::LatinCapitalLetterUWithHorn => 'Ư',
LatinExtendedB::LatinSmallLetterUWithHorn => 'ư',
LatinExtendedB::LatinCapitalLetterUpsilon => 'Ʊ',
LatinExtendedB::LatinCapitalLetterVWithHook => 'Ʋ',
LatinExtendedB::LatinCapitalLetterYWithHook => 'Ƴ',
LatinExtendedB::LatinSmallLetterYWithHook => 'ƴ',
LatinExtendedB::LatinCapitalLetterZWithStroke => 'Ƶ',
LatinExtendedB::LatinSmallLetterZWithStroke => 'ƶ',
LatinExtendedB::LatinCapitalLetterEzh => 'Ʒ',
LatinExtendedB::LatinCapitalLetterEzhReversed => 'Ƹ',
LatinExtendedB::LatinSmallLetterEzhReversed => 'ƹ',
LatinExtendedB::LatinSmallLetterEzhWithTail => 'ƺ',
LatinExtendedB::LatinLetterTwoWithStroke => 'ƻ',
LatinExtendedB::LatinCapitalLetterToneFive => 'Ƽ',
LatinExtendedB::LatinSmallLetterToneFive => 'ƽ',
LatinExtendedB::LatinLetterInvertedGlottalStopWithStroke => 'ƾ',
LatinExtendedB::LatinLetterWynn => 'ƿ',
LatinExtendedB::LatinLetterDentalClick => 'ǀ',
LatinExtendedB::LatinLetterLateralClick => 'ǁ',
LatinExtendedB::LatinLetterAlveolarClick => 'ǂ',
LatinExtendedB::LatinLetterRetroflexClick => 'ǃ',
LatinExtendedB::LatinCapitalLetterDzWithCaron => 'DŽ',
LatinExtendedB::LatinCapitalLetterDWithSmallLetterZWithCaron => 'Dž',
LatinExtendedB::LatinSmallLetterDzWithCaron => 'dž',
LatinExtendedB::LatinCapitalLetterLj => 'LJ',
LatinExtendedB::LatinCapitalLetterLWithSmallLetterJ => 'Lj',
LatinExtendedB::LatinSmallLetterLj => 'lj',
LatinExtendedB::LatinCapitalLetterNj => 'NJ',
LatinExtendedB::LatinCapitalLetterNWithSmallLetterJ => 'Nj',
LatinExtendedB::LatinSmallLetterNj => 'nj',
LatinExtendedB::LatinCapitalLetterAWithCaron => 'Ǎ',
LatinExtendedB::LatinSmallLetterAWithCaron => 'ǎ',
LatinExtendedB::LatinCapitalLetterIWithCaron => 'Ǐ',
LatinExtendedB::LatinSmallLetterIWithCaron => 'ǐ',
LatinExtendedB::LatinCapitalLetterOWithCaron => 'Ǒ',
LatinExtendedB::LatinSmallLetterOWithCaron => 'ǒ',
LatinExtendedB::LatinCapitalLetterUWithCaron => 'Ǔ',
LatinExtendedB::LatinSmallLetterUWithCaron => 'ǔ',
LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndMacron => 'Ǖ',
LatinExtendedB::LatinSmallLetterUWithDiaeresisAndMacron => 'ǖ',
LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndAcute => 'Ǘ',
LatinExtendedB::LatinSmallLetterUWithDiaeresisAndAcute => 'ǘ',
LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndCaron => 'Ǚ',
LatinExtendedB::LatinSmallLetterUWithDiaeresisAndCaron => 'ǚ',
LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndGrave => 'Ǜ',
LatinExtendedB::LatinSmallLetterUWithDiaeresisAndGrave => 'ǜ',
LatinExtendedB::LatinSmallLetterTurnedE => 'ǝ',
LatinExtendedB::LatinCapitalLetterAWithDiaeresisAndMacron => 'Ǟ',
LatinExtendedB::LatinSmallLetterAWithDiaeresisAndMacron => 'ǟ',
LatinExtendedB::LatinCapitalLetterAWithDotAboveAndMacron => 'Ǡ',
LatinExtendedB::LatinSmallLetterAWithDotAboveAndMacron => 'ǡ',
LatinExtendedB::LatinCapitalLetterAeWithMacron => 'Ǣ',
LatinExtendedB::LatinSmallLetterAeWithMacron => 'ǣ',
LatinExtendedB::LatinCapitalLetterGWithStroke => 'Ǥ',
LatinExtendedB::LatinSmallLetterGWithStroke => 'ǥ',
LatinExtendedB::LatinCapitalLetterGWithCaron => 'Ǧ',
LatinExtendedB::LatinSmallLetterGWithCaron => 'ǧ',
LatinExtendedB::LatinCapitalLetterKWithCaron => 'Ǩ',
LatinExtendedB::LatinSmallLetterKWithCaron => 'ǩ',
LatinExtendedB::LatinCapitalLetterOWithOgonek => 'Ǫ',
LatinExtendedB::LatinSmallLetterOWithOgonek => 'ǫ',
LatinExtendedB::LatinCapitalLetterOWithOgonekAndMacron => 'Ǭ',
LatinExtendedB::LatinSmallLetterOWithOgonekAndMacron => 'ǭ',
LatinExtendedB::LatinCapitalLetterEzhWithCaron => 'Ǯ',
LatinExtendedB::LatinSmallLetterEzhWithCaron => 'ǯ',
LatinExtendedB::LatinSmallLetterJWithCaron => 'ǰ',
LatinExtendedB::LatinCapitalLetterDz => 'DZ',
LatinExtendedB::LatinCapitalLetterDWithSmallLetterZ => 'Dz',
LatinExtendedB::LatinSmallLetterDz => 'dz',
LatinExtendedB::LatinCapitalLetterGWithAcute => 'Ǵ',
LatinExtendedB::LatinSmallLetterGWithAcute => 'ǵ',
LatinExtendedB::LatinCapitalLetterHwair => 'Ƕ',
LatinExtendedB::LatinCapitalLetterWynn => 'Ƿ',
LatinExtendedB::LatinCapitalLetterNWithGrave => 'Ǹ',
LatinExtendedB::LatinSmallLetterNWithGrave => 'ǹ',
LatinExtendedB::LatinCapitalLetterAWithRingAboveAndAcute => 'Ǻ',
LatinExtendedB::LatinSmallLetterAWithRingAboveAndAcute => 'ǻ',
LatinExtendedB::LatinCapitalLetterAeWithAcute => 'Ǽ',
LatinExtendedB::LatinSmallLetterAeWithAcute => 'ǽ',
LatinExtendedB::LatinCapitalLetterOWithStrokeAndAcute => 'Ǿ',
LatinExtendedB::LatinSmallLetterOWithStrokeAndAcute => 'ǿ',
LatinExtendedB::LatinCapitalLetterAWithDoubleGrave => 'Ȁ',
LatinExtendedB::LatinSmallLetterAWithDoubleGrave => 'ȁ',
LatinExtendedB::LatinCapitalLetterAWithInvertedBreve => 'Ȃ',
LatinExtendedB::LatinSmallLetterAWithInvertedBreve => 'ȃ',
LatinExtendedB::LatinCapitalLetterEWithDoubleGrave => 'Ȅ',
LatinExtendedB::LatinSmallLetterEWithDoubleGrave => 'ȅ',
LatinExtendedB::LatinCapitalLetterEWithInvertedBreve => 'Ȇ',
LatinExtendedB::LatinSmallLetterEWithInvertedBreve => 'ȇ',
LatinExtendedB::LatinCapitalLetterIWithDoubleGrave => 'Ȉ',
LatinExtendedB::LatinSmallLetterIWithDoubleGrave => 'ȉ',
LatinExtendedB::LatinCapitalLetterIWithInvertedBreve => 'Ȋ',
LatinExtendedB::LatinSmallLetterIWithInvertedBreve => 'ȋ',
LatinExtendedB::LatinCapitalLetterOWithDoubleGrave => 'Ȍ',
LatinExtendedB::LatinSmallLetterOWithDoubleGrave => 'ȍ',
LatinExtendedB::LatinCapitalLetterOWithInvertedBreve => 'Ȏ',
LatinExtendedB::LatinSmallLetterOWithInvertedBreve => 'ȏ',
LatinExtendedB::LatinCapitalLetterRWithDoubleGrave => 'Ȑ',
LatinExtendedB::LatinSmallLetterRWithDoubleGrave => 'ȑ',
LatinExtendedB::LatinCapitalLetterRWithInvertedBreve => 'Ȓ',
LatinExtendedB::LatinSmallLetterRWithInvertedBreve => 'ȓ',
LatinExtendedB::LatinCapitalLetterUWithDoubleGrave => 'Ȕ',
LatinExtendedB::LatinSmallLetterUWithDoubleGrave => 'ȕ',
LatinExtendedB::LatinCapitalLetterUWithInvertedBreve => 'Ȗ',
LatinExtendedB::LatinSmallLetterUWithInvertedBreve => 'ȗ',
LatinExtendedB::LatinCapitalLetterSWithCommaBelow => 'Ș',
LatinExtendedB::LatinSmallLetterSWithCommaBelow => 'ș',
LatinExtendedB::LatinCapitalLetterTWithCommaBelow => 'Ț',
LatinExtendedB::LatinSmallLetterTWithCommaBelow => 'ț',
LatinExtendedB::LatinCapitalLetterYogh => 'Ȝ',
LatinExtendedB::LatinSmallLetterYogh => 'ȝ',
LatinExtendedB::LatinCapitalLetterHWithCaron => 'Ȟ',
LatinExtendedB::LatinSmallLetterHWithCaron => 'ȟ',
LatinExtendedB::LatinCapitalLetterNWithLongRightLeg => 'Ƞ',
LatinExtendedB::LatinSmallLetterDWithCurl => 'ȡ',
LatinExtendedB::LatinCapitalLetterOu => 'Ȣ',
LatinExtendedB::LatinSmallLetterOu => 'ȣ',
LatinExtendedB::LatinCapitalLetterZWithHook => 'Ȥ',
LatinExtendedB::LatinSmallLetterZWithHook => 'ȥ',
LatinExtendedB::LatinCapitalLetterAWithDotAbove => 'Ȧ',
LatinExtendedB::LatinSmallLetterAWithDotAbove => 'ȧ',
LatinExtendedB::LatinCapitalLetterEWithCedilla => 'Ȩ',
LatinExtendedB::LatinSmallLetterEWithCedilla => 'ȩ',
LatinExtendedB::LatinCapitalLetterOWithDiaeresisAndMacron => 'Ȫ',
LatinExtendedB::LatinSmallLetterOWithDiaeresisAndMacron => 'ȫ',
LatinExtendedB::LatinCapitalLetterOWithTildeAndMacron => 'Ȭ',
LatinExtendedB::LatinSmallLetterOWithTildeAndMacron => 'ȭ',
LatinExtendedB::LatinCapitalLetterOWithDotAbove => 'Ȯ',
LatinExtendedB::LatinSmallLetterOWithDotAbove => 'ȯ',
LatinExtendedB::LatinCapitalLetterOWithDotAboveAndMacron => 'Ȱ',
LatinExtendedB::LatinSmallLetterOWithDotAboveAndMacron => 'ȱ',
LatinExtendedB::LatinCapitalLetterYWithMacron => 'Ȳ',
LatinExtendedB::LatinSmallLetterYWithMacron => 'ȳ',
LatinExtendedB::LatinSmallLetterLWithCurl => 'ȴ',
LatinExtendedB::LatinSmallLetterNWithCurl => 'ȵ',
LatinExtendedB::LatinSmallLetterTWithCurl => 'ȶ',
LatinExtendedB::LatinSmallLetterDotlessJ => 'ȷ',
LatinExtendedB::LatinSmallLetterDbDigraph => 'ȸ',
LatinExtendedB::LatinSmallLetterQpDigraph => 'ȹ',
LatinExtendedB::LatinCapitalLetterAWithStroke => 'Ⱥ',
LatinExtendedB::LatinCapitalLetterCWithStroke => 'Ȼ',
LatinExtendedB::LatinSmallLetterCWithStroke => 'ȼ',
LatinExtendedB::LatinCapitalLetterLWithBar => 'Ƚ',
LatinExtendedB::LatinCapitalLetterTWithDiagonalStroke => 'Ⱦ',
LatinExtendedB::LatinSmallLetterSWithSwashTail => 'ȿ',
LatinExtendedB::LatinSmallLetterZWithSwashTail => 'ɀ',
LatinExtendedB::LatinCapitalLetterGlottalStop => 'Ɂ',
LatinExtendedB::LatinSmallLetterGlottalStop => 'ɂ',
LatinExtendedB::LatinCapitalLetterBWithStroke => 'Ƀ',
LatinExtendedB::LatinCapitalLetterUBar => 'Ʉ',
LatinExtendedB::LatinCapitalLetterTurnedV => 'Ʌ',
LatinExtendedB::LatinCapitalLetterEWithStroke => 'Ɇ',
LatinExtendedB::LatinSmallLetterEWithStroke => 'ɇ',
LatinExtendedB::LatinCapitalLetterJWithStroke => 'Ɉ',
LatinExtendedB::LatinSmallLetterJWithStroke => 'ɉ',
LatinExtendedB::LatinCapitalLetterSmallQWithHookTail => 'Ɋ',
LatinExtendedB::LatinSmallLetterQWithHookTail => 'ɋ',
LatinExtendedB::LatinCapitalLetterRWithStroke => 'Ɍ',
LatinExtendedB::LatinSmallLetterRWithStroke => 'ɍ',
LatinExtendedB::LatinCapitalLetterYWithStroke => 'Ɏ',
}
}
}
impl std::convert::TryFrom<char> for LatinExtendedB {
    type Error = ();
    /// Maps a `char` back to its `LatinExtendedB` variant.
    ///
    /// Returns `Err(())` for any character outside this Unicode block
    /// (the inverse of the `char` conversion above).
    fn try_from(c: char) -> Result<Self, Self::Error> {
        // One arm per code point in U+0180..=U+024E; generated code.
        match c {
            'ƀ' => Ok(LatinExtendedB::LatinSmallLetterBWithStroke),
            'Ɓ' => Ok(LatinExtendedB::LatinCapitalLetterBWithHook),
            'Ƃ' => Ok(LatinExtendedB::LatinCapitalLetterBWithTopbar),
            'ƃ' => Ok(LatinExtendedB::LatinSmallLetterBWithTopbar),
            'Ƅ' => Ok(LatinExtendedB::LatinCapitalLetterToneSix),
            'ƅ' => Ok(LatinExtendedB::LatinSmallLetterToneSix),
            'Ɔ' => Ok(LatinExtendedB::LatinCapitalLetterOpenO),
            'Ƈ' => Ok(LatinExtendedB::LatinCapitalLetterCWithHook),
            'ƈ' => Ok(LatinExtendedB::LatinSmallLetterCWithHook),
            'Ɖ' => Ok(LatinExtendedB::LatinCapitalLetterAfricanD),
            'Ɗ' => Ok(LatinExtendedB::LatinCapitalLetterDWithHook),
            'Ƌ' => Ok(LatinExtendedB::LatinCapitalLetterDWithTopbar),
            'ƌ' => Ok(LatinExtendedB::LatinSmallLetterDWithTopbar),
            'ƍ' => Ok(LatinExtendedB::LatinSmallLetterTurnedDelta),
            'Ǝ' => Ok(LatinExtendedB::LatinCapitalLetterReversedE),
            'Ə' => Ok(LatinExtendedB::LatinCapitalLetterSchwa),
            'Ɛ' => Ok(LatinExtendedB::LatinCapitalLetterOpenE),
            'Ƒ' => Ok(LatinExtendedB::LatinCapitalLetterFWithHook),
            'ƒ' => Ok(LatinExtendedB::LatinSmallLetterFWithHook),
            'Ɠ' => Ok(LatinExtendedB::LatinCapitalLetterGWithHook),
            'Ɣ' => Ok(LatinExtendedB::LatinCapitalLetterGamma),
            'ƕ' => Ok(LatinExtendedB::LatinSmallLetterHv),
            'Ɩ' => Ok(LatinExtendedB::LatinCapitalLetterIota),
            'Ɨ' => Ok(LatinExtendedB::LatinCapitalLetterIWithStroke),
            'Ƙ' => Ok(LatinExtendedB::LatinCapitalLetterKWithHook),
            'ƙ' => Ok(LatinExtendedB::LatinSmallLetterKWithHook),
            'ƚ' => Ok(LatinExtendedB::LatinSmallLetterLWithBar),
            'ƛ' => Ok(LatinExtendedB::LatinSmallLetterLambdaWithStroke),
            'Ɯ' => Ok(LatinExtendedB::LatinCapitalLetterTurnedM),
            'Ɲ' => Ok(LatinExtendedB::LatinCapitalLetterNWithLeftHook),
            'ƞ' => Ok(LatinExtendedB::LatinSmallLetterNWithLongRightLeg),
            'Ɵ' => Ok(LatinExtendedB::LatinCapitalLetterOWithMiddleTilde),
            'Ơ' => Ok(LatinExtendedB::LatinCapitalLetterOWithHorn),
            'ơ' => Ok(LatinExtendedB::LatinSmallLetterOWithHorn),
            'Ƣ' => Ok(LatinExtendedB::LatinCapitalLetterOi),
            'ƣ' => Ok(LatinExtendedB::LatinSmallLetterOi),
            'Ƥ' => Ok(LatinExtendedB::LatinCapitalLetterPWithHook),
            'ƥ' => Ok(LatinExtendedB::LatinSmallLetterPWithHook),
            'Ʀ' => Ok(LatinExtendedB::LatinLetterYr),
            'Ƨ' => Ok(LatinExtendedB::LatinCapitalLetterToneTwo),
            'ƨ' => Ok(LatinExtendedB::LatinSmallLetterToneTwo),
            'Ʃ' => Ok(LatinExtendedB::LatinCapitalLetterEsh),
            'ƪ' => Ok(LatinExtendedB::LatinLetterReversedEshLoop),
            'ƫ' => Ok(LatinExtendedB::LatinSmallLetterTWithPalatalHook),
            'Ƭ' => Ok(LatinExtendedB::LatinCapitalLetterTWithHook),
            'ƭ' => Ok(LatinExtendedB::LatinSmallLetterTWithHook),
            'Ʈ' => Ok(LatinExtendedB::LatinCapitalLetterTWithRetroflexHook),
            'Ư' => Ok(LatinExtendedB::LatinCapitalLetterUWithHorn),
            'ư' => Ok(LatinExtendedB::LatinSmallLetterUWithHorn),
            'Ʊ' => Ok(LatinExtendedB::LatinCapitalLetterUpsilon),
            'Ʋ' => Ok(LatinExtendedB::LatinCapitalLetterVWithHook),
            'Ƴ' => Ok(LatinExtendedB::LatinCapitalLetterYWithHook),
            'ƴ' => Ok(LatinExtendedB::LatinSmallLetterYWithHook),
            'Ƶ' => Ok(LatinExtendedB::LatinCapitalLetterZWithStroke),
            'ƶ' => Ok(LatinExtendedB::LatinSmallLetterZWithStroke),
            'Ʒ' => Ok(LatinExtendedB::LatinCapitalLetterEzh),
            'Ƹ' => Ok(LatinExtendedB::LatinCapitalLetterEzhReversed),
            'ƹ' => Ok(LatinExtendedB::LatinSmallLetterEzhReversed),
            'ƺ' => Ok(LatinExtendedB::LatinSmallLetterEzhWithTail),
            'ƻ' => Ok(LatinExtendedB::LatinLetterTwoWithStroke),
            'Ƽ' => Ok(LatinExtendedB::LatinCapitalLetterToneFive),
            'ƽ' => Ok(LatinExtendedB::LatinSmallLetterToneFive),
            'ƾ' => Ok(LatinExtendedB::LatinLetterInvertedGlottalStopWithStroke),
            'ƿ' => Ok(LatinExtendedB::LatinLetterWynn),
            'ǀ' => Ok(LatinExtendedB::LatinLetterDentalClick),
            'ǁ' => Ok(LatinExtendedB::LatinLetterLateralClick),
            'ǂ' => Ok(LatinExtendedB::LatinLetterAlveolarClick),
            'ǃ' => Ok(LatinExtendedB::LatinLetterRetroflexClick),
            'DŽ' => Ok(LatinExtendedB::LatinCapitalLetterDzWithCaron),
            'Dž' => Ok(LatinExtendedB::LatinCapitalLetterDWithSmallLetterZWithCaron),
            'dž' => Ok(LatinExtendedB::LatinSmallLetterDzWithCaron),
            'LJ' => Ok(LatinExtendedB::LatinCapitalLetterLj),
            'Lj' => Ok(LatinExtendedB::LatinCapitalLetterLWithSmallLetterJ),
            'lj' => Ok(LatinExtendedB::LatinSmallLetterLj),
            'NJ' => Ok(LatinExtendedB::LatinCapitalLetterNj),
            'Nj' => Ok(LatinExtendedB::LatinCapitalLetterNWithSmallLetterJ),
            'nj' => Ok(LatinExtendedB::LatinSmallLetterNj),
            'Ǎ' => Ok(LatinExtendedB::LatinCapitalLetterAWithCaron),
            'ǎ' => Ok(LatinExtendedB::LatinSmallLetterAWithCaron),
            'Ǐ' => Ok(LatinExtendedB::LatinCapitalLetterIWithCaron),
            'ǐ' => Ok(LatinExtendedB::LatinSmallLetterIWithCaron),
            'Ǒ' => Ok(LatinExtendedB::LatinCapitalLetterOWithCaron),
            'ǒ' => Ok(LatinExtendedB::LatinSmallLetterOWithCaron),
            'Ǔ' => Ok(LatinExtendedB::LatinCapitalLetterUWithCaron),
            'ǔ' => Ok(LatinExtendedB::LatinSmallLetterUWithCaron),
            'Ǖ' => Ok(LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndMacron),
            'ǖ' => Ok(LatinExtendedB::LatinSmallLetterUWithDiaeresisAndMacron),
            'Ǘ' => Ok(LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndAcute),
            'ǘ' => Ok(LatinExtendedB::LatinSmallLetterUWithDiaeresisAndAcute),
            'Ǚ' => Ok(LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndCaron),
            'ǚ' => Ok(LatinExtendedB::LatinSmallLetterUWithDiaeresisAndCaron),
            'Ǜ' => Ok(LatinExtendedB::LatinCapitalLetterUWithDiaeresisAndGrave),
            'ǜ' => Ok(LatinExtendedB::LatinSmallLetterUWithDiaeresisAndGrave),
            'ǝ' => Ok(LatinExtendedB::LatinSmallLetterTurnedE),
            'Ǟ' => Ok(LatinExtendedB::LatinCapitalLetterAWithDiaeresisAndMacron),
            'ǟ' => Ok(LatinExtendedB::LatinSmallLetterAWithDiaeresisAndMacron),
            'Ǡ' => Ok(LatinExtendedB::LatinCapitalLetterAWithDotAboveAndMacron),
            'ǡ' => Ok(LatinExtendedB::LatinSmallLetterAWithDotAboveAndMacron),
            'Ǣ' => Ok(LatinExtendedB::LatinCapitalLetterAeWithMacron),
            'ǣ' => Ok(LatinExtendedB::LatinSmallLetterAeWithMacron),
            'Ǥ' => Ok(LatinExtendedB::LatinCapitalLetterGWithStroke),
            'ǥ' => Ok(LatinExtendedB::LatinSmallLetterGWithStroke),
            'Ǧ' => Ok(LatinExtendedB::LatinCapitalLetterGWithCaron),
            'ǧ' => Ok(LatinExtendedB::LatinSmallLetterGWithCaron),
            'Ǩ' => Ok(LatinExtendedB::LatinCapitalLetterKWithCaron),
            'ǩ' => Ok(LatinExtendedB::LatinSmallLetterKWithCaron),
            'Ǫ' => Ok(LatinExtendedB::LatinCapitalLetterOWithOgonek),
            'ǫ' => Ok(LatinExtendedB::LatinSmallLetterOWithOgonek),
            'Ǭ' => Ok(LatinExtendedB::LatinCapitalLetterOWithOgonekAndMacron),
            'ǭ' => Ok(LatinExtendedB::LatinSmallLetterOWithOgonekAndMacron),
            'Ǯ' => Ok(LatinExtendedB::LatinCapitalLetterEzhWithCaron),
            'ǯ' => Ok(LatinExtendedB::LatinSmallLetterEzhWithCaron),
            'ǰ' => Ok(LatinExtendedB::LatinSmallLetterJWithCaron),
            'DZ' => Ok(LatinExtendedB::LatinCapitalLetterDz),
            'Dz' => Ok(LatinExtendedB::LatinCapitalLetterDWithSmallLetterZ),
            'dz' => Ok(LatinExtendedB::LatinSmallLetterDz),
            'Ǵ' => Ok(LatinExtendedB::LatinCapitalLetterGWithAcute),
            'ǵ' => Ok(LatinExtendedB::LatinSmallLetterGWithAcute),
            'Ƕ' => Ok(LatinExtendedB::LatinCapitalLetterHwair),
            'Ƿ' => Ok(LatinExtendedB::LatinCapitalLetterWynn),
            'Ǹ' => Ok(LatinExtendedB::LatinCapitalLetterNWithGrave),
            'ǹ' => Ok(LatinExtendedB::LatinSmallLetterNWithGrave),
            'Ǻ' => Ok(LatinExtendedB::LatinCapitalLetterAWithRingAboveAndAcute),
            'ǻ' => Ok(LatinExtendedB::LatinSmallLetterAWithRingAboveAndAcute),
            'Ǽ' => Ok(LatinExtendedB::LatinCapitalLetterAeWithAcute),
            'ǽ' => Ok(LatinExtendedB::LatinSmallLetterAeWithAcute),
            'Ǿ' => Ok(LatinExtendedB::LatinCapitalLetterOWithStrokeAndAcute),
            'ǿ' => Ok(LatinExtendedB::LatinSmallLetterOWithStrokeAndAcute),
            'Ȁ' => Ok(LatinExtendedB::LatinCapitalLetterAWithDoubleGrave),
            'ȁ' => Ok(LatinExtendedB::LatinSmallLetterAWithDoubleGrave),
            'Ȃ' => Ok(LatinExtendedB::LatinCapitalLetterAWithInvertedBreve),
            'ȃ' => Ok(LatinExtendedB::LatinSmallLetterAWithInvertedBreve),
            'Ȅ' => Ok(LatinExtendedB::LatinCapitalLetterEWithDoubleGrave),
            'ȅ' => Ok(LatinExtendedB::LatinSmallLetterEWithDoubleGrave),
            'Ȇ' => Ok(LatinExtendedB::LatinCapitalLetterEWithInvertedBreve),
            'ȇ' => Ok(LatinExtendedB::LatinSmallLetterEWithInvertedBreve),
            'Ȉ' => Ok(LatinExtendedB::LatinCapitalLetterIWithDoubleGrave),
            'ȉ' => Ok(LatinExtendedB::LatinSmallLetterIWithDoubleGrave),
            'Ȋ' => Ok(LatinExtendedB::LatinCapitalLetterIWithInvertedBreve),
            'ȋ' => Ok(LatinExtendedB::LatinSmallLetterIWithInvertedBreve),
            'Ȍ' => Ok(LatinExtendedB::LatinCapitalLetterOWithDoubleGrave),
            'ȍ' => Ok(LatinExtendedB::LatinSmallLetterOWithDoubleGrave),
            'Ȏ' => Ok(LatinExtendedB::LatinCapitalLetterOWithInvertedBreve),
            'ȏ' => Ok(LatinExtendedB::LatinSmallLetterOWithInvertedBreve),
            'Ȑ' => Ok(LatinExtendedB::LatinCapitalLetterRWithDoubleGrave),
            'ȑ' => Ok(LatinExtendedB::LatinSmallLetterRWithDoubleGrave),
            'Ȓ' => Ok(LatinExtendedB::LatinCapitalLetterRWithInvertedBreve),
            'ȓ' => Ok(LatinExtendedB::LatinSmallLetterRWithInvertedBreve),
            'Ȕ' => Ok(LatinExtendedB::LatinCapitalLetterUWithDoubleGrave),
            'ȕ' => Ok(LatinExtendedB::LatinSmallLetterUWithDoubleGrave),
            'Ȗ' => Ok(LatinExtendedB::LatinCapitalLetterUWithInvertedBreve),
            'ȗ' => Ok(LatinExtendedB::LatinSmallLetterUWithInvertedBreve),
            'Ș' => Ok(LatinExtendedB::LatinCapitalLetterSWithCommaBelow),
            'ș' => Ok(LatinExtendedB::LatinSmallLetterSWithCommaBelow),
            'Ț' => Ok(LatinExtendedB::LatinCapitalLetterTWithCommaBelow),
            'ț' => Ok(LatinExtendedB::LatinSmallLetterTWithCommaBelow),
            'Ȝ' => Ok(LatinExtendedB::LatinCapitalLetterYogh),
            'ȝ' => Ok(LatinExtendedB::LatinSmallLetterYogh),
            'Ȟ' => Ok(LatinExtendedB::LatinCapitalLetterHWithCaron),
            'ȟ' => Ok(LatinExtendedB::LatinSmallLetterHWithCaron),
            'Ƞ' => Ok(LatinExtendedB::LatinCapitalLetterNWithLongRightLeg),
            'ȡ' => Ok(LatinExtendedB::LatinSmallLetterDWithCurl),
            'Ȣ' => Ok(LatinExtendedB::LatinCapitalLetterOu),
            'ȣ' => Ok(LatinExtendedB::LatinSmallLetterOu),
            'Ȥ' => Ok(LatinExtendedB::LatinCapitalLetterZWithHook),
            'ȥ' => Ok(LatinExtendedB::LatinSmallLetterZWithHook),
            'Ȧ' => Ok(LatinExtendedB::LatinCapitalLetterAWithDotAbove),
            'ȧ' => Ok(LatinExtendedB::LatinSmallLetterAWithDotAbove),
            'Ȩ' => Ok(LatinExtendedB::LatinCapitalLetterEWithCedilla),
            'ȩ' => Ok(LatinExtendedB::LatinSmallLetterEWithCedilla),
            'Ȫ' => Ok(LatinExtendedB::LatinCapitalLetterOWithDiaeresisAndMacron),
            'ȫ' => Ok(LatinExtendedB::LatinSmallLetterOWithDiaeresisAndMacron),
            'Ȭ' => Ok(LatinExtendedB::LatinCapitalLetterOWithTildeAndMacron),
            'ȭ' => Ok(LatinExtendedB::LatinSmallLetterOWithTildeAndMacron),
            'Ȯ' => Ok(LatinExtendedB::LatinCapitalLetterOWithDotAbove),
            'ȯ' => Ok(LatinExtendedB::LatinSmallLetterOWithDotAbove),
            'Ȱ' => Ok(LatinExtendedB::LatinCapitalLetterOWithDotAboveAndMacron),
            'ȱ' => Ok(LatinExtendedB::LatinSmallLetterOWithDotAboveAndMacron),
            'Ȳ' => Ok(LatinExtendedB::LatinCapitalLetterYWithMacron),
            'ȳ' => Ok(LatinExtendedB::LatinSmallLetterYWithMacron),
            'ȴ' => Ok(LatinExtendedB::LatinSmallLetterLWithCurl),
            'ȵ' => Ok(LatinExtendedB::LatinSmallLetterNWithCurl),
            'ȶ' => Ok(LatinExtendedB::LatinSmallLetterTWithCurl),
            'ȷ' => Ok(LatinExtendedB::LatinSmallLetterDotlessJ),
            'ȸ' => Ok(LatinExtendedB::LatinSmallLetterDbDigraph),
            'ȹ' => Ok(LatinExtendedB::LatinSmallLetterQpDigraph),
            'Ⱥ' => Ok(LatinExtendedB::LatinCapitalLetterAWithStroke),
            'Ȼ' => Ok(LatinExtendedB::LatinCapitalLetterCWithStroke),
            'ȼ' => Ok(LatinExtendedB::LatinSmallLetterCWithStroke),
            'Ƚ' => Ok(LatinExtendedB::LatinCapitalLetterLWithBar),
            'Ⱦ' => Ok(LatinExtendedB::LatinCapitalLetterTWithDiagonalStroke),
            'ȿ' => Ok(LatinExtendedB::LatinSmallLetterSWithSwashTail),
            'ɀ' => Ok(LatinExtendedB::LatinSmallLetterZWithSwashTail),
            'Ɂ' => Ok(LatinExtendedB::LatinCapitalLetterGlottalStop),
            'ɂ' => Ok(LatinExtendedB::LatinSmallLetterGlottalStop),
            'Ƀ' => Ok(LatinExtendedB::LatinCapitalLetterBWithStroke),
            'Ʉ' => Ok(LatinExtendedB::LatinCapitalLetterUBar),
            'Ʌ' => Ok(LatinExtendedB::LatinCapitalLetterTurnedV),
            'Ɇ' => Ok(LatinExtendedB::LatinCapitalLetterEWithStroke),
            'ɇ' => Ok(LatinExtendedB::LatinSmallLetterEWithStroke),
            'Ɉ' => Ok(LatinExtendedB::LatinCapitalLetterJWithStroke),
            'ɉ' => Ok(LatinExtendedB::LatinSmallLetterJWithStroke),
            'Ɋ' => Ok(LatinExtendedB::LatinCapitalLetterSmallQWithHookTail),
            'ɋ' => Ok(LatinExtendedB::LatinSmallLetterQWithHookTail),
            'Ɍ' => Ok(LatinExtendedB::LatinCapitalLetterRWithStroke),
            'ɍ' => Ok(LatinExtendedB::LatinSmallLetterRWithStroke),
            'Ɏ' => Ok(LatinExtendedB::LatinCapitalLetterYWithStroke),
            // Anything outside the block (including code points this enum
            // has no variant for) is rejected.
            _ => Err(()),
        }
    }
}
impl Into<u32> for LatinExtendedB {
    /// Returns the Unicode scalar value (code point) of this character.
    fn into(self) -> u32 {
        // A `char` *is* a Unicode scalar value, so a plain cast is exact.
        // The previous implementation round-tripped through the
        // `escape_unicode` string ("\u{...}") and re-parsed it with
        // `from_str_radix(...).unwrap()` — two allocations and a
        // panic path for what is a lossless primitive cast.
        let c: char = self.into();
        c as u32
    }
}
impl std::convert::TryFrom<u32> for LatinExtendedB {
    type Error = ();
    /// Tries to map a Unicode scalar value to a `LatinExtendedB` character.
    fn try_from(u: u32) -> Result<Self, Self::Error> {
        // Reject values that are not valid scalar values, then defer to
        // the `TryFrom<char>` impl for the block-membership check.
        char::try_from(u).map_err(|_| ()).and_then(Self::try_from)
    }
}
impl Iterator for LatinExtendedB {
    type Item = Self;
    /// Yields the character with the next-higher code point in this
    /// block, or `None` once the block is exhausted.
    fn next(&mut self) -> Option<Self> {
        use std::convert::TryFrom;
        let index: u32 = (*self).into();
        // Bug fix: `next` previously never advanced `*self`, so the
        // iterator yielded the same successor forever and `for` loops
        // over it never terminated. Store the successor before
        // returning it.
        match Self::try_from(index + 1) {
            Ok(succ) => {
                *self = succ;
                Some(succ)
            }
            Err(_) => None,
        }
    }
}
impl LatinExtendedB {
    /// The character with the lowest index in this unicode block
    pub fn new() -> Self {
        Self::LatinSmallLetterBWithStroke
    }
    /// The character's name, in sentence case
    pub fn name(&self) -> String {
        // Prefix the `Debug` variant name with the block name, then let
        // `string_morph` split the camel case into a sentence.
        let debug_name = std::format!("LatinExtendedB{:#?}", self);
        string_morph::to_sentence_case(&debug_name)
    }
}
|
// Copyright (c) 2015, <daggerbot@gmail.com>
// All rights reserved.
use ::display::{
Atom,
Display,
};
use ::event::{
ClientMessageData,
Event,
};
#[test]
fn test () {
    // Open the default display; fail the test if no X server is reachable.
    let mut display = match Display::open_default() {
        Some(d) => d,
        None => panic!("can't open display"),
    };
    // Intern the atoms needed to recognize a window-close request.
    let atom_wm_delete_window = force_intern_atom(&mut display, "WM_DELETE_WINDOW");
    let atom_wm_protocols = force_intern_atom(&mut display, "WM_PROTOCOLS");
    // Create a simple 640x480 window on the default screen and map it.
    let screen_num = display.default_screen();
    let root = display.root_window(screen_num);
    let border = display.black_pixel(screen_num);
    let background = display.white_pixel(screen_num);
    let window = display.create_simple_window(root, 0, 0, 640, 480, 0, border, background);
    display.store_name(window, "Xlib Test");
    display.set_wm_protocols(window, &[atom_wm_delete_window]);
    display.map_window(window);
    // Pump events until the window manager sends WM_DELETE_WINDOW.
    'events: loop {
        if let Event::ClientMessage(e) = display.next_event() {
            if e.message_type == atom_wm_protocols {
                if let ClientMessageData::Long(data) = e.data {
                    if data[0] == atom_wm_delete_window {
                        break 'events;
                    }
                }
            }
        }
    }
}
/// Interns `name` on `display`, panicking (failing the test) when the
/// atom cannot be retrieved.
fn force_intern_atom (display: &mut Display, name: &str) -> Atom {
    match display.intern_atom(name, false) {
        Some(atom) => atom,
        None => panic!("failed to retrieve atom: {}", name),
    }
}
|
use serde::{Serialize, Deserialize};
/// Inbound client command, dispatched on the `op` field of the JSON
/// payload. NOTE(review): the op names match the Lavalink wire
/// protocol — confirm against the client implementation.
#[derive(Debug, Deserialize)]
#[serde(tag = "op")]
pub(super) enum ClientMessage {
    /// Discord voice-server/session update forwarded by the client.
    #[serde(rename = "voiceUpdate")]
    VoiceUpdate(VoiceUpdate),
    /// Start playback of a track.
    #[serde(rename = "play")]
    PlayTrack(PlayTrack),
    /// Stop playback for a guild.
    #[serde(rename = "stop")]
    Stop(Stop),
    /// Seek within the current track.
    #[serde(rename = "seek")]
    Seek(Seek),
    /// Change the player volume.
    #[serde(rename = "volume")]
    SetVolume(SetVolume),
    /// Apply equalizer band gains.
    #[serde(rename = "equalizer")]
    SetEq(SetEq),
    /// Destroy the player for a guild.
    #[serde(rename = "destroy")]
    Destroy(Destroy)
}
/// Outbound message categories sent back to the client.
/// NOTE(review): `Stats` carries no payload here — presumably the stats
/// body is assembled at serialization time; confirm at the send site.
pub(super) enum ClientResponse {
    Stats,
    PlayerUpdate(PlayerUpdate),
    PlayerEvent(PlayerEvent)
}
/// Periodic per-guild player state pushed to the client.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct PlayerUpdate {
    pub guild_id: u64,
    // Free-form JSON state blob (position, time, …) — schema not fixed here.
    pub state: serde_json::Value,
}
/// Lifecycle events emitted by a player.
#[derive(Debug)]
pub(super) enum PlayerEvent {
    TrackStartEvent,
    TrackEndEvent,
}
/// Payload describing why a track stopped playing.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct TrackEndEvent {
    pub track: String,
    pub reason: String,
}
/// Payload for a playback exception on a track.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct TrackExceptionEvent {
    pub track: String,
}
/// Payload emitted when a track stalls longer than the threshold.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct TrackStuckEvent {
    pub track: String,
    #[serde(rename = "thresholdMs")]
    pub threshold_ms: u64
}
/// Combined Discord voice state/server update for a guild connection.
#[derive(Debug, Deserialize)]
pub(super) struct VoiceUpdate {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
    #[serde(alias = "sessionId")]
    pub session_id: String,
    pub event: VoiceUpdateEvent,
}
/// Inner voice-server event carrying the endpoint and auth token.
#[derive(Debug, Deserialize)]
pub(super) struct VoiceUpdateEvent {
    // May be absent when Discord has not yet allocated a voice server.
    pub endpoint: Option<String>,
    pub token: String,
}
/// Payload of the `play` op: which track to start and how.
#[derive(Debug, Deserialize)]
pub(super) struct PlayTrack {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
    pub track: String,
    // The wire protocol uses camelCase for multi-word fields (as the
    // other messages in this module already acknowledge via `alias`),
    // so accept both spellings here too; `alias` is backward compatible
    // with the snake_case names.
    #[serde(alias = "startTime")]
    pub start_time: u32,
    #[serde(alias = "endTime")]
    pub end_time: u32,
    pub volume: u16,
    #[serde(alias = "noReplace")]
    pub no_replace: bool,
    pub pause: bool,
}
/// Payload of the `stop` op.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct Stop {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
}
/// Payload of the `seek` op; `position` is the target playback offset.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct Seek {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
    pub position: u64
}
/// Payload of the `volume` op.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct SetVolume {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
    pub volume: u16
}
/// Payload of the `equalizer` op: a set of per-band gain adjustments.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct SetEq {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
    pub bands: Vec<EqBand>,
}
/// A single equalizer band and its gain.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct EqBand {
    pub band: u8,
    pub gain: f32
}
/// Payload of the `destroy` op.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct Destroy {
    #[serde(alias = "guildId")]
    pub guild_id: u64,
}
|
use crate::map::sector::Sector;
use crate::map::triangle::Triangle;
use crate::math::util::float_eq;
use crate::math::util::float_zero;
use crate::math::vector::Vector2;
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
/// A vertex node in the linked polygon graph built while triangulating
/// a sector surface.
struct Polygon {
    // Position of this node in the sorted node list
    // (kept in sync by `update_polygon_indices`).
    index: usize,
    // Set in `classify` when this vertex received a merge diagonal.
    merge: bool,
    // Set in `cull_vectors` when this vertex lies on the perimeter walk.
    perimeter: bool,
    // The vertex position.
    point: Vector2,
    // Incoming neighbours; slot 0 is the primary edge, extra entries
    // are diagonals added during classification.
    previous: Vec<Rc<RefCell<Polygon>>>,
    // Outgoing neighbours, same layout as `previous`.
    next: Vec<Rc<RefCell<Polygon>>>,
}
impl Polygon {
fn new(point: Vector2) -> Self {
Polygon {
index: 0,
merge: false,
perimeter: false,
previous: Vec::new(),
next: Vec::new(),
point,
}
}
}
/// Renumbers every node so `index` matches its position in the list.
fn update_polygon_indices(polygons: &Vec<Rc<RefCell<Polygon>>>) {
    polygons
        .iter()
        .enumerate()
        .for_each(|(i, polygon)| polygon.borrow_mut().index = i);
}
/// Removes the first node whose point equals `point`, if any.
/// Only the first match is dropped — duplicate links must survive.
fn polygon_remove_point(polygons: &mut Vec<Rc<RefCell<Polygon>>>, point: Vector2) {
    let found = polygons
        .iter()
        .position(|polygon| polygon.borrow().point.eq(point));
    if let Some(i) = found {
        polygons.remove(i);
    }
}
/// Returns a handle to the first node located at `point`, if any.
fn polygon_find(polygons: &Vec<Rc<RefCell<Polygon>>>, point: Vector2) -> Option<Rc<RefCell<Polygon>>> {
    polygons
        .iter()
        .find(|polygon| point.eq(polygon.borrow().point))
        .cloned()
}
/// Ordering used for the sorted node list: returns 1 when `a.point.y`
/// is less than `b.point.y`, or the y's are (float-)equal and
/// `a.point.x` is greater; otherwise -1.
fn polygon_compare(a: &Polygon, b: &Polygon) -> i32 {
    let (i, e) = (a.point, b.point);
    if i.y < e.y || (float_eq(i.y, e.y) && i.x > e.x) {
        1
    } else {
        -1
    }
}
/// Angle of the b→a direction, normalized from atan2's (-π, π] range
/// into [0, 2π).
fn vector_angle(a: Vector2, b: Vector2) -> f32 {
    let raw = (a.y - b.y).atan2(a.x - b.x);
    if raw < 0.0 {
        raw + 2.0 * std::f32::consts::PI
    } else {
        raw
    }
}
/// Turn angle at `second` between the edges first→second and
/// second→third, normalized to [0, 2π).
fn vector_interior_angle(first: Vector2, second: Vector2, third: Vector2) -> f32 {
    let incoming = (first.y - second.y).atan2(first.x - second.x);
    let outgoing = (second.y - third.y).atan2(second.x - third.x);
    let interior = outgoing - incoming;
    if interior < 0.0 {
        interior + 2.0 * std::f32::consts::PI
    } else {
        interior
    }
}
/// Even-odd ray-crossing point-in-polygon test specialized to the
/// three edges of a triangle.
fn triangle_contains(tri: &[Vector2; 3], x: f32, y: f32) -> bool {
    let mut inside = false;
    let mut previous = 2;
    for current in 0..3 {
        let vc = tri[current];
        let vp = tri[previous];
        // Does this edge straddle the horizontal line through `y`?
        if (vc.y > y) != (vp.y > y) {
            // x-coordinate where the edge crosses that line.
            let crossing = (vp.x - vc.x) * (y - vc.y) / (vp.y - vc.y) + vc.x;
            if x < crossing {
                inside = !inside;
            }
        }
        previous = current;
    }
    inside
}
/// Tests whether segment a-b properly intersects segment c-d.
/// Uses signed line evaluations: each segment's endpoints must lie on
/// opposite sides of (or on) the other segment's supporting line, and
/// the lines must not be (near-)parallel.
fn vector_line_intersect(a: Vector2, b: Vector2, c: Vector2, d: Vector2) -> bool {
    // Implicit line through a-b: a1*x + b1*y + c1 = 0.
    let a1: f32 = b.y - a.y;
    let b1: f32 = a.x - b.x;
    let c1: f32 = (b.x * a.y) - (a.x * b.y);
    // Signed positions of c and d relative to line a-b.
    let r3: f32 = (a1 * c.x) + (b1 * c.y) + c1;
    let r4: f32 = (a1 * d.x) + (b1 * d.y) + c1;
    // Same strict side (and neither on the line) → no intersection.
    if !float_zero(r3) && !float_zero(r4) && r3 * r4 >= 0.0 {
        return false;
    }
    // Implicit line through c-d, and positions of a and b relative to it.
    let a2: f32 = d.y - c.y;
    let b2: f32 = c.x - d.x;
    let c2: f32 = (d.x * c.y) - (c.x * d.y);
    let r1: f32 = (a2 * a.x) + (b2 * a.y) + c2;
    let r2: f32 = (a2 * b.x) + (b2 * b.y) + c2;
    if !float_zero(r1) && !float_zero(r2) && r1 * r2 >= 0.0 {
        return false;
    }
    // Near-zero determinant → lines are collinear/parallel; treated as
    // non-intersecting.
    let denominator: f32 = (a1 * b2) - (a2 * b1);
    if float_zero(denominator) {
        return false;
    }
    true
}
/// Returns true when the diagonal a-b does not cross any existing edge
/// of the polygon graph (edges sharing an endpoint with a or b are
/// ignored).
fn valid_polygon(polygon: &Vec<Rc<RefCell<Polygon>>>, a: Vector2, b: Vector2) -> bool {
    !polygon.iter().any(|node| {
        let c = node.borrow().point;
        let d = node.borrow().previous[0].borrow().point;
        !a.eq(c) && !a.eq(d) && !b.eq(c) && !b.eq(d) && vector_line_intersect(a, b, c, d)
    })
}
/// Returns true when (a, b, c) is a clippable "ear": the corner at `b`
/// is convex and no other vertex lies inside the candidate triangle.
fn triangle_valid(vectors: &Vec<Vector2>, a: Vector2, b: Vector2, c: Vector2) -> bool {
    // Reflex corners cannot be clipped.
    if vector_interior_angle(a, b, c) > std::f32::consts::PI {
        return false;
    }
    let tri = [a, b, c];
    // Any remaining vertex inside the triangle invalidates the ear.
    !vectors
        .iter()
        .copied()
        .filter(|&vec| !vec.eq(a) && !vec.eq(b) && !vec.eq(c))
        .any(|vec| triangle_contains(&tri, vec.x, vec.y))
}
/// Inserts a new node for `point`, keeping the list ordered by
/// `polygon_compare` (appends when no larger element exists).
fn polygon_sorted_insert(polygons: &mut Vec<Rc<RefCell<Polygon>>>, point: Vector2) {
    let polygon = Polygon::new(point);
    let slot = polygons
        .iter()
        .position(|existing| polygon_compare(&polygon, &existing.borrow()) <= 0)
        .unwrap_or(polygons.len());
    polygons.insert(slot, Rc::new(RefCell::new(polygon)));
}
/// Walks the perimeter of each connected polygon loop, trims every node
/// down to a single previous/next link, and removes all interior nodes
/// that are not reachable as part of a perimeter.
fn cull_vectors(polygons: &mut Vec<Rc<RefCell<Polygon>>>) {
    update_polygon_indices(polygons);
    // Indices of nodes to delete at the end.
    let mut cull = Vec::new();
    // Indices of nodes not yet visited by any perimeter walk.
    let mut remaining = Vec::with_capacity(polygons.len());
    for polygon in polygons.iter() {
        remaining.push(polygon.borrow().index);
    }
    // BFS frontier bookkeeping for interior (non-perimeter) nodes.
    let mut dead = HashSet::new();
    let mut holding = HashSet::new();
    let mut pending = HashSet::new();
    while remaining.len() > 0 {
        let start = remaining[0];
        let mut current = start;
        // Walk one closed loop, marking it as perimeter and trimming
        // extra links; trimmed `next` targets seed the interior BFS.
        loop {
            let mut polygon = polygons[current].borrow_mut();
            polygon.perimeter = true;
            remaining.retain(|&x| x != current);
            while polygon.next.len() != 1 {
                let next = polygon.next[1].borrow().index;
                pending.insert(next);
                polygon.next.remove(1);
            }
            while polygon.previous.len() != 1 {
                polygon.previous.remove(1);
            }
            current = polygon.next[0].borrow().index;
            if current == start {
                break;
            }
        }
        // Flood-fill from the trimmed links: everything reachable
        // without touching a perimeter node is dead.
        while pending.len() > 0 {
            for polygon_index in pending.iter().copied() {
                dead.insert(polygon_index);
                let polygon = polygons[polygon_index].borrow();
                for ref_next in polygon.next.iter() {
                    let next = ref_next.borrow();
                    if !next.perimeter {
                        if !pending.contains(&next.index) && !dead.contains(&next.index) {
                            holding.insert(next.index);
                        }
                    }
                }
            }
            pending.clear();
            for polygon_index in holding.iter().copied() {
                pending.insert(polygon_index);
            }
            holding.clear();
        }
        // Dead nodes will never start a walk, and are queued for removal.
        for polygon_index in dead.iter().copied() {
            for x in 0..remaining.len() {
                if remaining[x] == polygon_index {
                    remaining.remove(x);
                    break;
                }
            }
            cull.push(polygon_index);
        }
        dead.clear();
        holding.clear();
        pending.clear();
    }
    // Remove culled nodes by their (still original) stored index.
    for polygon_index in cull.iter().copied() {
        for x in 0..polygons.len() {
            if polygons[x].borrow().index == polygon_index {
                polygons.remove(x);
                break;
            }
        }
    }
}
/// Links each of the sector's vertices to its neighbours in winding
/// order. `clockwise` flips the traversal direction so outer perimeters
/// and inner holes wind oppositely. When a vertex is shared between
/// sectors, the link with the smaller angle to the point wins slot 0.
fn populate_references(sec: &Sector, polygons: &Vec<Rc<RefCell<Polygon>>>, clockwise: bool) {
    let len: usize = sec.vecs.len();
    for i in 0..len {
        // Wrapping previous/next vertex indices along the sector outline.
        let mut p = if i == 0 { len - 1 } else { i - 1 };
        let mut n = if i == len - 1 { 0 } else { i + 1 };
        if !clockwise {
            let t = p;
            p = n;
            n = t;
        }
        let next = polygon_find(polygons, sec.vecs[n]).unwrap();
        let previous = polygon_find(polygons, sec.vecs[p]).unwrap();
        let ref_original = polygon_find(polygons, sec.vecs[i]).unwrap();
        let mut original = ref_original.borrow_mut();
        if original.previous.is_empty() {
            original.previous.push(previous.clone());
        } else {
            // Vertex already linked by another sector: keep the link
            // with the smaller incoming angle in front.
            let point = original.point;
            let existing = original.previous[0].borrow().point;
            if vector_angle(previous.borrow().point, point) < vector_angle(existing, point) {
                original.previous.insert(0, previous);
            }
        }
        if original.next.is_empty() {
            original.next.push(next);
        } else {
            let point = original.point;
            let existing = original.next[0].borrow().point;
            if vector_angle(next.borrow().point, point) < vector_angle(existing, point) {
                original.next.insert(0, next);
            }
        }
    }
}
/// Inserts each of the sector's vertices into the sorted node list,
/// de-duplicating points already present (shared between sectors).
fn populate_vectors(sec: &Sector, polygons: &mut Vec<Rc<RefCell<Polygon>>>) {
    for point in sec.vecs.iter().copied() {
        let already_present = polygons
            .iter()
            .any(|polygon| point.eq(polygon.borrow().point));
        if !already_present {
            polygon_sorted_insert(polygons, point);
        }
    }
}
/// A sector is skipped when it lacks the surface kind being built.
fn skip(sector: &Sector, floor: bool) -> bool {
    if floor {
        !sector.has_floor()
    } else {
        !sector.has_ceiling()
    }
}
/// Builds the polygon node graph for one sector surface: vertices and
/// links of every contributing inner sector first, then the outer
/// sector itself.
///
/// Fix: several index expressions had been corrupted by HTML-entity
/// decoding (`&sect` → `§`), producing `§ors[...]` which does not
/// compile; restored to `&sectors[...]`. Also dropped the redundant
/// `mut` on the `polygons` binding.
fn populate(sectors: &mut Vec<Sector>, sector: usize, floor: bool, polygons: &mut Vec<Rc<RefCell<Polygon>>>) {
    let sector = &sectors[sector];
    // Gather vertices of inner sectors that have this surface kind…
    for inner in sector.inside.iter().copied() {
        let inner = &sectors[inner];
        if skip(inner, floor) {
            continue;
        }
        populate_vectors(inner, polygons);
    }
    // …then wire their neighbour links (holes wind counter-clockwise).
    for inner in sector.inside.iter().copied() {
        let inner = &sectors[inner];
        if skip(inner, floor) {
            continue;
        }
        populate_references(inner, polygons, false);
    }
    // Remove interior nodes that are not part of any perimeter loop.
    cull_vectors(polygons);
    // Finally add the outer sector (clockwise) and renumber everything.
    populate_vectors(sector, polygons);
    populate_references(sector, polygons, true);
    update_polygon_indices(polygons);
}
/// Classifies vertices for monotone decomposition and inserts the
/// merge/split diagonals. Vertices that start a monotone chain are
/// collected into `monotone`. Relies on `polygons` being sorted (see
/// `polygon_sorted_insert`), so index order is the sweep order.
fn classify(polygons: &Vec<Rc<RefCell<Polygon>>>, monotone: &mut Vec<Rc<RefCell<Polygon>>>) {
    let mut merge = Vec::new();
    let mut split = Vec::new();
    for polygon in polygons.iter() {
        let current = polygon.borrow();
        let previous = current.previous[0].borrow().point;
        let next = current.next[0].borrow().point;
        // Reflex: interior turn greater than π at this vertex.
        let reflex = vector_interior_angle(previous, current.point, next) > std::f32::consts::PI;
        let both_above = previous.y < current.point.y && next.y <= current.point.y;
        let both_below = previous.y >= current.point.y && next.y >= current.point.y;
        let collinear = next.y == current.point.y;
        if both_above && reflex {
            // Start vertex of a monotone chain.
            monotone.push(polygon.clone());
        } else if both_above && !reflex {
            if !collinear {
                split.push(polygon.clone());
            }
        } else if both_below && !reflex {
            if !collinear {
                merge.push(polygon.clone());
            }
        }
    }
    // Merge vertices: connect each to the first later vertex reachable
    // by a diagonal that crosses no existing edge.
    for polygon in merge.iter() {
        let start = polygon.borrow().index + 1;
        let point = polygon.borrow().point;
        for k in start..polygons.len() {
            let diagonal = &polygons[k];
            if valid_polygon(polygons, point, diagonal.borrow().point) {
                {
                    let mut current = polygon.borrow_mut();
                    current.merge = true;
                    // Diagonals are added to both link lists of both ends.
                    current.next.push(diagonal.clone());
                    current.previous.push(diagonal.clone());
                }
                {
                    let mut diagonal = diagonal.borrow_mut();
                    diagonal.next.push(polygon.clone());
                    diagonal.previous.push(polygon.clone());
                }
                break;
            }
        }
    }
    // Split vertices: connect each to the first earlier vertex (in
    // reverse sweep order) with a valid diagonal; the far end also
    // starts a new monotone chain unless it already took a merge
    // diagonal.
    for polygon in split.iter() {
        let current = polygon.borrow();
        let start = current.index;
        let point = current.point;
        for k in (0..start).rev() {
            let diagonal = &polygons[k];
            if valid_polygon(polygons, point, diagonal.borrow().point) {
                if !diagonal.borrow().merge {
                    monotone.push(diagonal.clone());
                    {
                        let mut current = polygon.borrow_mut();
                        current.next.push(diagonal.clone());
                        current.previous.push(diagonal.clone());
                    }
                    {
                        let mut diagonal = diagonal.borrow_mut();
                        diagonal.next.push(polygon.clone());
                        diagonal.previous.push(polygon.clone());
                    }
                }
                break;
            }
        }
    }
}
/// Ear-clips one monotone polygon outline (`vecs`) into triangles.
/// Ceiling triangles are wound in reverse so their faces point the
/// opposite way from floor triangles.
fn clip(sec: &Sector, floor: bool, scale: f32, triangles: &mut Vec<Triangle>, vecs: &mut Vec<Vector2>) {
    // Builds a triangle with the winding appropriate to the surface.
    let emit = |u: Vector2, v: Vector2, w: Vector2| {
        if floor {
            Triangle::new(sec.floor, sec.floor_texture, u, v, w, floor, scale)
        } else {
            Triangle::new(sec.ceiling, sec.ceiling_texture, w, v, u, floor, scale)
        }
    };
    let mut i = 0;
    let mut size = vecs.len();
    // Cut valid ears off until only one triangle's worth remains.
    while size > 3 {
        let after = if i == size - 1 { 0 } else { i + 1 };
        let before = if i == 0 { size - 1 } else { i - 1 };
        let (p, c, n) = (vecs[before], vecs[i], vecs[after]);
        if triangle_valid(vecs, p, c, n) {
            triangles.push(emit(p, c, n));
            vecs.remove(i);
            size -= 1;
        } else {
            i += 1;
        }
        if i == size {
            i = 0;
        }
    }
    // The final three vertices form the last triangle.
    triangles.push(emit(vecs[0], vecs[1], vecs[2]));
}
/// Extracts each monotone polygon by walking the link graph from its
/// start vertex, then ear-clips it into `triangles`.
fn clip_all(sec: &Sector, floor: bool, scale: f32, monotone: &Vec<Rc<RefCell<Polygon>>>, triangles: &mut Vec<Triangle>) {
    let mut vecs = Vec::new();
    for start in monotone.iter() {
        let mut next = start.borrow().next[0].clone();
        let mut current = start.clone();
        // Walk backwards along `previous` links, always taking the
        // smallest interior turn, collecting the outline into `vecs`.
        loop {
            let a = next.borrow().point;
            let b = current.borrow().point;
            vecs.push(b);
            let mut angle = std::f32::MAX;
            let mut previous = Option::None;
            // Among all incoming links, pick the one with the smallest
            // normalized turn relative to the edge we arrived on.
            for ref_previous in current.borrow().previous.iter() {
                let c = ref_previous.borrow().point;
                let angle_1 = (a.x - b.x).atan2(a.y - b.y);
                let angle_2 = (b.x - c.x).atan2(b.y - c.y);
                let mut interior = angle_2 - angle_1 + std::f32::consts::PI;
                if interior < 0.0 {
                    interior += 2.0 * std::f32::consts::PI;
                }
                if interior > 2.0 * std::f32::consts::PI {
                    interior -= 2.0 * std::f32::consts::PI;
                }
                if interior < angle {
                    previous = Some(ref_previous.clone());
                    angle = interior;
                }
            }
            let previous = previous.unwrap();
            // Consume the traversed links so shared vertices are not
            // walked twice by overlapping monotone polygons.
            {
                let mut mutate_current = current.borrow_mut();
                polygon_remove_point(&mut mutate_current.next, a);
                polygon_remove_point(&mut mutate_current.previous, previous.borrow().point);
            }
            if Rc::ptr_eq(&previous, start) {
                break;
            }
            next = current;
            current = previous;
        }
        clip(sec, floor, scale, triangles, &mut vecs);
        vecs.clear();
    }
}
/// Triangulates one surface (floor or ceiling) of a sector and appends
/// the result to `triangles`.
///
/// Fixes: restored `&sectors[sector]` where HTML-entity decoding had
/// corrupted `&sect` into `§`; removed leftover debug `println!`
/// tracing from this library path; dropped redundant clears of locals
/// that are about to be dropped.
fn construct(sectors: &mut Vec<Sector>, sector: usize, floor: bool, scale: f32, triangles: &mut Vec<Triangle>) {
    if skip(&sectors[sector], floor) {
        return;
    }
    let mut polygons = Vec::new();
    let mut monotone = Vec::new();
    // Build the vertex graph, decompose into monotone pieces, clip.
    populate(sectors, sector, floor, &mut polygons);
    classify(&polygons, &mut monotone);
    clip_all(&sectors[sector], floor, scale, &monotone, triangles);
    // Break the Rc<RefCell<…>> reference cycles formed by the
    // previous/next links so the polygon nodes can actually be freed.
    for polygon in polygons.iter() {
        let mut polygon = polygon.borrow_mut();
        polygon.next.clear();
        polygon.previous.clear();
    }
}
/// Triangulates both surfaces of a sector (floor first, then ceiling)
/// and stores the combined triangle list back on the sector.
pub fn triangulate_sector(sectors: &mut Vec<Sector>, sector: usize, scale: f32) {
    let mut triangles: Vec<Triangle> = Vec::new();
    for &floor in [true, false].iter() {
        construct(sectors, sector, floor, scale, &mut triangles);
    }
    sectors[sector].update_triangles(triangles);
}
|
use std::fs::File;
use std::io::{BufReader, Cursor, Read};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::path::Path;
use std::time::{Duration, UNIX_EPOCH};
use memmap::Mmap;
use errors::Result;
use pcap::header::Header as FileHeader;
use pcap::packet::Packet as RawPacket;
use pcap::Packet;
/// Open a file as a stream in read-only mode.
pub fn open<'a, P: AsRef<Path>>(path: P) -> Result<Reader<'a, BufReader<File>>> {
    // Delegate buffering and reader construction to `read`.
    read(File::open(path)?)
}
/// Read a stream implements `Read` trait in read-only mode.
pub fn read<'a, R: Read>(read: R) -> Result<Reader<'a, BufReader<R>>> {
    let buffered = BufReader::new(read);
    Ok(Reader::new(buffered))
}
/// Open a file as immutable memory mapped buffer in read-only mode.
pub fn mmap<'a, P: AsRef<Path>>(path: P) -> Result<Reader<'a, Cursor<Mmap>>> {
    let f = File::open(path)?;
    // SAFETY: the map is created over a file we just opened.
    // NOTE(review): the mapping becomes undefined behavior if another
    // process truncates or rewrites the file while it is mapped —
    // callers must ensure the capture file is not modified concurrently.
    let mmap = unsafe { Mmap::map(&f)? };
    parse(mmap)
}
/// Parse a buffer implements `AsRef<[u8]>` trait in read-only mode.
pub fn parse<'a, T: AsRef<[u8]>>(buf: T) -> Result<Reader<'a, Cursor<T>>> {
    let cursor = Cursor::new(buf);
    Ok(Reader::new(cursor))
}
/// The `Reader` struct allows reading packets from a packet capture.
pub struct Reader<'a, R: 'a> {
    // Underlying stream or buffer the capture is read from.
    r: R,
    // Carries the `'a` lifetime without storing a borrow: packets
    // parsed from a buffer borrow its bytes for `'a`.
    phantom: PhantomData<&'a R>,
}
impl<'a, R: 'a> Reader<'a, R> {
/// Create a new `Reader` that reads the packet capture data from the specified `Reader`.
pub fn new(r: R) -> Self {
Reader {
r,
phantom: PhantomData,
}
}
}
impl<'a, R> Deref for Reader<'a, R> {
type Target = R;
fn deref(&self) -> &Self::Target {
&self.r
}
}
impl<'a, R> DerefMut for Reader<'a, R> {
    /// Mutably borrow the wrapped reader.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let inner = &mut self.r;
        inner
    }
}
impl<'a> From<(RawPacket<'a>, i32, bool)> for Packet<'a> {
    /// Builds a `Packet` from a raw capture record plus the file
    /// header's UTC offset in seconds (`thiszone`) and its timestamp
    /// resolution flag.
    fn from(args: (RawPacket<'a>, i32, bool)) -> Self {
        let (raw_packet, utc_offset, is_nanosecond_resolution) = args;
        let secs = u64::from(raw_packet.ts_sec);
        // Widen before taking the absolute value: `i32::abs` panics in
        // debug builds (and wraps in release) for `i32::MIN`, which a
        // corrupt `thiszone` field could supply.
        let offset = i64::from(utc_offset).abs() as u64;
        // Saturate to epoch (0) on under/overflow, as before.
        let secs = if utc_offset < 0 {
            secs.checked_sub(offset)
        } else {
            secs.checked_add(offset)
        }.unwrap_or_default();
        let nanos = if is_nanosecond_resolution {
            raw_packet.ts_usec
        } else {
            // Microsecond captures: scale to nanoseconds, collapsing to
            // 0 on u32 overflow (matches the original behavior).
            raw_packet.ts_usec.checked_mul(1000).unwrap_or_default()
        };
        Packet {
            timestamp: UNIX_EPOCH + Duration::new(secs, nanos % 1_000_000_000),
            actual_length: raw_packet.orig_len as usize,
            payload: raw_packet.payload,
        }
    }
}
impl<'a, T> IntoIterator for &'a Reader<'a, Cursor<T>>
where
    T: AsRef<[u8]>,
{
    type Item = <ParsePackets<'a> as Iterator>::Item;
    type IntoIter = ParsePackets<'a>;
    /// Iterate over packets parsed from the underlying buffer.
    fn into_iter(self) -> Self::IntoIter {
        let buf = self.r.get_ref();
        ParsePackets::new(buf)
    }
}
/// Parse `Packet<'a>` from a read-only buffer.
pub type ParsePackets<'a> = parse::Packets<'a>;
/// Zero-copy packet iteration over an in-memory capture buffer.
mod parse {
    use nom::Endianness;
    use super::*;
    use pcap::packet::ReadPacketExt;
    /// Iterator over the packets of a capture held entirely in memory.
    pub struct Packets<'a> {
        // Parser state machine; see `State`.
        state: State<'a>,
    }
    impl<'a> Packets<'a> {
        pub fn new<T: AsRef<[u8]>>(buf: &'a T) -> Packets<'a> {
            Packets {
                state: State::Init(buf.as_ref()),
            }
        }
    }
    /// Parser states: before the file header has been read, while
    /// packets are being parsed (carrying the remaining bytes plus the
    /// header-derived endianness / UTC offset / resolution), and done.
    enum State<'a> {
        Init(&'a [u8]),
        Parsing(&'a [u8], Endianness, i32, bool),
        Done,
    }
    impl<'a> Iterator for Packets<'a> {
        type Item = Packet<'a>;
        fn next(&mut self) -> Option<Self::Item> {
            loop {
                match self.state {
                    State::Init(remaining) => {
                        // Parse the global file header once; any parse
                        // failure terminates the iterator silently.
                        self.state =
                            if let Ok((remaining, file_header)) = FileHeader::parse(remaining) {
                                let magic = file_header.magic();
                                State::Parsing(
                                    remaining,
                                    magic.endianness(),
                                    file_header.thiszone,
                                    magic.is_nanosecond_resolution(),
                                )
                            } else {
                                State::Done
                            }
                    }
                    State::Parsing(
                        mut remaining,
                        endianness,
                        utc_offset,
                        is_nanosecond_resolution,
                    ) => {
                        // `read_packet` advances `remaining` past one
                        // record; errors (including EOF) end iteration.
                        if let Ok(packet) = remaining.read_packet(endianness) {
                            self.state = State::Parsing(
                                remaining,
                                endianness,
                                utc_offset,
                                is_nanosecond_resolution,
                            );
                            return Some(Packet::from((
                                packet,
                                utc_offset,
                                is_nanosecond_resolution,
                            )));
                        }
                        self.state = State::Done;
                    }
                    State::Done => return None,
                }
            }
        }
    }
}
impl<'a, R> IntoIterator for Reader<'a, BufReader<R>>
where
R: Read,
{
type Item = <ReadPackets<'a, R> as Iterator>::Item;
type IntoIter = ReadPackets<'a, R>;
fn into_iter(self) -> Self::IntoIter {
ReadPackets::new(self.r)
}
}
/// Read `Packet<'a>` items lazily from a read-only byte stream.
pub type ReadPackets<'a, R> = read::Packets<'a, R>;
mod read {
    use std::cell::Cell;
    use nom::Endianness;
    use super::*;
    use pcap::packet::ReadPacketExt;
    /// Streaming iterator over packets read from a buffered `Read` source.
    pub struct Packets<'a, R: 'a> {
        // `Cell` lets `next` move the state out (`take`) and put it back.
        state: Cell<State<R>>,
        phantom: PhantomData<&'a R>,
    }
    impl<'a, R: 'a> Packets<'a, R> {
        /// Start reading at the pcap global file header of `reader`.
        pub fn new(reader: BufReader<R>) -> Self {
            Packets {
                state: Cell::new(State::Init(reader)),
                phantom: PhantomData,
            }
        }
    }
    /// Reader state machine: header not yet read, reading packets, finished.
    enum State<R> {
        Init(BufReader<R>),
        // Reader, byte order, `thiszone` UTC offset, nanosecond flag.
        Parsing(BufReader<R>, Endianness, i32, bool),
        Done,
    }
    impl<R> Default for State<R> {
        // `Cell::take` substitutes this value; a taken state reads as Done.
        fn default() -> Self {
            State::Done
        }
    }
    impl<'a, R> Iterator for Packets<'a, R>
    where
        R: 'a + Read,
    {
        type Item = Packet<'a>;
        fn next(&mut self) -> Option<Self::Item> {
            loop {
                match self.state.take() {
                    State::Init(mut reader) => {
                        // Read and parse the fixed-size global header first.
                        let mut buf = vec![0; FileHeader::size()];
                        self.state = Cell::new(
                            reader
                                .read_exact(&mut buf)
                                .map_err(|err| err.into())
                                .and_then(|_| FileHeader::parse(&buf))
                                .map(|(_, file_header)| {
                                    let magic = file_header.magic();
                                    State::Parsing(
                                        reader,
                                        magic.endianness(),
                                        file_header.thiszone,
                                        magic.is_nanosecond_resolution(),
                                    )
                                })
                                .unwrap_or(State::Done),
                        );
                    }
                    State::Parsing(
                        mut reader,
                        endianness,
                        utc_offset,
                        is_nanosecond_resolution,
                    ) => {
                        if let Ok(packet) = reader.read_packet(endianness) {
                            self.state = Cell::new(State::Parsing(
                                reader,
                                endianness,
                                utc_offset,
                                is_nanosecond_resolution,
                            ));
                            return Some(Packet::from((
                                packet,
                                utc_offset,
                                is_nanosecond_resolution,
                            )));
                        }
                        // On a failed read the state taken above was already
                        // replaced by the `Done` default, so iteration ends.
                    }
                    State::Done => {
                        return None;
                    }
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use std::borrow::Cow;
    use super::*;
    use pcap::tests::PACKETS;
    // Exercises the streaming (`BufReader`) path over every fixture capture.
    #[test]
    pub fn test_read() {
        for (buf, magic) in PACKETS.iter() {
            let mut packets = read(*buf).unwrap().into_iter();
            let packet = packets.next().unwrap();
            let ts = packet.timestamp.duration_since(UNIX_EPOCH).unwrap();
            assert_eq!(ts.as_secs(), 0x56506e1a);
            assert_eq!(
                ts.subsec_nanos(),
                // In the microsecond case the value presumably overflows the
                // u32 multiply in `Packet::from` and collapses to 0 — TODO
                // confirm against the fixture data.
                if magic.is_nanosecond_resolution() {
                    0x182b0ad0
                } else {
                    0
                }
            );
            assert_eq!(packet.actual_length, 60);
            assert_eq!(packet.payload, Cow::from(&[0x44u8, 0x41, 0x54, 0x41][..]));
            assert!(packets.next().is_none());
        }
    }
    // Same expectations through the in-memory (`Cursor`) parsing path.
    #[test]
    pub fn test_parse() {
        for (buf, magic) in PACKETS.iter() {
            let reader = parse(buf).unwrap();
            let mut packets = reader.into_iter();
            let packet = packets.next().unwrap();
            let ts = packet.timestamp.duration_since(UNIX_EPOCH).unwrap();
            assert_eq!(ts.as_secs(), 0x56506e1a);
            assert_eq!(
                ts.subsec_nanos(),
                if magic.is_nanosecond_resolution() {
                    0x182b0ad0
                } else {
                    0
                }
            );
            assert_eq!(packet.actual_length, 60);
            assert_eq!(packet.payload, Cow::from(&[0x44u8, 0x41, 0x54, 0x41][..]));
            assert!(packets.next().is_none());
        }
    }
}
|
use std::fmt::Debug;
use async_trait::async_trait;
use bonsaidb_core::{custom_api::CustomApi, permissions::Dispatcher};
use crate::{server::ConnectedClient, CustomServer};
/// Tailors the behavior of a server to your needs.
#[async_trait]
pub trait Backend: Debug + Send + Sync + Sized + 'static {
    /// The custom API definition. If you do not wish to have an API, `()` may be provided.
    type CustomApi: CustomApi;
    /// The type that implements the [`Dispatcher`](bonsaidb_core::permissions::Dispatcher) trait.
    type CustomApiDispatcher: Dispatcher<
        <Self::CustomApi as CustomApi>::Request,
        Result = anyhow::Result<<Self::CustomApi as CustomApi>::Response>,
    > + Debug;
    /// Returns a dispatcher to handle custom api requests. The `server` and
    /// `client` parameters are provided to allow the dispatcher to have access
    /// to them when handling the individual actions.
    fn dispatcher_for(
        server: &CustomServer<Self>,
        client: &ConnectedClient<Self>,
    ) -> Self::CustomApiDispatcher;
    // TODO: add client connections events, client errors, etc.
    /// A client connected to the server. This is invoked before authentication has been performed.
    #[allow(unused_variables)]
    #[must_use]
    async fn client_connected(
        client: &ConnectedClient<Self>,
        server: &CustomServer<Self>,
    ) -> ConnectionHandling {
        println!(
            "{:?} client connected from {:?}",
            client.transport(),
            client.address()
        );
        ConnectionHandling::Accept
    }
    /// A client disconnected from the server.
    #[allow(unused_variables)]
    async fn client_disconnected(client: ConnectedClient<Self>, server: &CustomServer<Self>) {
        println!(
            "{:?} client disconnected ({:?})",
            client.transport(),
            client.address()
        );
    }
    /// A client successfully authenticated.
    #[allow(unused_variables)]
    async fn client_authenticated(client: ConnectedClient<Self>, server: &CustomServer<Self>) {
        println!(
            "{:?} client authenticated as user: {}",
            client.transport(),
            client.user_id().await.unwrap()
        );
    }
}
// The no-op backend: no custom API and a dispatcher that does nothing.
impl Backend for () {
    type CustomApi = ();
    type CustomApiDispatcher = NoDispatcher;
    fn dispatcher_for(
        _server: &CustomServer<Self>,
        _client: &ConnectedClient<Self>,
    ) -> Self::CustomApiDispatcher {
        NoDispatcher
    }
}
// This needs to be pub because of the impl, but the user doesn't need to know
// about this type.
#[doc(hidden)]
#[derive(Debug)]
/// Dispatcher used by the `()` backend; accepts unit requests and does nothing.
pub struct NoDispatcher;
#[async_trait]
impl actionable::Dispatcher<()> for NoDispatcher {
    type Result = anyhow::Result<()>;
    // Accepts the unit request and succeeds without doing anything.
    async fn dispatch(&self, _permissions: &actionable::Permissions, _request: ()) -> Self::Result {
        Ok(())
    }
}
/// Controls how a server should handle a connection.
/// Returned from [`Backend::client_connected`].
pub enum ConnectionHandling {
    /// The server should accept this connection.
    Accept,
    /// The server should reject this connection.
    Reject,
}
|
use quote::Tokens;
use quote::ToTokens;
/// Minimal pattern representation used when emitting tokens: either the
/// wildcard `_` or a named identifier.
pub enum Pat {
    /// The wildcard pattern `_`.
    Wild,
    /// A plain identifier pattern.
    Ident(&'static str),
}
impl ToTokens for Pat {
    /// Append this pattern's textual form to the token stream.
    fn to_tokens(&self, tokens: &mut Tokens) {
        // `&'static str` is `Copy`, so matching by value is fine here.
        let text: &str = match *self {
            Pat::Wild => "_",
            Pat::Ident(ident) => ident,
        };
        tokens.append(text);
    }
}
// NOTE(review): these `__`-prefixed traits look like hooks invoked from
// generated code (block/tuple expressions and borrow projections) — confirm
// against the macro that emits calls to them.
pub trait __ExprBlock<T> {
    // Record an expression statement and pass its value through.
    fn __expr(&mut self, expr: T) -> T;
    // Record a `let <pat> = <expr>;` statement and pass the value through.
    fn __stmnt_local(&mut self, pat: Pat, expr: T) -> T;
}
pub trait __ExprTuple<T> {
    type Output;
    // Record a tuple expression, producing the mapped output type.
    fn __expr(&mut self, tup: T) -> Self::Output;
}
pub trait __Ref<'a, T> {
    type Output;
    // Shared-borrow projection, analogous to `&expr`.
    fn __ref(&'a self) -> Self::Output;
}
pub trait __RefMut<'a, T> {
    type Output;
    // Mutable-borrow projection, analogous to `&mut expr`.
    fn __mut(&'a mut self) -> Self::Output;
}
|
use std::{
fmt::{Debug, Display},
hint::unreachable_unchecked,
str::FromStr,
};
use lazy_static::lazy_static;
use regex::Regex;
use super::Range;
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash)]
/// Issue numbering parsed from a Portuguese bibliographic string. The
/// `FromStr` impl guarantees at least one of the two fields is `Some`.
pub struct NumberVolume {
    // Issue number range, e.g. "N.1-2".
    number: Option<Range>,
    // Volume range; accepts arabic or roman numerals, e.g. "Volume XVIII".
    volume: Option<Range>,
}
impl FromStr for NumberVolume {
    type Err = ();
    /// Extract number ("N." / "Número(s)") and volume ("V." / "Volume(s)")
    /// ranges from a free-form string. Errs with `()` only when neither
    /// pattern matches at all; a matched group that fails to parse as a
    /// `Range` is silently dropped to `None`.
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        lazy_static! {
            static ref NUM_RE: Regex =
                Regex::new(r"N(?:úmeros?)?[.:]? ?((?:[\-e]? *\d+ *)+)").unwrap();
            static ref VOL_RE: Regex =
                Regex::new(r"V(?:olumes?)?[.:]? ?((?:[\-e]? *(?:\d+|[XIV]+))+)").unwrap();
        }
        let num_captures = NUM_RE.captures(value);
        let vol_captures = VOL_RE.captures(value);
        match (num_captures, vol_captures) {
            (None, None) => Err(()),
            (num, vol) => Ok(NumberVolume {
                // `and_then` replaces the original `map(..).flatten()`
                // (clippy::map_flatten); behavior is identical.
                number: num.and_then(|cap| cap.get(1).unwrap().as_str().parse().ok()),
                volume: vol.and_then(|cap| cap.get(1).unwrap().as_str().parse().ok()),
            }),
        }
    }
}
impl Display for NumberVolume {
    /// Formats as `n.~<number>, v.~<volume>` (either part omitted when absent).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // BUG FIX: the original matched on `(volume, number)` but bound the
        // first element as `n` and the second as `v`, so the labels were
        // swapped. Match on `(number, volume)` so the bindings line up.
        match (&self.number, &self.volume) {
            // Safety: the `FromStr` constructor guarantees at least one of
            // number or volume is Some, so this arm is unreachable.
            (None, None) => unsafe { unreachable_unchecked() },
            (Some(n), Some(v)) => write!(f, "n.~{}, v.~{}", n, v),
            (Some(n), None) => write!(f, "n.~{}", n),
            (None, Some(v)) => write!(f, "v.~{}", v),
        }
    }
}
impl Debug for NumberVolume {
    // Debug output intentionally mirrors Display for readable test failures.
    #[inline]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        <Self as Display>::fmt(self, f)
    }
}
#[test]
fn it_works() {
    // Each case pairs a real-world input string with its expected parse.
    let cases = [
        (
            "Número 1 e 2. Volume 18",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(18.into()),
            }),
        ),
        (
            "Números: 1 e 2 Volume: XVIII- XIX",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some((18..=19).into()),
            }),
        ),
        (
            "Número 1 e 2. Volume 16",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(16.into()),
            }),
        ),
        (
            "Volumes XI-XII-XIII",
            Ok(NumberVolume {
                number: None,
                volume: Some((11..=13).into()),
            }),
        ),
        (
            "Números 1 e 2. Volume 7",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(7.into()),
            }),
        ),
        (
            "N.1-2 V.18-19",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some((18..=19).into()),
            }),
        ),
        (
            "Volume IX - Número 1 e 2",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(9.into()),
            }),
        ),
        (
            "N.2 V.5",
            Ok(NumberVolume {
                number: Some(2.into()),
                volume: Some(5.into()),
            }),
        ),
        (
            "Número 1 e 2. Volume 8",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(8.into()),
            }),
        ),
        (
            "Números: 1 e 2 Volume: XIV",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(14.into()),
            }),
        ),
        (
            "Número 2. Volume 6",
            Ok(NumberVolume {
                number: Some(2.into()),
                volume: Some(6.into()),
            }),
        ),
        (
            "Volume X - Número 1 e 2",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(10.into()),
            }),
        ),
        (
            "N.1 e 2 V.4",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(4.into()),
            }),
        ),
        (
            "N.1 V.5",
            Ok(NumberVolume {
                number: Some(1.into()),
                volume: Some(5.into()),
            }),
        ),
        (
            "N.2 V.2",
            Ok(NumberVolume {
                number: Some(2.into()),
                volume: Some(2.into()),
            }),
        ),
        (
            "N.1 V.2",
            Ok(NumberVolume {
                number: Some(1.into()),
                volume: Some(2.into()),
            }),
        ),
        (
            "Números 1 e 2. Volume 9",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(9.into()),
            }),
        ),
        (
            "Número 1 e 2. Volume 9",
            Ok(NumberVolume {
                number: Some((1..=2).into()),
                volume: Some(9.into()),
            }),
        ),
        (
            "N.1 V.1",
            Ok(NumberVolume {
                number: Some(1.into()),
                volume: Some(1.into()),
            }),
        ),
    ];
    // Parse each input and compare against the expected result.
    for (res, expec) in cases.iter().map(|(s, exp)| (s.parse(), exp)) {
        assert_eq!(res, *expec);
    }
}
|
use model::*;
use idalloc::IdAlloc;
use std;
use std::mem;
use std::sync::{mpsc, Mutex, Condvar, Arc};
use std::collections::HashMap;
use std::any::Any;
#[derive(Debug, PartialEq, Eq, Hash)]
// Opaque identifier for registered watchers/handles.
struct Handle(usize);
#[derive(Debug, Clone, PartialEq)]
// A `Changeable` with no state of its own: applying a change only emits a signal.
pub struct JustSignal;
/// The most basic of changes, which just swaps the value with another.
/// `None` is the do-nothing change; `Some(signal)` emits that signal.
#[derive(Debug, Clone, PartialEq)]
pub struct JustSignalChange<ST>(pub Option<ST>);
impl<ST: 'static> Change for JustSignalChange<ST> where
    ST: std::cmp::PartialEq + Clone + std::fmt::Debug + Send
{
    type SignalType = ST;
    // The no-op change carries no signal.
    fn do_nothing() -> Self {
        JustSignalChange(None)
    }
}
impl<ST: 'static> Changeable<JustSignalChange<ST>> for JustSignal where
    ST: std::cmp::PartialEq + Clone + std::fmt::Debug + Send
{
    // There is no state to mutate; forward the optional signal to the watcher.
    fn changeable_apply(&mut self, change: JustSignalChange<ST>, watcher: &mut Watcher<ST>) {
        if let Some(signal_value) = change.0 {
            watcher.send_signal(signal_value);
        }
    }
    // Stateless, so a view refresh needs no signals.
    fn reset_view_signals(&self) -> Vec<ST> {
        vec![]
    }
}
/*struct Task<ST> {
handle: Handle,
name: String,
cancellable: bool,
cancelling: bool,
progress: f64,
progress_max: f64,
signal: JustSignal<String>,
}
impl_changeable_struct!{TaskChange<ST>[TaskSignal<ST>] for Task<ST>:
Name name: ValueChange<String>,
Cancellable cancellable: ValueChange<bool>,
Cancelling cancelling: ValueChange<bool>,
Progress progress: ValueChange<f64>,
ProgressMax progress_max: ValueChange<f64>,
Signal signal: JustSignalChange<ST>,
}*/
#[derive(Debug)]
/// Messages sent from `ApplyHandle`s to the owning `Manager` over the
/// asynchronous change queue.
pub enum ApplyHandleMessage {
    /// Apply the given change (wrapped in Any) using the ChangeConstructor with the ID.
    Apply(usize, Box<Any + Send>),
    /// Apply the given changes (wrapped in Any) all at once using the ChangeConstructor with the ID.
    ApplyAll(usize, Vec<Box<Any + Send>>),
    /// Indicate that an apply handle was cloned for the ChangeConstructor with the given ID, so
    /// increment its reference count.
    Clone(usize),
    /// Indicate that an apply handle was dropped for the ChangeConstructor with the given ID, so
    /// decrement its reference count.
    Drop(usize),
}
/// Type-erased handle that queues changes to a `Manager` from any thread.
pub struct ApplyHandleAny {
    // ID of the `ChangeConstructor` registered with the manager.
    id: usize,
    // Sending half of the manager's asynchronous change queue.
    async_change_queue_send: mpsc::Sender<ApplyHandleMessage>,
    // Used to wake the manager after enqueueing.
    async_change_notifier: AsyncChangeNotifier,
}
impl ApplyHandleAny {
    fn new(id: usize, async_change_queue_send: mpsc::Sender<ApplyHandleMessage>, async_change_notifier: AsyncChangeNotifier) -> ApplyHandleAny {
        ApplyHandleAny { id, async_change_queue_send, async_change_notifier }
    }
    /// Queue a single type-erased change for asynchronous application.
    pub fn invoke<C: 'static + Send>(&self, change: C) {
        // Best effort: `.ok()` silences unused-Result warnings — if the
        // manager (receiver) is gone, the change is simply dropped.
        self.async_change_queue_send.send(ApplyHandleMessage::Apply(self.id, Box::new(change) as Box<Any + Send>)).ok();
        self.async_change_notifier.notify();
    }
    /// Queue several changes to be applied together.
    pub fn invoke_all<C: 'static + Send>(&self, changes: Vec<C>) {
        let changes_any = changes.into_iter().map(|change| Box::new(change) as Box<Any + Send>).collect();
        self.async_change_queue_send.send(ApplyHandleMessage::ApplyAll(self.id, changes_any)).ok();
        self.async_change_notifier.notify();
    }
}
impl Clone for ApplyHandleAny {
    /// Clone the handle, telling the manager to bump the constructor's
    /// reference count so it stays alive for the new handle.
    fn clone(&self) -> ApplyHandleAny {
        // Best effort: if the manager is gone the bookkeeping no longer matters.
        self.async_change_queue_send.send(ApplyHandleMessage::Clone(self.id)).ok();
        ApplyHandleAny {
            id: self.id,
            async_change_queue_send: self.async_change_queue_send.clone(),
            async_change_notifier: self.async_change_notifier.clone(),
        }
    }
}
impl Drop for ApplyHandleAny {
    /// Tell the manager to drop one reference to the constructor; when the
    /// count reaches zero the constructor slot is deallocated.
    fn drop(&mut self) {
        // Best effort: ignore the error if the manager already shut down.
        self.async_change_queue_send.send(ApplyHandleMessage::Drop(self.id)).ok();
    }
}
#[derive(Clone)]
/// Typed wrapper around `ApplyHandleAny` that only accepts changes of type `C`.
pub struct ApplyHandle<C> {
    apply_handle_any: ApplyHandleAny,
    // Marks the change type without storing one.
    phantom: std::marker::PhantomData<C>,
}
impl<C: 'static + Send> ApplyHandle<C> {
    /// Wrap a type-erased handle, fixing the change type to `C`.
    fn new(inner: ApplyHandleAny) -> ApplyHandle<C> {
        ApplyHandle {
            apply_handle_any: inner,
            phantom: std::marker::PhantomData,
        }
    }
    /// Queue `change` for asynchronous application.
    pub fn invoke(&self, change: C) {
        self.apply_handle_any.invoke(change)
    }
}
/*struct TransactionScope<T: Changeable<C>, C: Change> {
apply_context: Option<&ApplyContext<T, C>>
}*/
/// An ApplyContext is something that can be used to apply changes to a model.
pub trait ApplyContext<T: Changeable<C>, C: Change> {
    /// Apply the given change to the model.
    fn apply(&mut self, change: C);
    // TODO: This mechanism doesn't account for ignoring nested transactions!
    /// Make a new transaction if this is part of a revertable model.
    fn new_transaction(&mut self, name: String);
    /// Create an ApplyHandleAny that should use the given constructor to build a change.
    fn apply_handle_any(&mut self, constructor: Box<ChangeConstructor<C>>) -> ApplyHandleAny;
    /// Creates an apply handle that can be used to asynchronously apply changes using the same
    /// context as this ApplyContext. This uses apply_handle_any, and does not need to be
    /// implemented manually.
    fn apply_handle(&mut self) -> ApplyHandle<C> {
        let any_handle = self.apply_handle_any(Box::new(LeafChangeConstructor::new()));
        ApplyHandle::new(any_handle)
    }
}
/// A model object that reacts to signals emitted by applied changes.
pub trait Object<C: Change> {
    // NOTE(review): `dispatch_struct_update!` calls this as
    // `<T as Object<_, _>>::update(..)` (two type parameters), and
    // `ApplyContext` is declared with two parameters `<T, C>` — both differ
    // from this one-parameter form; confirm which signature is current.
    fn update(cxt: &mut ApplyContext<C>, signal: &C::SignalType) {}
    //fn event(cxt: &mut ApplyType, event: EventType) {}
}
/// This macro is used to dispatch signals to their appropriate sub-structs.
///
/// Each listed field gets a match arm: when the signal's variant names that
/// field, `update` is invoked on the field's type with a `SubApplyContext`
/// that wraps child changes back into the parent change enum.
///
/// # Example
///
/// ```
/// dispatch_struct_update!{AppUiChange[AppUiSignal] for signal, cxt:
///     item_data: ItemData,
///     input_field: TextField,
/// }
/// ```
#[macro_export] macro_rules! dispatch_struct_update{
    ($change_name:ident[$signal_name:ident] for $signal:tt, $cxt:tt:
        $($field_name:ident: $type:ty,)+
    ) => {
        match *$signal {
            $(
                $signal_name::$field_name(ref subsignal) => {
                    type T = $type;
                    <T as Object<_, _>>::update(
                        &mut SubApplyContext::new(
                            $cxt,
                            &|model| &model.$field_name,
                            &mut |change| $change_name::$field_name(change),
                            //&|| Box::new(|change| $change_name::$field_name(change))
                            &|sub_constructor| Box::new(SubChangeConstructor::new(
                                sub_constructor,
                                //fn new(create_fn: fn(&std::any::Any) -> C, update_fn: fn(&C) -> bool) -> FnChangeConstructor<C> {
                                |sub_constructor, leaf_change| {
                                    let sub_change = sub_constructor.create(leaf_change);
                                    $change_name::$field_name(sub_change)
                                },
                                |sub_constructor, change| {
                                    if let $change_name::$field_name(ref sub_change) = *change {
                                        sub_constructor.update(sub_change)
                                    } else {
                                        true
                                    }
                                },
                                |sub_constructor| format!("{}/{}", stringify!($field_name), sub_constructor.debug_string())
                            ))
                        ),
                        subsignal
                    );
                }
            )*
            _ => {}
        }
    };
    // This allows for not including a final trailing comma
    ($change_name:ident[$signal_name:ident] for $signal:tt:
        $($field_name:ident: $type:ty),+
    ) => (
        dispatch_struct_update!{$change_name[$signal_name] for $signal:
            $($field_name: $type,)*
        }
    )
}
// Any acceptable Changeable type can be passed as an ApplyContext
// without being wrapped in anything else first.
/*impl<T: Changeable<C>, C> ApplyContext<T, C> for T {
fn get(&self) -> &T {
self
}
fn apply(&mut self, change: C) {
self.changeable_apply(change);
}
fn new_transaction(&mut self, _: String) {
// This ApplyContext is not undoable, so just ignore new transactions
}
}*/
/// Validates a batch of changes before they are committed to a model.
pub trait Validator<T: Changeable<C>, C: Change> {
    #[must_use]
    // Returns `Err(reason)` to reject the batch, `Ok(())` to accept it.
    fn validate(&mut self, cxt: &mut ApplyContext<T, C>, changes: &Vec<C>) -> Result<(), String>;
}
/// A `Validator` that unconditionally accepts every change batch.
pub struct NoValidator;
impl<T: Changeable<C>, C: Change> Validator<T, C> for NoValidator {
    // Underscore-prefixed parameters: intentionally unused (silences warnings).
    fn validate(&mut self, _cxt: &mut ApplyContext<T, C>, _changes: &Vec<C>) -> Result<(), String> { Ok(()) }
}
/*struct ValidatorApplier<'t, T: 't+Changeable<C>, C: 't+Change> {
manager_data: &'t mut ManagerData<T, C>,
}
impl<'t, T: Changeable<C>, C: Change+std::fmt::Debug+std::clone::Clone> ApplyContext<T, C> for ValidatorApplier<'t, T, C> {
fn get(&self) -> &T {
&self.manager_data.model
}
fn apply(&mut self, change: C) {
let signal_queue = &mut self.manager_data.signal_queue;
let mut watcher_fn = |signal| {
signal_queue.push(signal);
};
self.manager_data.model.changeable_apply(change.clone(), &mut SubWatcher::new(&mut watcher_fn));
//self.manager_data.change_queue.push(change);
}
fn apply_handle_any(&self, constructor: Box<ChangeConstructor<C>>) -> ApplyHandleAny {
//ApplyHandleAny::new(0, self.)
}
fn new_transaction(&mut self, name: String) {
// Ignore new transactions during validation
}
/*fn get_handle() -> Handle {
Handle(0)
}*/
}*/
// Shared condvar + mutex pair behind an `Arc` (see `AsyncChangeNotifier`).
struct AsyncChangeNotifierInternal {
    // The mutex guards no data; it exists only to satisfy `Condvar::wait`.
    mutex: Mutex<()>,
    condvar: Condvar,
}
#[derive(Clone)]
/// Cheaply-cloneable wake-up signal shared between apply handles and a manager.
pub struct AsyncChangeNotifier {
    internal: Arc<AsyncChangeNotifierInternal>,
}
impl AsyncChangeNotifier {
    fn new() -> AsyncChangeNotifier {
        AsyncChangeNotifier {
            internal: Arc::new(AsyncChangeNotifierInternal {
                mutex: Mutex::new(()),
                condvar: Condvar::new(),
            })
        }
    }
    /// Wake every thread currently blocked in `wait`.
    pub fn notify(&self) {
        self.internal.condvar.notify_all();
    }
    /// Block until the next `notify`.
    // NOTE(review): no predicate is checked and the mutex is only locked inside
    // this call, so a `notify` that fires before `wait` acquires the lock is
    // lost, and spurious wakeups return early — confirm callers tolerate both.
    pub fn wait(&self) {
        self.internal.condvar.wait(self.internal.mutex.lock().unwrap()).unwrap();
    }
}
/// Internal data for Manager.
struct ManagerData<T: Changeable<C>, C: Change> {
    /// The managed model.
    model: T,
    /// Signals yet to be sent to the view.
    signal_queue: Vec<C::SignalType>,
    //handles: HashMap<Handle, C::SignalType>,
    /// ID allocator for ChangeConstructors, paired with a reference count.
    /// If the option is None, then the constructor became
    /// invalid and was removed.
    change_constructors: IdAlloc<(Option<Box<ChangeConstructor<C>>>, usize)>,
}
impl<T, C> ManagerData<T, C> where
T: Changeable<C>,
C: 'static + Change + std::fmt::Debug + std::clone::Clone,
{
fn apply_change(&mut self, change: C) {
let signal_queue = &mut self.signal_queue;
let mut watcher_fn = |signal| {
signal_queue.push(signal);
};
self.model.changeable_apply(change.clone(), &mut SubWatcher::new(&mut watcher_fn));
self.change_constructors.apply_to_all_mut(&mut |_, &mut (ref mut opt_change_constructor, _)| {
let still_valid = if let Some(ref mut change_constructor) = *opt_change_constructor {
change_constructor.update(&change)
} else {
true
};
if !still_valid {
*opt_change_constructor = None;
}
});
}
fn process_apply_handle_message(&mut self, message: ApplyHandleMessage) {
match message {
ApplyHandleMessage::Apply(id, any_change) => {
let mut opt_change = None;
if let (Some(ref change_constructor), _) = *self.change_constructors.get(id) {
opt_change = Some(change_constructor.create(any_change));
} else {
println!("Invalidated apply handle was called");
}
if let Some(change) = opt_change {
//println!("Applying {:?}: {:?}", id, change);
self.apply_change(change);
}
}
ApplyHandleMessage::ApplyAll(id, any_changes) => {
any_changes.into_iter().map(|any_change| {
let mut opt_change = None;
if let (Some(ref change_constructor), _) = *self.change_constructors.get(id) {
opt_change = Some(change_constructor.create(any_change));
} else {
println!("Invalidated apply handle was called");
}
if let Some(change) = opt_change {
//println!("Applying {:?}: {:?}", id, change);
self.apply_change(change);
}
});
}
ApplyHandleMessage::Clone(id) => {
println!("Clone {}", id);
let (_, ref mut ref_count) = *self.change_constructors.get_mut(id);
*ref_count += 1;
}
ApplyHandleMessage::Drop(id) => {
println!("Drop {:?}", id);
let mut should_dealloc = false;
{
let (_, ref mut ref_count) = *self.change_constructors.get_mut(id);
*ref_count -= 1;
if *ref_count == 0 {
should_dealloc = true;
}
}
if should_dealloc {
println!("Dealloc {:?}", id);
self.change_constructors.deallocate(id);
}
}
}
}
}
// To modify the model or listen to modifications of the model, you need access
// to the model's manager.
pub struct Manager<T: Changeable<C>, C: Change, V: Validator<T, C>> {
    // This data is separated so that it can be passed to the validator
    data: ManagerData<T, C>,
    /// Asynchronous change queue.
    async_change_queue_recv: mpsc::Receiver<ApplyHandleMessage>,
    // Retained so `apply_handle_any` can hand out clones to new handles.
    async_change_queue_send: mpsc::Sender<ApplyHandleMessage>,
    // Wakes threads blocked on `AsyncChangeNotifier::wait`.
    async_change_notifier: AsyncChangeNotifier,
    // While the validator is in use, the other stuff still needs to be
    // appliable. Solution: make a ValidatedManager and a non-validated Manager
    validator: V,
}
impl<T, C, V> Manager<T, C, V> where
    T: Changeable<C> + Object<T, C>,
    C: 'static + Change + std::fmt::Debug + std::clone::Clone,
    V: Validator<T, C>,
{
    /// Create a manager owning `model`, with an empty signal queue and a
    /// fresh asynchronous change channel.
    pub fn new(model: T, validator: V) -> Manager<T, C, V> {
        let (async_change_queue_send, async_change_queue_recv) = mpsc::channel();
        Manager {
            data: ManagerData {
                model,
                signal_queue: vec![],
                //handles: HashMap::new(),
                change_constructors: IdAlloc::new(),
            },
            async_change_queue_send,
            async_change_queue_recv,
            async_change_notifier: AsyncChangeNotifier::new(),
            validator,
        }
    }
    /// This destroys the manager forever, returning the internal model as value
    pub fn take_model(self) -> T {
        self.data.model
    }
    /// Gets a list of all signals that need to be emitted to refresh the state
    /// of a view watching this manager.
    pub fn reset_view_signals(&self) -> Vec<C::SignalType> {
        self.data.model.reset_view_signals()
    }
    /*pub fn apply_option(&mut self, optional_change: Option<C>) {
        if let Some(change) = optional_change {
            self.apply(change);
        }
    }*/
    /// Process queued handle messages.
    // NOTE(review): `Receiver::iter` only ends once every `Sender` is dropped,
    // and `self` holds one itself — so this blocks indefinitely. Confirm it is
    // meant as a run-forever pump; use `try_process_async_changes` otherwise.
    pub fn process_async_changes(&mut self) {
        for message in self.async_change_queue_recv.iter() {
            self.data.process_apply_handle_message(message);
        }
    }
    /// Handle only the messages already queued, without blocking.
    pub fn try_process_async_changes(&mut self) {
        for message in self.async_change_queue_recv.try_iter() {
            self.data.process_apply_handle_message(message);
        }
    }
    /// Clone of the notifier that handles use to wake this manager.
    pub fn get_async_change_notifier(&self) -> AsyncChangeNotifier {
        self.async_change_notifier.clone()
    }
    /// Updates the view with all queued signals.
    pub fn resolve_signals(&mut self) {
        // Loop because handling a signal may queue further signals.
        loop {
            let signal_queue = self.take_signal_queue();
            if signal_queue.len() == 0 {
                break;
            }
            for signal in signal_queue {
                //println!("Signal: {:?}", signal);
                T::update(self, &signal);
            }
        }
    }
    /// Returns the list of currently queued signals and empties the internal
    /// queue.
    pub fn take_signal_queue(&mut self) -> Vec<C::SignalType> {
        mem::replace(&mut self.data.signal_queue, vec![])
    }
    // Currently a stub: validation is disabled until transactions exist.
    fn validate_changes(&mut self, changes: &Vec<C>) {
        // TODO: Make the validator function run after a transaction has
        // completed, and allow the validator to fail, causing it to undo all
        // changes in that transaction and discard them
        /*let mut applier = ValidatorApplier {
            manager_data: &mut self.data,
        };
        self.validator.validate(&mut applier, changes);*/
    }
}
impl<T, C, V> ApplyContext<T, C> for Manager<T, C, V> where
    T: Changeable<C>,
    C: 'static + Change + std::fmt::Debug + std::clone::Clone,
    V: Validator<T, C>,
{
    /// Apply a change synchronously through the manager.
    fn apply(&mut self, change: C) {
        //println!("{:?}", change);
        self.data.apply_change(change);
        //self.validate_change(&change);
        //self.data.change_queue.push(change);
    }
    fn new_transaction(&mut self, name: String) {
        // There is no undoing in managers, so ignore new transaction requests
    }
    /// Register `constructor` (initial refcount 1) and hand out a handle that
    /// can feed it type-erased changes from other threads.
    fn apply_handle_any(&mut self, constructor: Box<ChangeConstructor<C>>) -> ApplyHandleAny {
        let debug_string = constructor.debug_string();
        let (_, id) = self.data.change_constructors.allocate((Some(constructor), 1));
        println!("Creating apply handle({}): /{}", id, debug_string);
        let new_send = self.async_change_queue_send.clone();
        let async_change_notifier = self.async_change_notifier.clone();
        /*Box::new(move |leaf_change| {
            new_send.send();
            async_change_notifier.notify();
        })*/
        ApplyHandleAny::new(id, new_send, async_change_notifier)
    }
    /*fn apply_handle(&self) -> Box<Fn(C) + Send> {
        let new_send = self.async_change_queue_send.clone();
        let async_change_notifier = self.async_change_notifier.clone();
        Box::new(move |change| {
            println!("Change: {:?}", change);
            new_send.send(change).unwrap();
            async_change_notifier.notify();
        })
    }*/
    /*fn get_handle() -> Handle {
        Handle(0)
    }*/
}
/*struct Message<'t, 'c, T: Revertable<C>+'t, C: 'c> {
data: &'t T,
change: &'c C,
}
trait View<T: Revertable<C>, C> {
fn dispatch(message: Message<T, C>) {
}
}*/
// 'p is the parent's lifetime, 'c is the child-item's lifetime. T is for the
// represented model type, C is for the type T has Changeable implemented
// for. PT and PC are the respective equivalent types for the parent's context.
pub struct SubApplyContext<'p, PT: 'p + Changeable<PC>, PC: 'static + Change, T: 'p + Changeable<C>, C: 'static + Change> {
    // Context the wrapped child changes are forwarded to.
    parent_context: &'p mut ApplyContext<PT, PC>,
    // Wraps a child change into the parent's change type.
    wrap_fn: &'p Fn(C) -> PC,
    //box_wrap_fn: &'p Fn() -> Box<Fn(C) -> PC + Send>,
    // Wraps a child change constructor into a parent-level constructor.
    wrap_constructor_fn: &'p Fn(Box<ChangeConstructor<C>>) -> Box<ChangeConstructor<PC>>,
    //apply_handle_fn: &'p Fn(C) -> Box<Fn(PC)>,
    //handle_fn: &'p Fn(C) -> Handle,
}
impl<'p, PT: 'p+Changeable<PC>, PC: 'static + Change, T: Changeable<C>, C: 'static + Change> SubApplyContext<'p, PT, PC, T, C> {
    /// Build a child context that forwards wrapped changes to `parent_context`.
    pub fn new(
        parent_context: &'p mut ApplyContext<PT, PC>,
        wrap_fn: &'p Fn(C) -> PC,
        //box_wrap_fn: &'p Fn() -> Box<Fn(C) -> PC + Send>,
        wrap_constructor_fn: &'p Fn(Box<ChangeConstructor<C>>) -> Box<ChangeConstructor<PC>>,
    ) -> SubApplyContext<'p, PT, PC, T, C> {
        SubApplyContext {
            parent_context,
            wrap_fn,
            //box_wrap_fn,
            wrap_constructor_fn,
        }
    }
}
impl<'p, PT: 'p+Changeable<PC>, PC: 'static + Change, T: Changeable<C>, C: 'static + Change> ApplyContext<T, C> for SubApplyContext<'p, PT, PC, T, C> {
    // Wrap the child change into the parent type and forward it upward.
    fn apply(&mut self, change: C) {
        self.parent_context.apply((self.wrap_fn)(change));
    }
    // Wrap the constructor likewise, so handles route through the parent.
    fn apply_handle_any(&mut self, constructor: Box<ChangeConstructor<C>>) -> ApplyHandleAny {
        let parent_constructor = (self.wrap_constructor_fn)(constructor);
        self.parent_context.apply_handle_any(parent_constructor)
    }
    /*fn apply_handle(&self) -> Box<Fn(C) + Send> {
        let parent_handle = self.parent_context.apply_handle();
        let wrap_fn = (self.box_wrap_fn)();
        Box::new(move |change| {
            parent_handle(wrap_fn(change));
        })
    }*/
    fn new_transaction(&mut self, name: String) {
        // Because this SubApplyContext consumes changes, it can't undo,
        // therefore new transactions are ignored
    }
}
/// Builds a `SubApplyContext` targeting a (possibly nested) field.
///
/// eg. `sub_apply!(cxt, AppUiChange::title_field.TextFieldChange::item_data)`
#[macro_export] macro_rules! sub_apply {
    (@impl ($($change_name:tt)*) $change_type:ident::$field_name:ident $($sub_change_type:ident::$sub_field_name:ident)+) => {
        $change_type::$field_name(sub_apply!(@impl ($($change_name)*) $($sub_change_type::$sub_field_name)*))
    };
    (@impl ($($change_name:tt)*) $change_type:ident::$field_name:ident) => {
        $change_type::$field_name($($change_name)*)
    };
    ($cxt:ident, $change_type:ident::$field_name:ident $(.$sub_change_type:ident::$sub_field_name:ident)*) => {
        &mut SubApplyContext::new(
            $cxt,
            &|model| &model.$field_name $(.$sub_field_name)*,
            &|change| sub_apply!(@impl (change) $change_type::$field_name $($sub_change_type::$sub_field_name)*),
            //&|| Box::new(|change| sub_apply!(@impl change $change_type::$field_name $($sub_change_type::$sub_field_name)*))
            &|sub_constructor| Box::new(SubChangeConstructor::new(
                sub_constructor,
                //fn new(create_fn: fn(&std::any::Any) -> C, update_fn: fn(&C) -> bool) -> FnChangeConstructor<C> {
                |sub_constructor, leaf_change| {
                    let sub_change = sub_constructor.create(leaf_change);
                    //$change_type::$field_name(sub_change)
                    sub_apply!(@impl (sub_change) $change_type::$field_name $($sub_change_type::$sub_field_name)*)
                },
                |sub_constructor, change| {
                    //if let $change_type::$field_name(ref sub_change) = change {
                    if let sub_apply!(@impl (ref sub_change) $change_type::$field_name $($sub_change_type::$sub_field_name)*) = *change {
                        sub_constructor.update(sub_change)
                    } else {
                        true
                    }
                },
                |sub_constructor| format!("{}/{}", stringify!($field_name).to_string() $(+ "/" + stringify!($sub_field_name))*, sub_constructor.debug_string())
            ))
        )
    };
}
/// Like `SubApplyContext`, but for `Revertable` models: also forwards
/// transaction boundaries via `new_transaction_fn`.
pub struct RevertableSubApplyContext<'p, 'c, PT: 'p + Revertable<PC>, PC: 'static + Change, T: 'p + Revertable<C>, C: 'static + Change> {
    parent_context: &'p mut ApplyContext<PT, PC>,
    // Child model view; retained alongside the parent context.
    model: &'p T,
    wrap_fn: &'c Fn(C) -> PC,
    //box_wrap_fn: &'p Fn() -> Box<Fn(C) -> PC>,
    wrap_constructor_fn: &'p Fn(Box<ChangeConstructor<C>>) -> Box<ChangeConstructor<PC>>,
    // Invoked when a nested scope requests a new named transaction.
    new_transaction_fn: &'c mut Fn(String),
}
impl<'p, 'c, PT: Revertable<PC>, PC: 'static + Change, T: Revertable<C>, C: 'static + Change> RevertableSubApplyContext<'p, 'c, PT, PC, T, C> {
    /// Build a revertable child context forwarding to `parent_context`.
    pub fn new(
        parent_context: &'p mut ApplyContext<PT, PC>,
        model: &'p T,
        wrap_fn: &'c Fn(C) -> PC,
        //box_wrap_fn: &'p Fn() -> Box<Fn(C) -> PC>,
        wrap_constructor_fn: &'p Fn(Box<ChangeConstructor<C>>) -> Box<ChangeConstructor<PC>>,
        new_transaction_fn: &'c mut Fn(String)
    ) -> RevertableSubApplyContext<'p, 'c, PT, PC, T, C> {
        RevertableSubApplyContext {
            parent_context,
            model,
            wrap_fn,
            //box_wrap_fn,
            wrap_constructor_fn,
            new_transaction_fn,
        }
    }
}
impl<'p, 'c, PT: Revertable<PC>, PC: 'static + Change, T: Revertable<C>, C: 'static + Change> ApplyContext<T, C> for RevertableSubApplyContext<'p, 'c, PT, PC, T, C> {
    // Wrap the child change into the parent type and forward it upward.
    fn apply(&mut self, change: C) {
        self.parent_context.apply((self.wrap_fn)(change));
    }
    // Wrap the constructor likewise, so handles route through the parent.
    fn apply_handle_any(&mut self, constructor: Box<ChangeConstructor<C>>) -> ApplyHandleAny {
        let parent_constructor = (self.wrap_constructor_fn)(constructor);
        self.parent_context.apply_handle_any(parent_constructor)
    }
    /*fn apply_handle(&self) -> Box<Fn(C) + Send> {
        //let parent_handle = self.parent_context.apply_handle();
        Box::new(move |change| {
            //parent_handle((self.apply_handle_fn)(change));
        })
    }*/
    // Unlike `SubApplyContext`, transactions are forwarded to the callback.
    fn new_transaction(&mut self, name: String) {
        (self.new_transaction_fn)(name);
    }
}
/*trait View {
fn dispatch(Revertable) ->
}*/
|
//! Gameboard view.
use graphics::types::Color;
use graphics::{Context, Graphics};
use graphics::character::CharacterCache;
use GameboardController;
/// Stores gameboard view settings.
pub struct GameboardViewSettings {
    /// Position from left-top corner.
    pub position: [f64; 2],
    /// Size of gameboard along horizontal and vertical edge.
    pub size: f64,
    /// Black tile background color.
    pub black_background_color: Color,
    /// White tile background color.
    pub white_background_color: Color,
    /// Border color.
    pub border_color: Color,
    /// Edge color around the whole board.
    pub board_edge_color: Color,
    /// Edge color between the 3x3 sections.
    pub section_edge_color: Color,
    /// Edge color between cells.
    pub cell_edge_color: Color,
    /// Edge radius around the whole board.
    pub board_edge_radius: f64,
    /// Edge radius between the 3x3 sections.
    pub section_edge_radius: f64,
    /// Edge radius between cells.
    pub cell_edge_radius: f64,
    /// Color of Selected Cell.
    pub selected_cell_background_color: Color,
    /// Highlight color for cells the selected piece may legally move to.
    pub legal_move_background_color: Color,
    /// Text color.
    pub text_color: Color,
}
impl GameboardViewSettings {
    /// Creates new gameboard view settings.
    ///
    /// All values are fixed defaults: a 400x400 board at (10, 10) with a
    /// grey/white checker palette and dark blue edges and text.
    pub fn new() -> GameboardViewSettings {
        GameboardViewSettings {
            position: [10.0; 2],
            size: 400.0,
            black_background_color: [0.5, 0.5, 0.5, 1.0],
            white_background_color: [0.9, 0.9, 0.9, 0.9],
            border_color: [0.0, 0.0, 0.2, 1.0],
            board_edge_color: [0.0, 0.0, 0.2, 1.0],
            section_edge_color: [0.0, 0.0, 0.2, 1.0],
            cell_edge_color: [0.0, 0.0, 0.2, 1.0],
            board_edge_radius: 3.0,
            section_edge_radius: 2.0,
            cell_edge_radius: 1.0,
            selected_cell_background_color: [0.4, 0.4, 1.0, 1.0],
            legal_move_background_color: [0.7, 0.7, 1.0, 1.0],
            text_color: [0.0, 0.0, 0.1, 1.0]
        }
    }
}

// A zero-argument `new` should be mirrored by `Default` so the type works
// with `..Default::default()` and derive contexts (clippy::new_without_default).
impl Default for GameboardViewSettings {
    /// Equivalent to [`GameboardViewSettings::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Stores visual information about a gameboard.
///
/// Currently this is just the settings bundle consumed by `draw`.
pub struct GameboardView {
    /// Stores gameboard view settings.
    pub settings: GameboardViewSettings,
}
impl GameboardView {
    /// Creates a new gameboard view.
    pub fn new(settings: GameboardViewSettings) -> GameboardView {
        // Field-init shorthand instead of the redundant `settings: settings`.
        GameboardView { settings }
    }

    /// Draw gameboard.
    ///
    /// Renders, in order: the checkered background, the selected cell and
    /// its legal-move highlights, the piece glyphs, the cell grid lines and
    /// finally the board edge.
    pub fn draw<G: Graphics, C>(
        &self,
        controller: &GameboardController,
        glyphs: &mut C,
        c: &Context,
        g: &mut G)
        where C: CharacterCache<Texture = G::Texture>
    {
        use graphics::{Image, Line, Rectangle, Transformed};

        // Idiomatic borrow instead of `let ref settings = ...`
        // (clippy::toplevel_ref_arg style).
        let settings = &self.settings;
        let board_rect = [
            settings.position[0], settings.position[1],
            settings.size, settings.size,
        ];
        // Draw board background: fill everything white, then overlay the
        // dark squares.
        let black_tile = Rectangle::new(settings.black_background_color);
        let white_tile = Rectangle::new(settings.white_background_color);
        white_tile.draw(board_rect, &c.draw_state, c.transform, g);
        for i in 0..8 {
            for j in 0..8 {
                let black_rect = [
                    settings.position[0] + (settings.size / 8.0 * j as f64),
                    settings.position[1] + (settings.size / 8.0 * i as f64),
                    settings.size / 8.0, settings.size / 8.0
                ];
                // Checker pattern: dark squares where row + column is odd.
                if (i + j) % 2 != 0 {
                    black_tile.draw(black_rect, &c.draw_state, c.transform, g);
                }
            }
        }
        // Color selected cell and highlight its legal destination cells.
        if let Some(ind) = controller.selected_cell {
            let cell_size = settings.size / 8.0;
            let pos = [ind[0] as f64 * cell_size, ind[1] as f64 * cell_size];
            let cell_rect = [
                settings.position[0] + pos[0], settings.position[1] + pos[1], cell_size, cell_size
            ];
            Rectangle::new(settings.selected_cell_background_color)
                .draw(cell_rect, &c.draw_state, c.transform, g);
            let rects_to_color = &controller.legal_moves;
            for i in rects_to_color {
                let pos = [i[0] as f64 * cell_size, i[1] as f64 * cell_size];
                let cell_rect = [
                    settings.position[0] + pos[0], settings.position[1] + pos[1], cell_size, cell_size
                ];
                Rectangle::new(settings.legal_move_background_color)
                    .draw(cell_rect, &c.draw_state, c.transform, g);
            }
        }
        // Draw characters (piece glyphs) for every occupied cell.
        let text_image = Image::new_color(settings.text_color);
        let cell_size = settings.size / 8.0;
        for j in 0..8 {
            for i in 0..8 {
                if let Some(ch) = controller.gameboard.char([i, j]) {
                    // The 15.0/34.0 offsets position the glyph inside the
                    // cell — presumably hand-tuned for the 34px font below;
                    // confirm if the font size changes.
                    let pos = [
                        settings.position[0] + i as f64 * cell_size + 15.0,
                        settings.position[1] + j as f64 * cell_size + 34.0
                    ];
                    let character = glyphs.character(34, ch);
                    let ch_x = pos[0] + character.left();
                    let ch_y = pos[1] - character.top();
                    text_image.draw(character.texture,
                                    &c.draw_state,
                                    c.transform.trans(ch_x, ch_y),
                                    g);
                }
            }
        }
        // Draw cell borders: one vertical and one horizontal line per step.
        let cell_edge = Line::new(settings.cell_edge_color, settings.cell_edge_radius);
        for i in 0..8 {
            let x = settings.position[0] + i as f64 / 8.0 * settings.size;
            let y = settings.position[1] + i as f64 / 8.0 * settings.size;
            let x2 = settings.position[0] + settings.size;
            let y2 = settings.position[1] + settings.size;
            let vline = [x, settings.position[1], x, y2];
            cell_edge.draw(vline, &c.draw_state, c.transform, g);
            let hline = [settings.position[0], y, x2, y];
            cell_edge.draw(hline, &c.draw_state, c.transform, g);
        }
        // Draw board edge.
        Rectangle::new_border(settings.board_edge_color, settings.board_edge_radius)
            .draw(board_rect, &c.draw_state, c.transform, g);
    }
}
|
use kyoto::data::{ Server, Params };
use kyoto::network::listen::listen;
use structopt::StructOpt;
/* Main function for kyoto.
* Start a webserver to listen to given port and accept new connections. */
pub fn main() -> kyoto::Result<()> {
/* Enable logging diagnostics. */
tracing_subscriber::fmt::try_init()?;
let params = Params::from_args();
let server = Server::new(params);
listen(server)?;
Ok(())
} |
/*
code copyinspired from: git@github.com:y-stm/rust-iconv.git (witch have a MIT license: https://raw.githubusercontent.com/andelf/rust-iconv/9e2fecaa09c5d1d632fc34b0291d31002f3053c0/Cargo.toml)
*/
use errno::{errno, Errno};
use libc;
use libc::size_t;
use log::info;
use std::borrow::Cow;
use std::error::Error;
use std::ffi::CString;
use std::fmt::{Display, Formatter};
#[link(name = "iconv")]
// NOTE(review): `#[link]` has no effect on a `mod` — it belongs on the
// `extern` block below. Left in place because moving it would start actually
// linking `-liconv`, which may break glibc targets where iconv lives inside
// libc; confirm the intended targets before relocating it.
/// Raw FFI declarations and sentinel checks for the system `iconv(3)` API.
pub mod raw {
    use libc::{c_char, c_int, c_void, size_t};

    #[allow(non_camel_case_types)]
    pub type iconv_t = *mut c_void;

    /// Check whether iconv_t is successfully created
    ///
    /// `iconv_open(3)` returns (iconv_t)-1 when failed to create new iconv object.
    #[inline]
    pub fn is_iconv_t_valid(cd: iconv_t) -> bool {
        // (iconv_t)-1 is the failure sentinel documented by iconv_open(3).
        let err = -1 as isize as *mut core::ffi::c_void;
        cd != err
    }

    /// Check whether iconv conversion is successfully done.
    ///
    /// `iconv(3)` returns (size_t)-1 when conversion failed.
    #[inline]
    pub fn is_iconv_valid(v: size_t) -> bool {
        // (size_t)-1 is simply size_t::MAX; no unsafe transmute is needed
        // to compare against it.
        v != size_t::MAX
    }

    extern "C" {
        pub fn iconv(
            cd: iconv_t,
            inbuf: *const *const u8,
            inbytesleft: *mut size_t,
            outbuf: *mut *mut u8,
            outbytesleft: *mut size_t,
        ) -> size_t;
        pub fn iconv_open(tocode: *const c_char, fromcode: *const c_char) -> iconv_t;
        pub fn iconv_close(cd: iconv_t) -> c_int;
    }
}
/// The error given by iconv.
///
/// This include the below:
/// * Input/output buffer has no sufficient room during conversion.
/// * Failed to create iconv object
/// * Failed to make CString of encoding name (at `Iconv::new`)
#[derive(Debug)]
pub enum IconvError {
    /// `iconv_open(3)` failed; carries the reported `errno`.
    OnFindingConversion(Errno),
    /// An encoding name contained an interior NUL byte.
    OnCStringConversion(std::ffi::NulError),
    /// Any other conversion failure, described by a message.
    OnConversion(Cow<'static, str>),
    /// Invalid multibyte sequence; payload is (bytes consumed, bytes written).
    InvalidSequence(usize, usize),
    /// Output buffer too small; payload is (bytes left to convert, bytes written).
    InsufficientOutBuffer(usize, usize),
    /// Input ended mid-sequence; payload is (bytes consumed, bytes written).
    InsufficientInBuffer(usize, usize),
}
impl IconvError {
    /// Returns short description of `IconvError`
    ///
    /// This is used in `(IconvError as Display)::fmt`
    pub fn to_str(&self) -> String {
        use self::IconvError::*;
        match *self {
            OnFindingConversion(ref e) => format!("C function `iconv_open` failed: {}", e),
            OnCStringConversion(ref e) => format!("CString::new failed: {}", e),
            // `Cow<str>` implements Display; the old
            // `cow_str.to_owned().to_string()` cloned twice for nothing.
            OnConversion(ref cow_str) => cow_str.to_string(),
            InsufficientOutBuffer(ref left_to_convert, ref wrote_bytes) => format!(
                "Need more room in dst buffer. {} bytes remain. {} bytes written",
                left_to_convert, wrote_bytes
            ),
            InsufficientInBuffer(ref remain_index, ref wrote_index) => format!(
                "Need more input to complete conversion. {} bytes remain, {} bytes written",
                remain_index, wrote_index
            ),
            // Typo fix in the user-facing message: "charactor" -> "character".
            InvalidSequence(ref remain_index, ref wrote_index) => format!(
                "Source text has invalid multibyte character sequence at {}. {} bytes \
                 written",
                remain_index, wrote_index
            ),
        }
    }
}
impl Display for IconvError {
    /// Delegates to [`IconvError::to_str`] for the human-readable message.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), std::fmt::Error> {
        let message = self.to_str();
        fmt.write_str(&message)
    }
}
impl Error for IconvError {
    // NOTE(review): `Error::description` has been deprecated in favour of
    // `Display` since Rust 1.42. The string (including the "happend" typo)
    // is kept byte-identical to preserve behavior for any remaining callers.
    fn description(&self) -> &str {
        "Error happend using Iconv"
    }
}
/// Owning wrapper around an `iconv(3)` conversion descriptor.
pub struct Iconv {
    // Raw descriptor from `iconv_open`; released via `iconv_close` in `Drop`.
    iconv: raw::iconv_t,
}
impl Iconv {
    /// Creates a converter that transforms text from `fromcode` to `tocode`.
    ///
    /// # Errors
    /// * `OnCStringConversion` - an encoding name contained an interior NUL.
    /// * `OnFindingConversion` - `iconv_open(3)` rejected the encoding pair.
    pub fn new(tocode: &str, fromcode: &str) -> Result<Iconv, IconvError> {
        let to_iconv_err = |null_err| Err(IconvError::OnCStringConversion(null_err));
        let tocode_c = CString::new(tocode).or_else(&to_iconv_err)?;
        let fromcode_c = CString::new(fromcode).or_else(&to_iconv_err)?;
        unsafe {
            // Borrow the buffers with `as_ptr`: the CStrings live until the
            // end of this scope, which covers the call. The previous
            // `into_raw` calls leaked both allocations because the raw
            // pointers were never reclaimed with `CString::from_raw`.
            let cd = raw::iconv_open(tocode_c.as_ptr(), fromcode_c.as_ptr());
            if !raw::is_iconv_t_valid(cd) {
                return Err(IconvError::OnFindingConversion(errno()));
            }
            Ok(Iconv { iconv: cd })
        }
    }

    /// Convert src sequence into another encoding.
    ///
    /// Convert src sequence into another encoding, putting them into dst buffer from start.
    /// # Arguments
    /// * `src` - The input text (&[u8] encoded in "from encoding")
    /// * `dst` - Where the converted text is stored.
    /// # Returns
    /// * `Result::Ok(usize)` - When all text in `src` are converted successfully,
    ///   it returns the bytes written to `dst`
    /// * `Result::Err(InsufficientOutBuffer(usize, usize))` -
    ///   If there's no sufficient room in `dst`, returns the index of `src` where conversion
    ///   is processed and the index of `dst` where the converted text is written.
    pub fn convert_raw(&mut self, src: &[u8], mut dst: &mut [u8]) -> Result<usize, IconvError> {
        use self::IconvError::*;
        let mut inbytes_left = src.len();
        let mut outbytes_left = dst.len();
        unsafe {
            let inbytes_left = &mut inbytes_left as *mut size_t;
            let outbytes_left = &mut outbytes_left as *mut size_t;
            // iconv(3) advances `*inbuf`/`*outbuf`, so pass pointers to the
            // data-pointer word of each slice's fat pointer. Only the
            // pointer word is touched; `src.len()`/`dst.len()` below still
            // read the original lengths.
            let unsafe_src = &src as *const &[u8] as *const *const u8;
            let unsafe_dst = &mut dst as *mut &mut [u8] as *mut *mut u8;
            let res = raw::iconv(
                self.iconv,
                unsafe_src,
                inbytes_left,
                unsafe_dst,
                outbytes_left,
            );
            info!(
                "inbytes_left:{}, outbytes_left:{}, res:{}",
                *inbytes_left, *outbytes_left, res
            );
            if !raw::is_iconv_valid(res) {
                // Map errno to a structured error carrying how far the
                // conversion got: (src bytes consumed, dst bytes written),
                // both relative to the slices passed in.
                let Errno(err_num) = errno();
                match err_num {
                    libc::E2BIG => {
                        info!("output buffer has no sufficient room");
                        return Err(InsufficientOutBuffer(
                            src.len() - (*inbytes_left as usize),
                            dst.len() - (*outbytes_left as usize),
                        ));
                    }
                    libc::EILSEQ => {
                        info!("Invalid multibyte in input");
                        return Err(InvalidSequence(
                            src.len() - (*inbytes_left as usize),
                            dst.len() - (*outbytes_left as usize),
                        ));
                    }
                    libc::EINVAL => {
                        info!("Insufficient multibyte in input. Maybe more input is needed.");
                        return Err(InsufficientInBuffer(
                            src.len() - (*inbytes_left as usize),
                            dst.len() - (*outbytes_left as usize),
                        ));
                    }
                    _ => {
                        info!("Unknown error");
                        return Err(OnConversion(Cow::Borrowed("Unknown Error")));
                    }
                }
            }
            Ok(dst.len() - (*outbytes_left as usize))
        }
    }

    /// Converts text into another encoding, growing `dst` as needed.
    ///
    /// # Arguments
    /// * `src` - The input text
    /// * `dst` - The output text encoded in this struct.
    /// * `start_index` - The index of `dst` where output will be written.
    ///
    /// # Returns
    /// * Ok(unit) - Returns when all text in `src` has converted into `dst`
    /// * IconvError(InsufficientInBuffer(usize, usize))
    ///   - Returns when `src` has incomplete sequence.
    ///     It may be recovered by adding more sequence of input.
    /// * IconvError(InvalidSequence(usize, usize))
    ///   - Returns when `src` has invalid multibyte sequence.
    /// * IconvError(OnConversion(_)) - Something wrong happend due to any other reason
    pub fn convert(
        &mut self,
        src: &[u8],
        dst: &mut Vec<u8>,
        start_index: usize,
    ) -> Result<(), IconvError> {
        let mut src_index = 0;
        let mut dst_index = start_index;
        loop {
            match self.convert_raw(&src[src_index..], &mut dst[dst_index..]) {
                Ok(written) => {
                    // `written` is relative to the slice starting at
                    // `dst_index`; trim the vector to the exact output size.
                    dst.truncate(dst_index + written);
                    return Ok(());
                }
                Err(IconvError::InsufficientOutBuffer(consumed, written)) => {
                    // The indices reported by `convert_raw` are relative to
                    // the slices we passed in; accumulate them into absolute
                    // positions. (The old code assigned them directly, which
                    // went wrong from the second growth iteration onwards.)
                    src_index += consumed;
                    dst_index += written;
                    // Grow by twice the remaining input plus some slack.
                    let room = ((src.len() - src_index) * 2) + 10;
                    // `resize` zero-initializes the new tail; the previous
                    // `reserve` + `set_len` exposed uninitialized memory.
                    dst.resize(dst_index + room, 0);
                }
                err => {
                    return err.and(Ok(()));
                }
            }
        }
    }
}
impl Drop for Iconv {
    /// Closes the underlying descriptor; failures are only logged because
    /// `drop` cannot return an error.
    fn drop(&mut self) {
        let rc = unsafe { raw::iconv_close(self.iconv) };
        if rc == -1 {
            info!("Error on disposing iconv descriptor: {}", errno());
        }
    }
}
#[test]
fn test_iconv_raw() {
    // Round-trip "あいうえお" utf-8 -> cp932 -> utf-8 via convert_raw.
    let mut to_cp932 = Iconv::new("cp932", "utf-8").unwrap();
    let utf8_src = "あいうえお".as_bytes().to_vec();
    let mut cp932_buf = [0u8; 1000];
    let n_cp932 = to_cp932.convert_raw(&utf8_src, &mut cp932_buf).unwrap();
    info!("First result: {}", n_cp932);
    info!("CP932 converted: {:?}", &cp932_buf[0..11]);
    let mut utf8_buf = [0u8; 1000];
    let mut to_utf8 = Iconv::new("utf-8", "cp932").unwrap();
    let n_utf8 = to_utf8.convert_raw(&cp932_buf[0..n_cp932], &mut utf8_buf).unwrap();
    info!("Second result: {}", n_utf8);
    let recovered = String::from_utf8_lossy(&utf8_buf[0..n_utf8]);
    info!("Recoverd: {}", recovered);
    assert_eq!(&recovered, "あいうえお");
}
/// Confirm that `Iconv::convert_raw` set errno to E2BIG when output buffer has no room
#[test]
fn what_if_dst_array_is_short() {
    let mut iconv = Iconv::new("cp932", "utf-8").unwrap();
    let src = "あいうえお".bytes().collect::<Vec<u8>>();
    // Four bytes can never hold the converted text.
    let mut outbuf = [0u8; 4];
    info!("Let's begin shortcomming\n");
    match iconv.convert_raw(&src, &mut outbuf) {
        Err(IconvError::InsufficientOutBuffer(_, _)) => {}
        _ => unreachable!(),
    }
}
/// Test `Iconv::convert` gives the same result as the input through utf-8 -> cp932 -> utf-8
/// conversions
#[test]
fn test_convert_raw_turn() {
    let mut to_cp932 = Iconv::new("cp932", "utf-8").unwrap();
    let mut to_utf8 = Iconv::new("utf-8", "cp932").unwrap();
    let src = "あいうえお".bytes().collect::<Vec<u8>>();
    let mut cp932_out = Vec::new();
    if to_cp932.convert(&src, &mut cp932_out, 0).is_err() {
        panic!("Conversion failed");
    }
    info!("line {}: Result of convert: {:?}", line!(), cp932_out);
    let mut utf8_out = Vec::new();
    let _ = to_utf8.convert(&cp932_out, &mut utf8_out, 0);
    let s_recoverd = String::from_utf8(utf8_out).unwrap();
    assert_eq!("あいうえお".to_string(), s_recoverd);
}
|
//! Synchronization primitives.
use core::cell::UnsafeCell;
use core::sync::atomic::{self, AtomicBool};
use core::ops;
use shim;
/// A mutual exclusive container.
///
/// This assures that only one holds mutability of the inner value. To get the inner value, you
/// need acquire the "lock". If you try to lock it while a lock is already held elsewhere, it will
/// block the thread until the lock is released.
///
/// Implemented as a spin lock over an atomic flag; `lock()` yields to the
/// scheduler between attempts rather than parking the thread.
pub struct Mutex<T> {
    /// The inner value.
    inner: UnsafeCell<T>,
    /// The lock boolean.
    ///
    /// This is true, if and only if the lock is currently held.
    locked: AtomicBool,
}
impl<T> Mutex<T> {
    /// Create a new mutex with some inner value.
    #[inline]
    pub const fn new(inner: T) -> Mutex<T> {
        Mutex {
            inner: UnsafeCell::new(inner),
            locked: AtomicBool::new(false),
        }
    }

    /// Lock this mutex.
    ///
    /// If another lock is held, this will block the thread until it is released.
    #[inline]
    pub fn lock(&self) -> MutexGuard<T> {
        // Spin until we atomically flip `locked` from false to true.
        // `compare_and_swap` is deprecated; `compare_exchange_weak` is its
        // modern replacement and may fail spuriously, which is harmless
        // inside a retry loop.
        #[cfg(not(feature = "unsafe_no_mutex_lock"))]
        while self
            .locked
            .compare_exchange_weak(false, true, atomic::Ordering::SeqCst, atomic::Ordering::SeqCst)
            .is_err()
        {
            // Yield instead of burning CPU while another holder finishes.
            shim::syscalls::sched_yield();
        }
        MutexGuard {
            mutex: self,
        }
    }
}
/// A mutex guard.
///
/// This acts as the lock: the mutex is unlocked when the guard is dropped.
#[must_use]
pub struct MutexGuard<'a, T: 'a> {
    /// The parent mutex.
    mutex: &'a Mutex<T>,
}
/// Release the mutex.
impl<'a, T> Drop for MutexGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // Clearing the flag lets the next spinner in `lock()` proceed.
        self.mutex.locked.store(false, atomic::Ordering::SeqCst);
    }
}
impl<'a, T> ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    /// Shared access to the protected value, valid for the guard's lifetime.
    #[inline]
    fn deref(&self) -> &T {
        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).
            // Aliasing is allowed due to the lock representing mutual exclusive access.
            &*self.mutex.inner.get()
        }
    }
}
impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
    /// Exclusive access to the protected value, valid for the guard's lifetime.
    // `#[inline]` added for consistency with the sibling `deref` impl.
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).
            // Aliasing is allowed due to the lock representing mutual exclusive access.
            &mut *self.mutex.inner.get()
        }
    }
}
// SAFETY: the `locked` flag guarantees exclusive access to `inner`, so the
// mutex may be sent to and shared between threads whenever the inner value
// itself can be sent.
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
#[cfg(test)]
mod test {
    use super::*;

    /// Lock, read, write and re-read through fresh guards each time.
    #[test]
    fn test_mutex() {
        let m = Mutex::new(3);
        assert_eq!(*m.lock(), 3);
        // Each statement takes and drops its own guard.
        *m.lock() = 4;
        assert_eq!(*m.lock(), 4);
        *m.lock() = 0xFF;
        assert_eq!(*m.lock(), 0xFF);
    }
}
|
//
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Provides implementations of Oak ABI functions to test the async executor against.
//!
//! `init()` must be called at the start of each test case, or state from a previous test run on the
//! same thread may interfere with later tests.
//!
//! The following functions are available to modify the result of Runtime ABI calls:
//! * `set_wait_on_channels_handler`: Configures a handler function that can set statuses for
//! channels when `wait_on_channels` is called.
//! * `add_ready_data`: Adds a `Message` to the queue for a handle. Calls to `channel_read` will
//! return the values in this queue in insertion order.
//! * `set_error`: Adds an error status into the queue for a handle.
use core::cell::RefCell;
use oak::io::Encodable;
use oak_abi::{
proto::oak::{ChannelReadStatus, OakStatus},
Handle,
};
use std::collections::{HashMap, VecDeque};
/// Resets the per-thread test state: drops any installed `wait_on_channels`
/// handler and clears all queued ready data.
pub fn init() {
    WAIT_ON_CHANNELS_HANDLER.with(|cell| cell.replace(None));
    READY_DATA.with(|cell| cell.borrow_mut().clear());
}
/// In-memory view of one `(handle, status)` record in the byte buffer passed
/// to `wait_on_channels`.
///
/// `#[repr(packed)]` so the layout matches the raw buffer the ABI hands us
/// (the buffer pointer is cast directly to `*mut HandleWithStatus`), with no
/// padding between the handle and the status byte.
#[repr(packed)]
pub struct HandleWithStatus {
    // Channel handle as laid out in the caller's buffer.
    handle: Handle,
    // Channel status byte (a `ChannelReadStatus` value truncated to u8).
    status: u8,
}
impl HandleWithStatus {
    /// Returns the handle by value.
    ///
    /// Copies the field out rather than borrowing it: references to fields
    /// of a `repr(packed)` struct may be unaligned.
    pub fn handle(&self) -> Handle {
        self.handle
    }
    /// Stores the status, narrowing the enum discriminant to its low byte to
    /// match the one-byte wire representation.
    pub fn set_status(&mut self, status: ChannelReadStatus) {
        self.status = status as i32 as u8;
    }
}
/// Test double for the Oak `wait_on_channels` ABI call.
///
/// Reinterprets `buf` as `count` packed `HandleWithStatus` records and
/// delegates to the handler installed via `set_wait_on_channels_handler`.
/// Panics if no handler has been configured for this thread.
#[no_mangle]
pub extern "C" fn wait_on_channels(buf: *mut u8, count: u32) -> u32 {
    // SAFETY-relevant: assumes the caller passes a valid, writable buffer
    // holding `count` records — TODO confirm all test callers uphold this.
    let handles =
        unsafe { core::slice::from_raw_parts_mut(buf as *mut HandleWithStatus, count as usize) };
    WAIT_ON_CHANNELS_HANDLER.with(|handler| {
        // Run the configured handler; its OakStatus becomes the raw result.
        (handler
            .borrow_mut()
            .as_mut()
            .expect("no wait_on_channels handler configured"))(handles)
    }) as i32 as u32
}
/// Stub function necessary for Oak ABI.
///
/// Always returns 0 without touching its arguments; the tests here never
/// exercise the downgrade variant.
#[no_mangle]
pub extern "C" fn wait_on_channels_with_downgrade(_buf: *mut u8, _count: u32) -> u32 {
    0
}
/// Signature of a test `wait_on_channels` handler: receives the packed
/// handle/status records and returns the overall call status.
type WaitOnChannelsHandler = Box<dyn FnMut(&mut [HandleWithStatus]) -> OakStatus>;
std::thread_local! {
    // Thread-local so each test thread keeps independent state (see the
    // module docs about state leaking between tests on the same thread).
    static WAIT_ON_CHANNELS_HANDLER: RefCell<Option<WaitOnChannelsHandler>> = RefCell::new(None);
}
pub fn set_wait_on_channels_handler<F: 'static + FnMut(&mut [HandleWithStatus]) -> OakStatus>(
handler: F,
) {
WAIT_ON_CHANNELS_HANDLER.with(|cell| cell.replace(Some(Box::new(handler))));
}
/// Test double for the Oak `channel_read` ABI call.
///
/// Serves the next queued entry for `handle` from `READY_DATA`:
/// * empty queue -> `ErrChannelEmpty`;
/// * queued error status -> that status (peeked, not popped, so it repeats);
/// * message larger than `size` -> `ErrBufferTooSmall`, reporting the needed
///   size via `actual_size`;
/// * otherwise -> copies the bytes into `buf`, sets the out-params, pops the
///   message and returns `Ok`.
#[no_mangle]
pub extern "C" fn channel_read(
    handle: Handle,
    buf: *mut u8,
    size: usize,
    actual_size: *mut u32,
    _handle_buf: *mut u8,
    _handle_count: u32,
    actual_handle_count: *mut u32,
) -> u32 {
    READY_DATA.with(|ready_data| {
        let mut ready_data = ready_data.borrow_mut();
        let data_queue = ready_data.entry(handle).or_default();
        let status = match data_queue.front() {
            None => OakStatus::ErrChannelEmpty,
            // Errors stay at the front of the queue forever.
            Some(Err(status)) => *status,
            Some(Ok(data)) if data.len() > size => {
                // Tell the caller how large a buffer it actually needs.
                unsafe { *actual_size = data.len() as u32 };
                OakStatus::ErrBufferTooSmall
            }
            Some(Ok(data)) => {
                unsafe {
                    // SAFETY-relevant: trusts the caller to supply a valid
                    // writable `buf` of at least `size` bytes and valid
                    // out-pointers, as the real ABI contract requires.
                    let buf = core::slice::from_raw_parts_mut(buf, data.len());
                    buf.copy_from_slice(data);
                    *actual_size = data.len() as u32;
                    *actual_handle_count = 0;
                };
                // Message delivered; remove it from the queue.
                let _ = data_queue.pop_front();
                OakStatus::Ok
            }
        };
        status as i32 as u32
    })
}
/// Stub function necessary for Oak ABI.
///
/// Always returns 0 without touching its arguments; the tests here never
/// exercise the downgrade variant.
#[no_mangle]
pub extern "C" fn channel_read_with_downgrade(
    _handle: Handle,
    _buf: *mut u8,
    _size: usize,
    _actual_size: *mut u32,
    _handle_buf: *mut u8,
    _handle_count: u32,
    _actual_handle_count: *mut u32,
) -> u32 {
    0
}
/// Per-handle FIFO of queued messages (`Ok`) or queued error statuses (`Err`).
type ReadyChannelData = VecDeque<Result<Vec<u8>, OakStatus>>;
std::thread_local! {
    // Thread-local so each test thread keeps independent state (see the
    // module docs about state leaking between tests on the same thread).
    static READY_DATA: RefCell<HashMap<Handle, ReadyChannelData>> = RefCell::new(HashMap::new());
}
/// Encodes `data` and appends its bytes to the ready-data queue for
/// `handle`, creating the queue on first use.
pub fn add_ready_data<T: Encodable>(handle: Handle, data: &T) {
    let msg = data.encode().expect("Failed to encode ready data");
    READY_DATA.with(|cell| {
        let mut queues = cell.borrow_mut();
        queues.entry(handle).or_default().push_back(Ok(msg.bytes));
    });
}
// Note: this status is added into the queue of ready data, so any previously
// added ready data must first be read before this status will be returned.
// The channel status is never removed, so any future reads will keep
// returning this error status.
pub fn set_error(handle: Handle, status: OakStatus) {
    // An `Ok` status is not an error and would corrupt the queue semantics.
    assert_ne!(status, OakStatus::Ok);
    READY_DATA.with(|cell| {
        let mut queues = cell.borrow_mut();
        queues.entry(handle).or_default().push_back(Err(status));
    });
}
|
use error::{StateError, StateResult};
use fonts::Fonts;
use image::Image;
use meta::Meta;
use project::Project;
use state::SiteState;
use website::{Website};
/// Validation check: returns an `Ok` description string when the value is in
/// a usable state, or a `StateError` describing the first problem found.
pub trait Valid {
    fn is_valid(&self) -> StateResult;
}
impl Valid for SiteState {
    /// Valid when both paths exist on disk and the website itself validates.
    fn is_valid(&self) -> StateResult {
        if !self.source.exists() {
            return Err(StateError::new("Source is required"));
        }
        if !self.destination.exists() {
            return Err(StateError::new("Destination is required"));
        }
        self.website.is_valid().map(|_| String::from("AppState is Valid"))
    }
}
impl Valid for Project {
    /// Valid when the project path exists and every image validates.
    fn is_valid(&self) -> StateResult {
        if !self.path.exists() {
            println!("Error finding path for project: {:?}", &self.path);
            return Err(StateError::new(&format!("Path for {} was not found", &self.meta.title)));
        }
        for image in &self.images {
            image.is_valid()?;
        }
        Ok(String::from("Project is Valid"))
    }
}
impl Valid for Meta {
    /// Valid when both the title and the context (subtitle) are non-empty.
    fn is_valid(&self) -> StateResult {
        if self.title.is_empty() {
            return Err(StateError::new("Found empty project title"));
        }
        if self.context.is_empty() {
            // Bug fix: this message used to interpolate `self.context`,
            // which is empty on this branch — identify the project by its
            // title instead.
            return Err(StateError::new(&format!("{} has an empty project subtitle", &self.title)));
        }
        Ok(String::from("Meta is valid"))
    }
}
impl Valid for Image {
    /// Valid when the file the image points at exists on disk.
    fn is_valid(&self) -> StateResult {
        if self.path.exists() {
            Ok(String::new())
        } else {
            Err(StateError::new(&format!("Image was not found at {:?}", &self.path)))
        }
    }
}
impl Valid for Fonts {
    /// Valid when both the normal and bold font files are configured and
    /// present on disk.
    fn is_valid(&self) -> StateResult {
        match self.normal {
            Some(ref normal) if normal.exists() => {}
            _ => return Err(StateError::new("Normal font was not found")),
        }
        match self.bold {
            Some(ref bold) if bold.exists() => {}
            // Bug fix: a missing bold font used to report "Normal font was
            // not found" (copy-paste error).
            _ => return Err(StateError::new("Bold font was not found")),
        }
        Ok(String::new())
    }
}
impl Valid for Website {
fn is_valid(&self) -> StateResult {
if self.about.len() < 1 {
return Err(StateError::new("About cannot be empty"))
}
if !self.image.exists() {
return Err(StateError::new("Image for about page is required"))
}
for e in self.portfolio.iter() {
e.is_valid()?;
}
self.fonts.is_valid()?;
Ok(String::from("Website is valid"))
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.