text
stringlengths
8
4.13M
use std::fs; use std::path::Path; use std::str; use clap::Parser; use cutter::imageprocessing::{str_to_size, transform_images, Size}; use cutter::s3::{download_from_s3, upload_to_s3}; use cutter::util::get_files_in_dir; mod cutter; extern crate clap; pub const DEFAULT_REGION: &str = "eu-central-1"; const DEFAULT_CROP_SIZES: [&str; 4] = ["200x200", "400x400", "800x800", "1920x1080"]; #[derive(Debug, Parser)] pub struct Config { /// Path to files to run Cutter on. /// Cannot be used if files are fetched from a remote. #[clap(short = 'p', long = "path", conflicts_with = "fetch-remote")] pub files_path: String, /// Sizes to crop into. Can be used multiple times. /// format: WIDTHxHEIGHT #[clap(short='s', parse(try_from_str=str_to_size), default_values=&DEFAULT_CROP_SIZES)] pub crop_sizes: Vec<Size>, /// Clean output directory before starting. #[clap(short)] pub clean: bool, /// Overwrite existing files. #[clap(short, long)] pub overwrite: bool, /// Tmp dir to store output files in. #[clap(short, long, default_value = "/tmp/cutter")] pub tmp_dir: String, /// Enable verbose output. #[clap(short, long)] pub verbose: bool, /// Name of S3 bucket to upload files to. #[clap(short = 'b')] pub s3_bucket_name: Option<String>, /// Region of S3 bucket. #[clap(long)] pub s3_region: Option<String>, /// Prefix for files uploaded to S3. #[clap(long)] pub s3_prefix: Option<String>, /// Fetch files from S3 bucket for Cutting. 
#[clap(short = 'r', long)] pub fetch_remote: Option<bool>, } #[tokio::main] pub async fn main() { let config = Config::parse(); run(config).await; } pub async fn run(config: Config) { println!("Executing with config: {:?}", config); if config.verbose { explain_config(&config); } if Path::new(&config.tmp_dir).exists() && (config.clean || config.overwrite) { fs::remove_dir_all(&config.tmp_dir).unwrap(); } if !Path::new(&config.tmp_dir).exists() { fs::create_dir(&config.tmp_dir).unwrap(); } if let Some(fetch_remote) = config.fetch_remote { if config.s3_bucket_name.is_none() { panic!("shouldnt happen because config cheks for this :)"); } if fetch_remote { if let Some(s3_bucket_name) = &config.s3_bucket_name { download_from_s3( s3_bucket_name, &config .s3_region .to_owned() .unwrap_or_else(|| DEFAULT_REGION.to_string()), &config .s3_prefix .to_owned() .unwrap_or_else(|| "".to_string()), &config.files_path, config.overwrite, config.clean, config.verbose, ) .await; } } } println!("Finding files in {}", &config.files_path); let files = get_files_in_dir(config.files_path); let processed_files = transform_images( files, config.tmp_dir.to_owned(), &config.crop_sizes, config.verbose, ) .await; if let Some(s3_bucket_name) = config.s3_bucket_name { upload_to_s3( &s3_bucket_name, &config .s3_region .unwrap_or_else(|| DEFAULT_REGION.to_string()), &config.s3_prefix.unwrap_or_else(|| "".to_string()), &config.tmp_dir, processed_files, config.verbose, ) .await; } println!("Done!"); } fn explain_config(config: &Config) { println!("Explaining configuration: {:?}", config); println!("*************** CONFIGURATION ***************"); if let Some(s3_bucket_name) = &config.s3_bucket_name { println!( "Will publish files to S3 bucket '{}' after completion", s3_bucket_name ); println!("Will overwrite files on remote: {}", config.overwrite); } if let Some(fetch_remote) = config.fetch_remote { if fetch_remote { println!( "Fetching files from remote: {}/{}", config .s3_bucket_name .as_ref() 
.expect("need s3 bucket name if going to fetch from remote"), config.s3_prefix.as_ref().unwrap_or(&"".to_string()) ); } } else { println!( "Path to source files locally on this host: {}", config.files_path ); } println!("Working/temporary directory: {}", config.tmp_dir); if config.clean { println!("Will clean working directory before starting"); } println!( "Will crop to the following {} size(s):", config.crop_sizes.len() ); for size in &config.crop_sizes { println!("\t{:?}", size); } println!("*************** END CONFIGURATION ***************"); }
#[macro_use]
extern crate clap;
#[macro_use]
extern crate log;
extern crate simplelog;

mod day1;
mod day2;
mod io;

use clap::App;
use log::LevelFilter;
use simplelog::CombinedLogger;
use simplelog::Config;
use simplelog::TermLogger;

/// Installs a combined terminal logger at `log_level` and announces startup.
fn init_terminal_logger(log_level: LevelFilter) {
    CombinedLogger::init(vec![TermLogger::new(log_level, Config::default()).unwrap()]).unwrap();
    info!("Hello!");
    info!("Logger initialized with level {}", log_level);
}

/// Entry point: parses the CLI definition from `cli.yml` and runs every
/// puzzle solution whose flag was passed (or all of them when `all` is set).
fn main() {
    init_terminal_logger(LevelFilter::Debug);

    let yaml = load_yaml!("cli.yml");
    let matches = App::from(yaml).get_matches();

    // A solution runs when its own flag is present, or when `all` is.
    let should_run = |name: &str| matches.is_present(name) || matches.is_present("all");
    let mut ran_any = false;

    if should_run("day1_1") {
        day1::day1_1();
        ran_any = true;
    }
    if should_run("day1_2") {
        day1::day1_2();
        ran_any = true;
    }
    if should_run("day2_1") {
        day2::day2_1();
        ran_any = true;
    }
    if should_run("day2_2") {
        day2::day2_2();
        ran_any = true;
    }

    if !ran_any {
        warn!("No argument given");
    }
    info!("Goodbye");
}
// brings io and Rng traits into scope // brings the Ordering enum type into scope use rand::Rng; use std::cmp::Ordering; use std::io; fn main() { println!("Guess the number!"); // thread_rng(): a random nr generator local to the current thread and seeded from the OS // gen_range(): method defined by the Rng trait // cargo doc --open in this dir compiles docs from all the dependencies and opens in browser // Rust infers that the type of the variable is a number, defaults to i32 type let secret_number = rand::thread_rng().gen_range(1, 101); // loop: Starts an infinite loop loop { println!("Please input your guess."); // let: variable definition // mut: makes it mutable, by default all is immutable // String: a type // ::new: an "associated function" of the String type. Equivalent of static function. // Rust infers that the type of guess is string. let mut guess = String::new(); // Could be std::io::stdin if std::io was not brought into scope io::stdin() // &: argument is a reference, immutable by default // &mut: makes reference mutable // returns io::Result type, an enum with variants Ok or Err. .read_line(&mut guess) // calls the expect method of the Result type. // On Err value, causes a crash and displays the msg // On Ok value, returns the value that Ok is holding (nr of read bytes) .expect("Failed to read line"); // shadow the previous guess variable // trim: remove whitespace // parse: parses a string into one of the number types. // Returns a Result type. // : u32: annotate the variable's type to u32. // Because of the comparison later, // Rust infers that the type of the compared number is also u32. 
// match: expression to handle variants of the Result enum returned by parse() let guess: u32 = match guess.trim().parse() { // Ok: pattern to handle the Ok type returned by parse, which contains the parsed number Ok(num) => num, // Err(_) : pattern, catching all types of Err values Err(_) => continue, }; println!("You guessed: {}", guess); // cmp: returns an Ordering type // Ordering: an enum with 3 variants: Less, Greater, Equal // match: expression, made up of arms. // Arm = pattern + code that should run if the value matches the pattern match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), Ordering::Equal => { println!("You win!"); // break the loop break; } } } }
#[doc = "Reader of register ACT_SRC"]
pub type R = crate::R<u32, super::ACT_SRC>;
#[doc = "Reader of field `SRC_ADDR`"]
pub type SRC_ADDR_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:31 - Current address of source location."]
    #[inline(always)]
    pub fn src_addr(&self) -> SRC_ADDR_R {
        // The field spans the full register width, so masking with
        // 0xffff_ffff is an identity operation kept for generator symmetry.
        let field = self.bits & 0xffff_ffff;
        SRC_ADDR_R::new(field as u32)
    }
}
// This file is part of rdma-core. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT. No part of rdma-core, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2017 The developers of rdma-core. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT.

/// Iterator over the RDMA devices on this host, backed by the
/// NULL-terminated array returned from `ibv_get_device_list`.
#[derive(Debug)]
pub struct DeviceListIterator<'a> {
    /// Array of device pointers owned by libibverbs; freed on drop.
    list: *mut *mut ibv_device,
    /// Number of devices in `list`, as reported by `ibv_get_device_list`.
    size: usize,
    /// Cursor into the `list` array (NOT into a device struct); the slot it
    /// points at holds NULL once the terminator is reached.
    ///
    /// BUG FIX: this was previously `*mut ibv_device` initialised to `*list`
    /// and advanced with `ibv_device`-sized pointer arithmetic, which walked
    /// through the first device's memory instead of the pointer array — only
    /// the first yielded item was a valid device.
    next: *mut *mut ibv_device,
    lifetime: PhantomData<&'a ibv_device>,
}

impl<'a> Drop for DeviceListIterator<'a> {
    #[inline(always)]
    fn drop(&mut self) {
        // SAFETY: `list` came from a successful `ibv_get_device_list` call
        // (`devices()` panics otherwise) and is freed exactly once, here.
        unsafe { ibv_free_device_list(self.list) }
    }
}

impl<'a> Iterator for DeviceListIterator<'a> {
    type Item = Device<'a>;

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        // SAFETY: `next` always points inside the NULL-terminated array, so
        // reading the current slot is in-bounds.
        let current = unsafe { *self.next };
        if unlikely(current.is_null()) {
            return None;
        }
        // SAFETY: the current slot was non-NULL, so the terminator has not
        // been passed and the next slot is still within the allocation.
        self.next = unsafe { self.next.offset(1) };

        Some(Device {
            pointer: current,
            parent: PhantomData,
        })
    }
}

impl<'a> DeviceListIterator<'a> {
    /// Obtains the device list from libibverbs, panicking with a descriptive
    /// message on the documented failure modes of `ibv_get_device_list`.
    #[inline(always)]
    pub fn devices() -> Self {
        let mut size = 0;
        let list = unsafe { ibv_get_device_list(&mut size) };
        if unlikely(list.is_null()) {
            let errno = errno();
            match errno.0 {
                E::EPERM => panic!("Permission denied"),
                E::ENOSYS => panic!("Linux kernel does not support RDMA"),
                E::ENOMEM => panic!("Out of memory"),
                unexpected => panic!(
                    "ibv_get_device_list returned unexpected error number '{}' ('{}')",
                    unexpected, errno
                ),
            }
        }

        Self {
            list,
            size: size as usize,
            next: list,
            lifetime: PhantomData,
        }
    }

    /// True when the list contains no devices.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        // BUG FIX: previously returned `self.list.is_null()`, which is always
        // false after construction (`devices()` panics on a null list) and
        // disagreed with `len()`. Emptiness is a zero-length list.
        self.size == 0
    }

    /// Number of devices reported by `ibv_get_device_list`.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.size
    }
}
//! Tests for nonce validity checks use aead::{generic_array::GenericArray, Aead, KeyInit}; use mgm::Mgm; #[test] fn kuznyechik_bad_nonce() { let key = GenericArray::from_slice(&[0u8; 32]); let mut nonce = GenericArray::clone_from_slice(&[0u8; 16]); let cipher = Mgm::<kuznyechik::Kuznyechik>::new(key); let mut enc_data = cipher.encrypt(&nonce, &[][..]).unwrap(); let res = cipher.decrypt(&nonce, &enc_data[..]); assert!(res.is_ok()); enc_data[0] ^= 0x80; let res = cipher.decrypt(&nonce, &enc_data[..]); assert!(res.is_err()); nonce[0] ^= 0x80; let res = cipher.encrypt(&nonce, &[][..]); assert!(res.is_err()); let res = cipher.decrypt(&nonce, &enc_data[..]); assert!(res.is_err()); }
//! B/W Color for EPDs //! //! EPD representation of multicolor with separate buffers //! for each bit makes it hard to properly represent colors here #[cfg(feature = "graphics")] use embedded_graphics_core::pixelcolor::BinaryColor; #[cfg(feature = "graphics")] use embedded_graphics_core::pixelcolor::PixelColor; /// When trying to parse u8 to one of the color types #[derive(Debug, PartialEq, Eq)] pub struct OutOfColorRangeParseError(u8); impl core::fmt::Display for OutOfColorRangeParseError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "Outside of possible Color Range: {}", self.0) } } impl OutOfColorRangeParseError { fn _new(size: u8) -> OutOfColorRangeParseError { OutOfColorRangeParseError(size) } } /// Only for the Black/White-Displays // TODO : 'color' is not a good name for black and white, rename it to BiColor/BWColor ? #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum Color { /// Black color Black, /// White color White, } /// Only for the Black/White/Color-Displays #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum TriColor { /// Black color Black, /// White color White, /// Chromatic color Chromatic, } /// For the 5in65 7 Color Display #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum OctColor { /// Black Color Black = 0x00, /// White Color White = 0x01, /// Green Color Green = 0x02, /// Blue Color Blue = 0x03, /// Red Color Red = 0x04, /// Yellow Color Yellow = 0x05, /// Orange Color Orange = 0x06, /// HiZ / Clean Color HiZ = 0x07, } /// Color trait for use in `Display`s pub trait ColorType: PixelColor { /// Number of bit used to represent this color type in a single buffer. /// To get the real number of bits per pixel you should multiply this by `BUFFER_COUNT` const BITS_PER_PIXEL_PER_BUFFER: usize; /// Number of buffer used to represent this color type /// splitted buffer like tricolo is 2, otherwise this should be 1. 
const BUFFER_COUNT: usize; /// Return the data used to set a pixel color /// /// * bwrbit is used to tell the value of the unused bit when a chromatic /// color is set (TriColor only as for now) /// * pos is the pixel position in the line, used to know which pixels must be set /// /// Return values are : /// * .0 is the mask used to exclude this pixel from the byte (eg: 0x7F in BiColor) /// * .1 are the bits used to set the color in the byte (eg: 0x80 in BiColor) /// this is u16 because we set 2 bytes in case of split buffer fn bitmask(&self, bwrbit: bool, pos: u32) -> (u8, u16); } impl ColorType for Color { const BITS_PER_PIXEL_PER_BUFFER: usize = 1; const BUFFER_COUNT: usize = 1; fn bitmask(&self, _bwrbit: bool, pos: u32) -> (u8, u16) { let bit = 0x80 >> (pos % 8); match self { Color::Black => (!bit, 0u16), Color::White => (!bit, bit as u16), } } } impl ColorType for TriColor { const BITS_PER_PIXEL_PER_BUFFER: usize = 1; const BUFFER_COUNT: usize = 2; fn bitmask(&self, bwrbit: bool, pos: u32) -> (u8, u16) { let bit = 0x80 >> (pos % 8); match self { TriColor::Black => (!bit, 0u16), TriColor::White => (!bit, bit as u16), TriColor::Chromatic => ( !bit, if bwrbit { (bit as u16) << 8 } else { (bit as u16) << 8 | bit as u16 }, ), } } } impl ColorType for OctColor { const BITS_PER_PIXEL_PER_BUFFER: usize = 4; const BUFFER_COUNT: usize = 1; fn bitmask(&self, _bwrbit: bool, pos: u32) -> (u8, u16) { let mask = !(0xF0 >> (pos % 2)); let bits = self.get_nibble() as u16; (mask, if pos % 2 == 1 { bits } else { bits << 4 }) } } #[cfg(feature = "graphics")] impl From<BinaryColor> for OctColor { fn from(b: BinaryColor) -> OctColor { match b { BinaryColor::On => OctColor::Black, BinaryColor::Off => OctColor::White, } } } #[cfg(feature = "graphics")] impl From<OctColor> for embedded_graphics_core::pixelcolor::Rgb888 { fn from(b: OctColor) -> Self { let (r, g, b) = b.rgb(); Self::new(r, g, b) } } #[cfg(feature = "graphics")] impl From<embedded_graphics_core::pixelcolor::Rgb888> for 
OctColor { fn from(p: embedded_graphics_core::pixelcolor::Rgb888) -> OctColor { use embedded_graphics_core::prelude::RgbColor; let colors = [ OctColor::Black, OctColor::White, OctColor::Green, OctColor::Blue, OctColor::Red, OctColor::Yellow, OctColor::Orange, OctColor::HiZ, ]; // if the user has already mapped to the right color space, it will just be in the list if let Some(found) = colors.iter().find(|c| c.rgb() == (p.r(), p.g(), p.b())) { return *found; } // This is not ideal but just pick the nearest color *colors .iter() .map(|c| (c, c.rgb())) .map(|(c, (r, g, b))| { let dist = (i32::from(r) - i32::from(p.r())).pow(2) + (i32::from(g) - i32::from(p.g())).pow(2) + (i32::from(b) - i32::from(p.b())).pow(2); (c, dist) }) .min_by_key(|(_c, dist)| *dist) .map(|(c, _)| c) .unwrap_or(&OctColor::White) } } #[cfg(feature = "graphics")] impl From<embedded_graphics_core::pixelcolor::raw::RawU4> for OctColor { fn from(b: embedded_graphics_core::pixelcolor::raw::RawU4) -> Self { use embedded_graphics_core::prelude::RawData; OctColor::from_nibble(b.into_inner()).unwrap() } } #[cfg(feature = "graphics")] impl PixelColor for OctColor { type Raw = embedded_graphics_core::pixelcolor::raw::RawU4; } impl OctColor { /// Gets the Nibble representation of the Color as needed by the display pub fn get_nibble(self) -> u8 { self as u8 } /// Converts two colors into a single byte for the Display pub fn colors_byte(a: OctColor, b: OctColor) -> u8 { a.get_nibble() << 4 | b.get_nibble() } ///Take the nibble (lower 4 bits) and convert to an OctColor if possible pub fn from_nibble(nibble: u8) -> Result<OctColor, OutOfColorRangeParseError> { match nibble & 0xf { 0x00 => Ok(OctColor::Black), 0x01 => Ok(OctColor::White), 0x02 => Ok(OctColor::Green), 0x03 => Ok(OctColor::Blue), 0x04 => Ok(OctColor::Red), 0x05 => Ok(OctColor::Yellow), 0x06 => Ok(OctColor::Orange), 0x07 => Ok(OctColor::HiZ), e => Err(OutOfColorRangeParseError(e)), } } ///Split the nibbles of a single byte and convert both to an 
OctColor if possible pub fn split_byte(byte: u8) -> Result<(OctColor, OctColor), OutOfColorRangeParseError> { let low = OctColor::from_nibble(byte & 0xf)?; let high = OctColor::from_nibble((byte >> 4) & 0xf)?; Ok((high, low)) } /// Converts to limited range of RGB values. pub fn rgb(self) -> (u8, u8, u8) { match self { OctColor::White => (0xff, 0xff, 0xff), OctColor::Black => (0x00, 0x00, 0x00), OctColor::Green => (0x00, 0xff, 0x00), OctColor::Blue => (0x00, 0x00, 0xff), OctColor::Red => (0xff, 0x00, 0x00), OctColor::Yellow => (0xff, 0xff, 0x00), OctColor::Orange => (0xff, 0x80, 0x00), OctColor::HiZ => (0x80, 0x80, 0x80), /* looks greyish */ } } } //TODO: Rename get_bit_value to bit() and get_byte_value to byte() ? impl Color { /// Get the color encoding of the color for one bit pub fn get_bit_value(self) -> u8 { match self { Color::White => 1u8, Color::Black => 0u8, } } /// Gets a full byte of black or white pixels pub fn get_byte_value(self) -> u8 { match self { Color::White => 0xff, Color::Black => 0x00, } } /// Parses from u8 to Color fn from_u8(val: u8) -> Self { match val { 0 => Color::Black, 1 => Color::White, e => panic!( "DisplayColor only parses 0 and 1 (Black and White) and not `{}`", e ), } } /// Returns the inverse of the given color. 
/// /// Black returns White and White returns Black pub fn inverse(self) -> Color { match self { Color::White => Color::Black, Color::Black => Color::White, } } } impl From<u8> for Color { fn from(value: u8) -> Self { Color::from_u8(value) } } #[cfg(feature = "graphics")] impl PixelColor for Color { type Raw = (); } #[cfg(feature = "graphics")] impl From<BinaryColor> for Color { fn from(b: BinaryColor) -> Color { match b { BinaryColor::On => Color::Black, BinaryColor::Off => Color::White, } } } #[cfg(feature = "graphics")] impl From<embedded_graphics_core::pixelcolor::Rgb888> for Color { fn from(rgb: embedded_graphics_core::pixelcolor::Rgb888) -> Self { use embedded_graphics_core::pixelcolor::RgbColor; if rgb == RgbColor::BLACK { Color::Black } else if rgb == RgbColor::WHITE { Color::White } else { // choose closest color if (rgb.r() as u16 + rgb.g() as u16 + rgb.b() as u16) > 255 * 3 / 2 { Color::White } else { Color::Black } } } } #[cfg(feature = "graphics")] impl From<Color> for embedded_graphics_core::pixelcolor::Rgb888 { fn from(color: Color) -> Self { use embedded_graphics_core::pixelcolor::RgbColor; match color { Color::Black => embedded_graphics_core::pixelcolor::Rgb888::BLACK, Color::White => embedded_graphics_core::pixelcolor::Rgb888::WHITE, } } } impl TriColor { /// Get the color encoding of the color for one bit pub fn get_bit_value(self) -> u8 { match self { TriColor::White => 1u8, TriColor::Black | TriColor::Chromatic => 0u8, } } /// Gets a full byte of black or white pixels pub fn get_byte_value(self) -> u8 { match self { TriColor::White => 0xff, TriColor::Black | TriColor::Chromatic => 0x00, } } } #[cfg(feature = "graphics")] impl PixelColor for TriColor { type Raw = (); } #[cfg(feature = "graphics")] impl From<BinaryColor> for TriColor { fn from(b: BinaryColor) -> TriColor { match b { BinaryColor::On => TriColor::Black, BinaryColor::Off => TriColor::White, } } } #[cfg(feature = "graphics")] impl From<embedded_graphics_core::pixelcolor::Rgb888> for 
TriColor { fn from(rgb: embedded_graphics_core::pixelcolor::Rgb888) -> Self { use embedded_graphics_core::pixelcolor::RgbColor; if rgb == RgbColor::BLACK { TriColor::Black } else if rgb == RgbColor::WHITE { TriColor::White } else { // there is no good approximation here since we don't know which color is 'chromatic' TriColor::Chromatic } } } #[cfg(feature = "graphics")] impl From<TriColor> for embedded_graphics_core::pixelcolor::Rgb888 { fn from(tri_color: TriColor) -> Self { use embedded_graphics_core::pixelcolor::RgbColor; match tri_color { TriColor::Black => embedded_graphics_core::pixelcolor::Rgb888::BLACK, TriColor::White => embedded_graphics_core::pixelcolor::Rgb888::WHITE, // assume chromatic is red TriColor::Chromatic => embedded_graphics_core::pixelcolor::Rgb888::new(255, 0, 0), } } } #[cfg(test)] mod tests { use super::*; #[test] fn from_u8() { assert_eq!(Color::Black, Color::from(0u8)); assert_eq!(Color::White, Color::from(1u8)); } // test all values aside from 0 and 1 which all should panic #[test] fn from_u8_panic() { for val in 2..=u8::max_value() { extern crate std; let result = std::panic::catch_unwind(|| Color::from(val)); assert!(result.is_err()); } } #[test] fn u8_conversion_black() { assert_eq!(Color::from(Color::Black.get_bit_value()), Color::Black); assert_eq!(Color::from(0u8).get_bit_value(), 0u8); } #[test] fn u8_conversion_white() { assert_eq!(Color::from(Color::White.get_bit_value()), Color::White); assert_eq!(Color::from(1u8).get_bit_value(), 1u8); } #[test] fn test_oct() { let left = OctColor::Red; let right = OctColor::Green; assert_eq!( OctColor::split_byte(OctColor::colors_byte(left, right)), Ok((left, right)) ); } }
use specs::*;
use types::systemdata::IsAlive;
use types::*;

use component::time::*;

use std::f32::consts;
use std::marker::PhantomData;
use std::time::Duration;

use airmash_protocol::server::{PlayerUpdate, ServerPacket};
use airmash_protocol::{to_bytes, ServerKeyState, Upgrades as ServerUpgrades};
use websocket::OwnedMessage;

// Angle constants built as dimensioned `Rotation` values. The struct-literal
// construction (value_unsafe/_marker) bypasses the usual constructors so
// these can be `const`.
const PI: Rotation = Rotation {
    value_unsafe: consts::PI,
    _marker: PhantomData,
};
// PIX2 is less clear
#[allow(non_upper_case_globals)]
const PIx2: Rotation = Rotation {
    value_unsafe: 2.0 * consts::PI,
    _marker: PhantomData,
};
const FRAC_PI_2: Rotation = Rotation {
    value_unsafe: consts::FRAC_PI_2,
    _marker: PhantomData,
};

// System that advances every living player's position/rotation/velocity each
// frame and broadcasts PlayerUpdate packets for players whose key state
// changed (plus a periodic refresh for everyone else).
pub struct PositionUpdate {
    // Entities whose KeyState changed since the last run; rebuilt every frame.
    dirty: BitSet,
    // Reader handle for the KeyState modification events; populated in setup().
    modify_reader: Option<ReaderId<ModifiedFlag>>,
}

#[derive(SystemData)]
pub struct PositionUpdateData<'a> {
    pos: WriteStorage<'a, Position>,
    rot: WriteStorage<'a, Rotation>,
    vel: WriteStorage<'a, Velocity>,
    keystate: ReadStorage<'a, KeyState>,
    upgrades: ReadStorage<'a, Upgrades>,
    powerups: ReadStorage<'a, Powerups>,
    planes: ReadStorage<'a, Plane>,
    lastframe: Read<'a, LastFrame>,
    thisframe: Read<'a, ThisFrame>,
    starttime: Read<'a, StartTime>,
    entities: Entities<'a>,
    conns: Read<'a, Connections>,
    is_alive: IsAlive<'a>,
}

impl PositionUpdate {
    pub fn new() -> Self {
        Self {
            dirty: BitSet::default(),
            modify_reader: None,
        }
    }

    // Integrates one frame of flight physics for every living player:
    // turning, strafing, acceleration, speed clamping, braking, position
    // integration, angle wrapping, and map-boundary clamping.
    fn step_players<'a>(data: &mut PositionUpdateData<'a>, config: &Read<'a, Config>) {
        // Frame delta derived from the two frame-time resources.
        let delta = Time::from(data.thisframe.0 - data.lastframe.0);
        let is_alive = &data.is_alive;

        (
            &mut data.pos,
            &mut data.rot,
            &mut data.vel,
            &data.keystate,
            &data.upgrades,
            &data.powerups,
            &data.planes,
            &*data.entities,
        ).join()
            .filter(|(_, _, _, _, _, _, _, ent)| is_alive.get(*ent))
            .for_each(|(pos, rot, vel, keystate, upgrades, powerups, plane, _)| {
                // Direction of thrust for this frame, if any; None means no
                // acceleration is applied.
                let mut movement_angle = None;
                let info = &config.planes[*plane];
                let boost_factor = if keystate.boost(&plane) {
                    info.boost_factor
                } else {
                    1.0
                };

                if keystate.strafe(plane) {
                    // Strafing: sidestep perpendicular to the facing angle
                    // without changing rotation.
                    if keystate.left {
                        movement_angle = Some(*rot - FRAC_PI_2);
                    }
                    if keystate.right {
                        movement_angle = Some(*rot + FRAC_PI_2);
                    }
                } else {
                    // Normal flight: left/right keys turn the plane.
                    if keystate.left {
                        *rot += -delta * info.turn_factor;
                    }
                    if keystate.right {
                        *rot += delta * info.turn_factor;
                    }
                }

                if keystate.up {
                    if let Some(angle) = movement_angle {
                        // Combine strafe with forward thrust into a diagonal.
                        if keystate.right {
                            movement_angle = Some(angle + PI * (-0.25));
                        } else if keystate.left {
                            movement_angle = Some(angle + PI * (0.25));
                        }
                    } else {
                        movement_angle = Some(*rot);
                    }
                } else if keystate.down {
                    if let Some(angle) = movement_angle {
                        if keystate.right {
                            movement_angle = Some(angle + PI * (0.25));
                        } else if keystate.left {
                            movement_angle = Some(angle + PI * (-0.25));
                        }
                    } else {
                        // Reverse thrust: accelerate opposite the facing angle.
                        movement_angle = Some(*rot + PI);
                    }
                }

                if let Some(angle) = movement_angle {
                    let mult = info.accel_factor * delta * boost_factor;
                    *vel += Vector2::new(mult * angle.sin(), mult * -angle.cos());
                }

                let oldspeed = *vel;
                let speed_len = vel.length();
                let mut max_speed = info.max_speed * boost_factor;
                let min_speed = info.min_speed;

                // Need to fill out config more
                if upgrades.speed != 0 {
                    unimplemented!();
                }

                if powerups.inferno {
                    max_speed *= info.inferno_factor;
                }
                if keystate.flagspeed {
                    // Carrying the flag overrides every other speed modifier.
                    max_speed = info.flag_speed;
                }

                if speed_len > max_speed {
                    // Clamp speed by rescaling the velocity vector.
                    *vel *= max_speed / speed_len;
                } else {
                    if vel.x.abs() > min_speed || vel.y.abs() > min_speed {
                        // Apply braking drag while still above minimum speed.
                        let val = 1.0 - (info.brake_factor * delta).inner();
                        *vel *= val;
                    } else {
                        // Below minimum speed: snap to a full stop.
                        *vel = Velocity::default()
                    }
                }

                // Trapezoidal integration using the average of old and new velocity.
                *pos += oldspeed * delta + (*vel - oldspeed) * delta * 0.5;
                // Normalize rotation into [0, 2*pi).
                *rot = (*rot % PIx2 + PIx2) % PIx2;

                // Map half-extents; positions are clamped to this box.
                let bound = Position::new(Distance::new(16352.0), Distance::new(8160.0));

                if pos.x.abs() > bound.x {
                    pos.x = pos.x.signum() * bound.x
                }
                if pos.y.abs() > bound.y {
                    pos.y = pos.y.signum() * bound.y
                }
            });
    }

    // Broadcasts a PlayerUpdate for every living player flagged dirty this
    // frame (i.e. whose KeyState was modified), stamping their LastUpdate.
    fn send_updates<'a>(
        &self,
        data: &mut PositionUpdateData<'a>,
        lastupdate: &mut WriteStorage<'a, LastUpdate>,
    ) {
        let thisframe = data.thisframe.0;
        let starttime = data.starttime.0;

        (
            &data.pos,
            &data.rot,
            &data.vel,
            &data.planes,
            &data.keystate,
            &data.upgrades,
            &data.powerups,
            &*data.entities,
            &self.dirty,
            lastupdate,
        ).join()
            .filter(|(_, _, _, _, _, _, _, ent, _, _)| data.is_alive.get(*ent))
            .for_each(
                |(pos, rot, vel, plane, keystate, upgrades, powerups, ent, _, lastupdate)| {
                    *lastupdate = LastUpdate(thisframe);

                    let state = keystate.to_server(&plane);

                    let ups = ServerUpgrades {
                        speed: upgrades.speed,
                        shield: powerups.shield,
                        inferno: powerups.inferno,
                    };

                    let packet = PlayerUpdate {
                        // Clock value is the time elapsed since server start.
                        clock: (thisframe - starttime).to_clock(),
                        id: ent,
                        keystate: state,
                        pos: *pos,
                        rot: *rot,
                        speed: *vel,
                        upgrades: ups,
                    };

                    trace!(target: "server", "Update: {:?}", packet);

                    data.conns.send_to_all(OwnedMessage::Binary(
                        to_bytes(&ServerPacket::PlayerUpdate(packet)).unwrap(),
                    ))
                },
            )
    }

    // Periodic keep-alive: re-broadcasts an update for any living player whose
    // last update is older than one second, regardless of the dirty set.
    fn send_outdated<'a>(
        &self,
        data: &mut PositionUpdateData<'a>,
        lastupdate: &mut WriteStorage<'a, LastUpdate>,
    ) {
        (
            &data.pos,
            &data.rot,
            &data.vel,
            &data.planes,
            &data.keystate,
            &data.upgrades,
            &data.powerups,
            &*data.entities,
            lastupdate,
        ).join()
            .filter(|(_, _, _, _, _, _, _, _, lastupdate)| {
                lastupdate.0.elapsed() > Duration::from_secs(1)
            })
            .filter(|(_, _, _, _, _, _, _, ent, _)| data.is_alive.get(*ent))
            .for_each(
                |(pos, rot, vel, plane, keystate, upgrades, powerups, ent, lastupdate)| {
                    // NOTE(review): this alias appears unused in the closure body.
                    type Key = ServerKeyState;

                    *lastupdate = LastUpdate(data.thisframe.0);

                    let state = keystate.to_server(&plane);

                    let ups = ServerUpgrades {
                        speed: upgrades.speed,
                        shield: powerups.shield,
                        inferno: powerups.inferno,
                    };

                    let packet = PlayerUpdate {
                        clock: (data.thisframe.0 - data.starttime.0).to_clock(),
                        id: ent,
                        keystate: state,
                        pos: *pos,
                        rot: *rot,
                        speed: *vel,
                        upgrades: ups,
                    };

                    trace!(target: "server", "Update: {:?}", packet);

                    data.conns.send_to_all(OwnedMessage::Binary(
                        to_bytes(&ServerPacket::PlayerUpdate(packet)).unwrap(),
                    ))
                },
            )
    }
}

impl<'a> System<'a> for PositionUpdate {
    type SystemData = (
        PositionUpdateData<'a>,
        Read<'a, Config>,
        WriteStorage<'a, LastUpdate>,
    );

    fn setup(&mut self, res: &mut Resources) {
        Self::SystemData::setup(res);

        // Register as a listener for KeyState modification events so `run`
        // can collect the entities whose keys changed.
        let mut storage: WriteStorage<KeyState> = SystemData::fetch(&res);
        self.modify_reader = Some(storage.track_modified());
    }

    fn run(&mut self, (mut data, config, mut lastupdate): Self::SystemData) {
        // Rebuild the dirty set from this frame's KeyState modifications.
        self.dirty.clear();
        data.keystate
            .populate_modified(&mut self.modify_reader.as_mut().unwrap(), &mut self.dirty);

        Self::step_players(&mut data, &config);
        self.send_updates(&mut data, &mut lastupdate);
        self.send_outdated(&mut data, &mut lastupdate);
    }
}

use dispatch::SystemInfo;
use handlers::KeyHandler;

impl SystemInfo for PositionUpdate {
    type Dependencies = KeyHandler;

    fn name() -> &'static str {
        // NOTE(review): `line!()` yields a line number, not an identifier, so
        // the generated name is "<module>::<line>" — confirm this is intended.
        concat!(module_path!(), "::", line!())
    }

    fn new() -> Self {
        Self::new()
    }
}
// Integration tests for the `#[verify]` procedural macro: each test declares
// a function whose `where _: Verify<...>` clause encodes a type-level boolean
// proposition, then calls it with type-level values that satisfy the clause.
// A test passes simply by compiling and running.
mod compile;

use compile::Compile;
use verified::*;

// Readable aliases for the type-level bits used throughout.
const TRUE: B1 = B1;
const FALSE: B0 = B0;

// --- Boolean (Bit) clauses ---------------------------------------------------

#[test]
fn can_verify_single_bool_identity_clause() {
    #[verify]
    fn f<B: Bit>(_: B)
    where
        _: Verify<{ B }>,
    {
    }
    f(TRUE);
}

#[test]
fn can_verify_multiple_bool_identity_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A }, { B }>,
    {
    }
    f(TRUE, TRUE);
}

#[test]
fn can_verify_bool_equality_clause() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A == B }>,
    {
    }
    f(FALSE, FALSE);
}

#[test]
fn can_verify_bool_and_clause() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A & B }>,
    {
    }
    f(TRUE, TRUE);
}

#[test]
fn can_verify_bool_or_clause() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A | B }>,
    {
    }
    f(FALSE, TRUE);
    f(TRUE, TRUE);
    // (duplicate call kept as in the original)
    f(TRUE, TRUE);
}

#[test]
fn can_verify_bool_xor_clause() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A ^ B }>,
    {
    }
    f(FALSE, TRUE);
    f(TRUE, FALSE);
}

#[test]
fn can_verify_bool_not_clause() {
    #[verify]
    fn f<B: Bit>(_: B)
    where
        _: Verify<{ !B }>,
    {
    }
    f(FALSE);
}

#[test]
fn can_verify_parenthesised_clause() {
    #[verify]
    fn f<B: Bit>(_: B)
    where
        _: Verify<{ (B) }>,
    {
    }
    f(TRUE);
}

#[test]
fn can_verify_nested_binary_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit, C: Bit>(_: A, _: B, _: C)
    where
        _: Verify<{ (A & (B | C)) == C }>,
    {
    }
    f(TRUE, FALSE, FALSE);
}

#[test]
fn can_verify_nested_unary_clause() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ !(A & B) }>,
    {
    }
    f(TRUE, FALSE);
}

// --- Boolean comparisons (False < True ordering) -----------------------------

#[test]
fn can_verify_bool_less_than_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A < B }>,
    {
    }
    f(FALSE, TRUE);
}

#[test]
fn can_verify_bool_greater_than_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A > B }>,
    {
    }
    f(TRUE, FALSE);
}

#[test]
fn can_verify_bool_less_equal_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A <= B }>,
    {
    }
    f(FALSE, TRUE);
    f(FALSE, FALSE);
    f(TRUE, TRUE);
}

#[test]
fn can_verify_bool_greater_equal_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A >= B }>,
    {
    }
    f(TRUE, FALSE);
    f(FALSE, FALSE);
    f(TRUE, TRUE);
}

#[test]
fn can_verify_bool_not_equal_clauses() {
    #[verify]
    fn f<A: Bit, B: Bit>(_: A, _: B)
    where
        _: Verify<{ A != B }>,
    {
    }
    f(TRUE, FALSE);
}

// --- Unsigned (type-level integer) clauses -----------------------------------

#[test]
fn can_verify_usize_bitand_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned, C: Unsigned>(_: A, _: B, _: C)
    where
        _: Verify<{ (A & B) == C }>,
    {
    }
    f(U2::default(), U1::default(), U0::default());
    f(U2::default(), U2::default(), U2::default());
}

#[test]
fn can_verify_usize_bitor_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned, C: Unsigned>(_: A, _: B, _: C)
    where
        _: Verify<{ (A | B) == C }>,
    {
    }
    f(U2::default(), U1::default(), U3::default());
    f(U2::default(), U2::default(), U2::default());
}

#[test]
fn can_verify_usize_bitxor_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned, C: Unsigned>(_: A, _: B, _: C)
    where
        _: Verify<{ (A ^ B) == C }>,
    {
    }
    f(U2::default(), U1::default(), U3::default());
    f(U0::default(), U0::default(), U0::default());
    f(U2::default(), U2::default(), U0::default());
}

// --- Literals in clauses ------------------------------------------------------

#[test]
fn can_verify_bool_literals() {
    #[verify]
    fn f<B: Bit>(_: B)
    where
        _: Verify<{ B == false }, { B == !true }>,
    {
    }
    f(FALSE);
}

#[test]
fn can_verify_usize_literals() {
    #[verify]
    fn f<Six: Unsigned, Zero: Unsigned>(_: Six, _: Zero)
    where
        _: Verify<{ Six == 6 }, { Zero == 0 }>,
    {
    }
    f(U6::default(), U0::default());
}

// --- Unsigned arithmetic ------------------------------------------------------

#[test]
fn can_verify_usize_addition_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ (A + B) == 5 }>,
    {
    }
    f(U2::default(), U3::default());
}

#[test]
fn can_verify_usize_subtraction_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ (A - B) == 3 }>,
    {
    }
    f(U5::default(), U2::default());
}

#[test]
fn can_verify_usize_multiplication_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ (A * B) == 8 }>,
    {
    }
    f(U2::default(), U4::default());
}

#[test]
fn can_verify_usize_division_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ (A / B) == 4 }>,
    {
    }
    f(U8::default(), U2::default());
}

#[test]
fn can_verify_usize_rem_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ (A % B) == 2 }>,
    {
    }
    f(U8::default(), U3::default());
}

#[test]
fn can_verify_usize_shl_clauses() {
    #[verify]
    fn f<A: Unsigned>(_: A)
    where
        _: Verify<{ (A << 2) == 4 }>,
    {
    }
    f(U1::default());
}

#[test]
fn can_verify_usize_shr_clauses() {
    #[verify]
    fn f<A: Unsigned>(_: A)
    where
        _: Verify<{ (A >> 2) == 1 }>,
    {
    }
    f(U4::default());
}

// --- Unsigned comparisons -----------------------------------------------------

#[test]
fn can_verify_usize_less_than_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ A < B }>,
    {
    }
    f(U2::default(), U3::default());
}

#[test]
fn can_verify_usize_greater_than_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ A > B }>,
    {
    }
    f(U3::default(), U2::default());
}

#[test]
fn can_verify_usize_less_equal_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ A <= B }>,
    {
    }
    f(U2::default(), U3::default());
    f(U3::default(), U3::default());
}

#[test]
fn can_verify_usize_greater_equal_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ A >= B }>,
    {
    }
    f(U3::default(), U2::default());
    f(U3::default(), U3::default());
}

#[test]
fn can_verify_usize_not_equal_clauses() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned>(_: A, _: B)
    where
        _: Verify<{ A != B }>,
    {
    }
    f(U3::default(), U2::default());
}

// --- Clauses constraining the return type ------------------------------------

#[test]
fn can_verify_result_simple_addition_in_function_body_lhs() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned, C: Unsigned>(a: A, b: B) -> C
    where
        _: Verify<{ A + B == C }>,
    {
        a + b
    }
    let _: U3 = f(U1::default(), U2::default());
}

#[test]
fn can_verify_result_simple_addition_in_function_body_rhs() {
    #[verify]
    fn f<A: Unsigned, B: Unsigned, C: Unsigned>(a: A, b: B) -> C
    where
        _: Verify<{ C == A + B }>,
    {
        a + b
    }
    let _: U3 = f(U1::default(), U2::default());
}

#[test]
fn can_verify_bools_without_braces() {
    #[verify]
    fn f<B: Bit>(_: B)
    where
        _: Verify<B>,
    {
    }
    f(TRUE);
}

#[test]
fn can_verify_type_construction_in_fn_return_value() {
    struct Pod<V: Unsigned>(V);

    #[verify]
    fn f<A: Unsigned>() -> Pod<{ A + 2 }>
    where
        A: Unsigned,
        <A as Add<U2>>::Output: Unsigned,
    {
        Pod(Default::default())
    }
    let _: Pod<U5> = f::<U3>();
}

// --- Negative (compile-fail) tests -------------------------------------------
// Each case compiles a small program that must be rejected by the macro and
// asserts on the exact diagnostic text.

#[test]
#[ignore] // TODO: figure out how to make this test pass in automation.
#[allow(non_snake_case)]
fn compilation_tests() {
    // TODO: Fix error handling.
    Compile(
        "Error_when_multiple_inferred_bounds_are_supplied.rs",
        " use verified::*; #[verify] fn _f<B: Bit>() where _: Verify<{ B }>, _: Verify<{ B }>, {} ",
    )
    .and_expect(
        " error: did not expect to find second `Verify` bound --> $DIR/Error_when_multiple_inferred_bounds_are_supplied.rs:7:9 | 7 | _: Verify<{ B }>, | ^^^^^^^^^^^^^^^^ ",
    );

    Compile(
        "Error_when_inferred_bound_is_not_Verify.rs",
        " use verified::*; #[verify] fn _f<B: Bit>() where _: SomethingElse<{ B }>, {} ",
    )
    .and_expect(
        " error: expected `Verify<_>` --> $DIR/Error_when_inferred_bound_is_not_Verify.rs:6:12 | 6 | _: SomethingElse<{ B }>, | ^^^^^^^^^^^^^^^^^^^^ ",
    );

    Compile(
        "Error_on_unsupported_expression_in_clause.rs",
        " use verified::*; #[verify] fn _f<A: Bit, B: Bit>() where _: Verify<{ A *= B }>, {} ",
    )
    .and_expect(
        " error: unsupported logical expression --> $DIR/Error_on_unsupported_expression_in_clause.rs:6:21 | 6 | _: Verify<{ A *= B }>, | ^^^^^^ ",
    );

    Compile(
        "Error_on_unsupported_literal_in_clause.rs",
        " use verified::*; #[verify] fn _f<N: Unsigned>() where _: Verify<{ N == \"abc\" }>, {} ",
    )
    .and_expect(
        " error: only bool and int literals are supported here --> $DIR/Error_on_unsupported_literal_in_clause.rs:6:26 | 6 | _: Verify<{ N == \"abc\" }>, | ^^^^^ ",
    );
}
use sdl2::render::{WindowCanvas, Texture, TextureQuery}; use std::path::Path; use sdl2::image::LoadTexture; use sdl2::rect::{Point, Rect}; use sdl2::ttf::FontStyle; use sdl2::pixels::Color; use crate::widgets::{Button, Image, Text}; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); pub fn clear_canvas( canvas: &mut WindowCanvas ) { canvas.clear(); } pub fn canvas_present( canvas: &mut WindowCanvas ) { canvas.present(); } pub fn render_background(canvas: &mut WindowCanvas, texture: &Texture) -> Result<(), String> { canvas.copy(&texture, None, None)?; Ok(()) } pub fn render_image( canvas: &mut WindowCanvas, texture: &Texture, position: Point, sprite: Rect, width : i32, height: i32, ) -> Image { let (canvas_width, canvas_height) = canvas.output_size() .expect("Falha ao ler o tamanho do canvas."); let screen_position = position + Point::new( canvas_width as i32 / 2, canvas_height as i32 / 2 ); let image = Rect::from_center( screen_position, width as u32, height as u32 ); canvas.copy(&texture, sprite, image) .expect("Erro inexperado ao renderizar textura."); Image { location: image } } pub fn render_button( canvas: &mut WindowCanvas, texture: &Texture, position: Point, size: (u32, u32) ) -> Button { let (canvas_width, canva_height) = canvas.output_size() .expect("Falha ao obter o tamanho do canvas."); let screen_position = position + Point::new( canvas_width as i32 / 2, canva_height as i32 / 2, ); let button = Rect::from_center( screen_position, size.0, size.1, ); canvas.copy(&texture, None, button) .expect("Erro inexperado ao renderizar a textura."); Button { location: button } } fn get_centered_rect( canvas: &mut WindowCanvas, rect_width: u32, rect_height: u32, cons_width: u32, cons_height: u32 ) -> Rect { let wr = rect_width as f32 / cons_width as f32; let hr = rect_height as f32 / cons_height as f32; let (w, h) = if wr > 1f32 || hr > 1f32 { if wr > hr { let h = (rect_height as f32 / wr) as i32; 
(cons_width as i32, h) } else { let w = (rect_width as f32 / hr) as i32; (w, cons_height as i32) } } else { (rect_width as i32, rect_height as i32) }; let (canvas_width, canvas_height) = canvas.output_size() .expect("Falha ao ler o tamanho do canvas."); let cx = (canvas_width as i32 - w) / 2; let cy = (canvas_height as i32 - h) / 2; rect!(cx, cy, w, h) } pub fn render_text( canvas: &mut WindowCanvas, font_path: &Path, text: &str, size: u16, style: FontStyle, color: Color, position: Point ) { let ttf_context = sdl2::ttf::init().unwrap(); let mut font = ttf_context.load_font(font_path, size).unwrap(); font.set_style(style); let surface = font.render(text) .blended(color).unwrap(); let texture_creator = canvas.texture_creator(); let mut texture = texture_creator.create_texture_from_surface(&surface).unwrap(); render_font(canvas, &texture, position); } pub fn render_font( canvas: &mut WindowCanvas, texture: &Texture, position: Point, ) -> Text { let (canvas_width, canvas_height) = canvas.output_size() .expect("Falha ao ler o tamanho do canvas."); let screen_position = position + Point::new( canvas_width as i32 / 2, canvas_height as i32 / 2 ); let TextureQuery { width, height, .. } = texture.query(); let text = Rect::from_center( screen_position, width as u32, height as u32 ); canvas.copy(&texture, None, text) .expect("Erro inexperado ao renderizar textura."); Text { location: text, } }
use std::io::{stdin, Read, StdinLock};
use std::str::FromStr;

/// Minimal whitespace-delimited token scanner over a locked stdin handle.
#[allow(dead_code)]
struct Scanner<'a> {
    cin: StdinLock<'a>,
}

#[allow(dead_code)]
impl<'a> Scanner<'a> {
    fn new(cin: StdinLock<'a>) -> Scanner<'a> {
        Scanner { cin }
    }

    /// Reads the next whitespace-delimited token and parses it;
    /// returns `None` when parsing fails.
    fn read<T: FromStr>(&mut self) -> Option<T> {
        let token = self
            .cin
            .by_ref()
            .bytes()
            .map(|c| c.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect::<String>();
        token.parse::<T>().ok()
    }

    /// Like `read`, but panics on parse failure.
    fn input<T: FromStr>(&mut self) -> T {
        self.read().unwrap()
    }

    /// Reads `len` tokens into a vector.
    fn vec<T: FromStr>(&mut self, len: usize) -> Vec<T> {
        (0..len).map(|_| self.input()).collect()
    }

    /// Reads a `row` x `col` matrix of tokens.
    fn mat<T: FromStr>(&mut self, row: usize, col: usize) -> Vec<Vec<T>> {
        (0..row).map(|_| self.vec(col)).collect()
    }
}

/// Counts the substrings of the input digit string that are multiples of
/// 2019, by bucketing suffix remainders: two suffixes with equal remainder
/// bracket a substring divisible by 2019 (10 and 2019 are coprime).
fn main() {
    let cin = stdin();
    let cin = cin.lock();
    let mut sc = Scanner::new(cin);

    let s: String = sc.input();
    // Digits, least-significant (rightmost) first.
    // (`chars()` is already an iterator; the original `.into_iter()` was a no-op.)
    let mut chs: Vec<usize> = s
        .chars()
        .map(|x| x.to_digit(10).unwrap() as usize)
        .collect();
    chs.reverse();

    let mut deg = 1usize; // 10^i mod 2019
    // `usize` is pinned explicitly: the inferred `i32` of the original could
    // overflow `x * (x - 1)` for long inputs.
    let mut cnt = [0usize; 2019]; // cnt[r] = number of suffixes with remainder r
    cnt[0] = 1; // the empty suffix
    let mut cur = 0usize;
    for ch in chs {
        cur += ch * deg;
        cur %= 2019;
        deg *= 10;
        deg %= 2019;
        cnt[cur] += 1;
    }

    // Each pair of equal remainders yields one valid substring.
    // `saturating_sub` avoids the debug-mode underflow panic the original
    // `x * (x - 1)` hit whenever a remainder bucket was empty (x == 0).
    let ans: usize = cnt.iter().map(|&x| x * x.saturating_sub(1) / 2).sum();
    println!("{}", ans);
}
extern crate rustty;

use rustty::{
    Terminal,
    Event,
    HasSize,
    CellAccessor
};

use rustty::ui::core::{
    Widget,
    HorizontalAlign,
    VerticalAlign,
    ButtonResult
};

use rustty::ui::{Dialog, StdButton, Canvas};

// Glyph used to paint "filled" circle cells.
const BLOCK: char = '\u{25AA}';

/// Builds the option dialog: +/- buttons mapped to Custom(1)/Custom(2)
/// (increase/decrease radius) and a quit button mapped to Ok.
fn create_optiondlg() -> Dialog {
    let mut optiondlg = Dialog::new(50, 6);

    let mut inc_b = StdButton::new("+ :Increase Radius", '+', ButtonResult::Custom(1));
    inc_b.pack(&optiondlg, HorizontalAlign::Left, VerticalAlign::Top, (1,1));
    let mut dec_b = StdButton::new("- :Decrease Radius", '-', ButtonResult::Custom(2));
    dec_b.pack(&optiondlg, HorizontalAlign::Left, VerticalAlign::Top, (1,2));
    let mut quit_b = StdButton::new("Quit", 'q', ButtonResult::Ok);
    quit_b.pack(&optiondlg, HorizontalAlign::Left, VerticalAlign::Top, (1,3));

    optiondlg.add_button(inc_b);
    optiondlg.add_button(dec_b);
    optiondlg.add_button(quit_b);
    optiondlg.draw_box();
    optiondlg
}

/// Interactive terminal demo: repeatedly draws a circle whose radius is
/// adjusted with the dialog buttons, until the quit button is pressed.
fn main() {
    // Create our terminal, dialog frame and main canvas
    let mut term = Terminal::new().unwrap();
    let mut optiondlg = create_optiondlg();
    let mut canvas = Canvas::new(term.size().0, term.size().1 - 4);

    // Align canvas to top left, and dialog to bottom right
    optiondlg.pack(&term, HorizontalAlign::Right, VerticalAlign::Bottom, (0,0));
    canvas.pack(&term, HorizontalAlign::Left, VerticalAlign::Top, (0,0));

    let mut radius = 10u32;
    'main: loop {
        // Drain all pending key events; the dialog maps keys to results.
        while let Some(Event::Key(ch)) = term.get_event(0).unwrap() {
            match optiondlg.result_for_key(ch) {
                Some(ButtonResult::Ok) => break 'main,
                Some(ButtonResult::Custom(i)) => {
                    // Custom(1) grows the radius, Custom(2) shrinks it;
                    // saturating ops keep it within u32 bounds.
                    radius = if i == 1 {
                        radius.saturating_add(1)
                    } else {
                        radius.saturating_sub(1)
                    };
                },
                _ => {},
            }
        }

        // Grab the size of the canvas
        let (cols, rows) = canvas.size();
        let (cols, rows) = (cols as isize, rows as isize);
        let (a, b) = (cols / 2, rows / 2);

        // Main render loop, draws the circle to canvas
        for i in 0..cols*rows {
            let y = i as isize / cols;
            let x = i as isize % cols;
            let mut cell = canvas.get_mut(x as usize, y as usize).unwrap();
            // Ellipse test centred at (a, b); the x term is divided by 4 to
            // compensate for terminal cells being taller than they are wide.
            if ((x - a).pow(2)/4 + (y - b).pow(2)) <= radius.pow(2) as isize {
                cell.set_ch(BLOCK);
            } else {
                cell.set_ch(' ');
            }
        }

        // draw the canvas, dialog frame and swap buffers
        canvas.draw(&mut term);
        optiondlg.draw(&mut term);
        term.swap_buffers().unwrap();
    }
}
extern crate lab_common;

use std::hint::unreachable_unchecked;

use lab_common::get_user_input;

/// Prompts for a "first last" name on stdin and returns the raw line.
fn get_name() -> String {
    get_user_input(
        Some("Enter your first and last name separate by a space: ")
    )
}

/// Part 1: read a "first last" name and print it as "last, first".
fn part1() {
    let mut name: String = get_name();
    // Re-prompt until the input contains a space, then keep the index.
    // `loop`/`break`-with-value replaces the original `while let None` +
    // second `find` + `unsafe { unreachable_unchecked() }`: the index now
    // comes from the same `find` that ends the loop, so no unsafe is needed.
    let space_index: usize = loop {
        match name.find(' ') {
            Some(index) => break index,
            None => {
                println!("You must enter both a first and last name separated by a space!");
                name = get_name();
            }
        }
    };
    let (first_name, last_name) = name.split_at(space_index);
    // `trim_start` replaces the deprecated `trim_left`; `last_name` still
    // begins with the separating space left by `split_at`.
    println!("{}, {}", last_name.trim_start(), first_name);
}

/// Prints a launch status derived from a `(code, flag)` pair.
fn display_status(tuple: (isize, bool)) {
    // Inclusive range patterns express the original `i >= a && i <= b`
    // guards; negative codes and codes above 50 fall through to "Abort".
    let message: &'static str = match tuple {
        (0..=10, true) => "Launch",
        (11..=50, true) => "Standby",
        (11..=50, false) => "Reboot",
        (0..=10, false) => "Miss",
        _ => "Abort",
    };
    println!("{}", message);
}

/// Part 2: exercise `display_status` with a fixed sample value.
fn part2() {
    let status: (isize, bool) = (9isize, false);
    display_status(status);
}

/// Counts the Collatz steps needed for `n` to reach 1.
/// Callers must pass `n >= 1` (`part3` enforces this); `n == 0` never
/// terminates, exactly as in the original version.
fn collatz(mut n: usize) -> usize {
    let mut count = 0;
    // A plain `while` replaces the guard-based `match`, eliminating the
    // unreachable `unsafe` arm entirely.
    while n != 1 {
        n = if n % 2 == 0 { n / 2 } else { 3 * n + 1 };
        count += 1;
    }
    count
}

const CONJECTURE_MSG: &'static str =
    "Enter a positive integer > 0 with which to test the Collatz Conjecture: ";

/// Part 3: read a positive integer and report its Collatz step count.
fn part3() {
    let mut input: usize = get_user_input(Some(CONJECTURE_MSG));
    while input == 0 {
        println!("You must enter a number greater than zero!");
        input = get_user_input(Some(CONJECTURE_MSG));
    }
    let count = collatz(input);
    println!("Iterations: {}", count);
}

fn main() {
    part1();
    part2();
    // Intentionally repeats part3 until the process is interrupted.
    loop {
        part3();
    }
}
use serde::{Deserialize, Serialize};
use tracing::instrument;

use htsget_config::types::Query;

use crate::{QueryBuilder, Result};

/// A struct to represent a POST request according to the
/// [HtsGet specification](https://samtools.github.io/hts-specs/htsget.html). It implements
/// [Deserialize] to make it more ergonomic. Each `PostRequest` can contain several regions.
#[derive(Serialize, Deserialize, Debug, Default)]
pub struct PostRequest {
  pub format: Option<String>,
  pub class: Option<String>,
  pub fields: Option<Vec<String>>,
  pub tags: Option<Vec<String>>,
  pub notags: Option<Vec<String>>,
  pub regions: Option<Vec<Region>>,
}

/// A struct that contains the data to request for a specific region. It is only meant to be used
/// alongside a `PostRequest`.
#[derive(Serialize, Deserialize, Debug)]
pub struct Region {
  #[serde(rename = "referenceName")]
  pub reference_name: String,
  pub start: Option<u32>,
  pub end: Option<u32>,
}

impl PostRequest {
  /// Converts the `PostRequest` into one or more equivalent [Queries](Query):
  /// one query per region, or a single query when no regions are given.
  #[instrument(level = "trace", skip_all, ret)]
  pub(crate) fn get_queries(self, id: impl Into<String>) -> Result<Vec<Query>> {
    if let Some(ref regions) = self.regions {
      // The id is converted once and cloned per region because each
      // builder takes ownership of its id.
      let id = id.into();
      regions
        .iter()
        .map(|region| {
          Ok(
            self
              .get_base_query_builder(id.clone())?
              .with_reference_name(Some(region.reference_name.clone()))
              .with_range_from_u32(region.start, region.end)?
              .build(),
          )
        })
        .collect::<Result<Vec<Query>>>()
    } else {
      Ok(vec![self.get_base_query_builder(id)?.build()])
    }
  }

  /// Builds a query pre-populated with the request-wide options (format,
  /// class, fields, tags/notags); region data is added by the caller.
  fn get_base_query_builder(&self, id: impl Into<String>) -> Result<QueryBuilder> {
    QueryBuilder::new(Some(id.into()), self.format.clone())?
      .with_class(self.class.clone())?
      .with_fields_from_vec(self.fields.clone())
      .with_tags_from_vec(self.tags.clone(), self.notags.clone())
  }
}

#[cfg(test)]
mod tests {
  use htsget_config::types::{Class, Format};

  use super::*;

  // No regions: one query carrying only the request-wide options.
  #[test]
  fn post_request_without_regions() {
    assert_eq!(
      PostRequest {
        format: Some("VCF".to_string()),
        class: Some("header".to_string()),
        fields: None,
        tags: None,
        notags: None,
        regions: None,
      }
      .get_queries("id")
      .unwrap(),
      vec![Query::new("id", Format::Vcf).with_class(Class::Header)]
    );
  }

  // A single region maps to a single query with that region's range.
  #[test]
  fn post_request_with_one_region() {
    assert_eq!(
      PostRequest {
        format: Some("VCF".to_string()),
        class: Some("header".to_string()),
        fields: None,
        tags: None,
        notags: None,
        regions: Some(vec![Region {
          reference_name: "20".to_string(),
          start: Some(150),
          end: Some(153),
        }]),
      }
      .get_queries("id")
      .unwrap(),
      vec![Query::new("id", Format::Vcf)
        .with_class(Class::Header)
        .with_reference_name("20".to_string())
        .with_start(150)
        .with_end(153)]
    );
  }

  // Multiple regions map to one query each, in input order.
  #[test]
  fn post_request_with_regions() {
    assert_eq!(
      PostRequest {
        format: Some("VCF".to_string()),
        class: Some("header".to_string()),
        fields: None,
        tags: None,
        notags: None,
        regions: Some(vec![
          Region {
            reference_name: "20".to_string(),
            start: Some(150),
            end: Some(153),
          },
          Region {
            reference_name: "11".to_string(),
            start: Some(152),
            end: Some(154),
          }
        ]),
      }
      .get_queries("id")
      .unwrap(),
      vec![
        Query::new("id", Format::Vcf)
          .with_class(Class::Header)
          .with_reference_name("20".to_string())
          .with_start(150)
          .with_end(153),
        Query::new("id", Format::Vcf)
          .with_class(Class::Header)
          .with_reference_name("11".to_string())
          .with_start(152)
          .with_end(154)
      ]
    );
  }
}
mod common;

use common::{testenv, TestEnv, EXAMPLE_UNSUPPORTED};
use predicates::prelude::*;
use rstest::rstest;

// A path that does not exist on disk.
const EXAMPLE_NOT_EXISTING: &str = "just/some/random/path";
// An existing path that is a directory, not a regular file.
const EXAMPLE_DIR: &str = ".";

/// Every subcommand must reject input files that are not HEIF images.
#[rstest]
#[case(&["info", EXAMPLE_UNSUPPORTED.to_str().unwrap()])]
#[case(&["preview", EXAMPLE_UNSUPPORTED.to_str().unwrap()])]
#[case(&["set", EXAMPLE_UNSUPPORTED.to_str().unwrap()])]
#[case(&["unpack", EXAMPLE_UNSUPPORTED.to_str().unwrap(), "out"])]
fn test_unsupported_image(testenv: TestEnv, #[case] args: &[&str]) {
    let expected_message = "Error: only HEIF files are supported";
    testenv
        .run(args)
        .failure()
        .stderr(predicate::str::contains(expected_message));
}

/// Every subcommand must fail with an "is not accessible" error for a
/// path that does not exist.
#[rstest]
#[case(&["info", EXAMPLE_NOT_EXISTING])]
#[case(&["preview", EXAMPLE_NOT_EXISTING])]
#[case(&["set", EXAMPLE_NOT_EXISTING])]
#[case(&["unpack", EXAMPLE_NOT_EXISTING, "out"])]
fn test_not_existing_path(testenv: TestEnv, #[case] args: &[&str]) {
    let expected_message = format!("Error: file '{EXAMPLE_NOT_EXISTING}' is not accessible");
    testenv
        .run(args)
        .failure()
        .stderr(predicate::str::contains(expected_message));
}

/// Every subcommand must fail with an "is not a file" error when given a
/// directory instead of a file.
#[rstest]
#[case(&["info", EXAMPLE_DIR])]
#[case(&["preview", EXAMPLE_DIR])]
#[case(&["set", EXAMPLE_DIR])]
#[case(&["unpack", EXAMPLE_DIR, "out"])]
fn test_dir_path(testenv: TestEnv, #[case] args: &[&str]) {
    let expected_message = format!("Error: '{EXAMPLE_DIR}' is not a file");
    testenv
        .run(args)
        .failure()
        .stderr(predicate::str::contains(expected_message));
}
use once_cell::sync::Lazy;
use rusqlite::Connection;
use std::sync::Mutex;

/// Process-wide SQLite connection to `blog.db`, opened lazily on first use
/// and serialised behind a `Mutex` (rusqlite connections are not `Sync`).
/// NOTE(review): the `unwrap` panics if the database cannot be opened —
/// confirm that aborting the process is the intended failure mode here.
pub static CONN: Lazy<Mutex<Connection>> =
    Lazy::new(|| Mutex::new(Connection::open("blog.db").unwrap()));
use azure_core::headers::{
    account_kind_from_headers, date_from_headers, request_id_from_headers, sku_name_from_headers,
};
use azure_core::RequestId;
use chrono::{DateTime, Utc};
use http::HeaderMap;

/// Account-level information extracted from a service response's headers.
#[derive(Debug, Clone)]
pub struct GetAccountInformationResponse {
    pub request_id: RequestId,
    pub date: DateTime<Utc>,
    pub sku_name: String,
    pub account_kind: String,
}

impl GetAccountInformationResponse {
    /// Parses every field of the response out of `headers`, propagating the
    /// first header that is missing or malformed. Fields are evaluated in
    /// declaration order: request id, date, SKU name, account kind.
    pub(crate) fn from_headers(
        headers: &HeaderMap,
    ) -> crate::Result<GetAccountInformationResponse> {
        Ok(GetAccountInformationResponse {
            request_id: request_id_from_headers(headers)?,
            date: date_from_headers(headers)?,
            sku_name: sku_name_from_headers(headers)?,
            account_kind: account_kind_from_headers(headers)?,
        })
    }
}
// NOTE(review): svd2rust-style generated register accessors for DDRCTRL_INIT1;
// regenerate from the SVD instead of editing by hand.
#[doc = "Register `DDRCTRL_INIT1` reader"]
pub type R = crate::R<DDRCTRL_INIT1_SPEC>;
#[doc = "Register `DDRCTRL_INIT1` writer"]
pub type W = crate::W<DDRCTRL_INIT1_SPEC>;
#[doc = "Field `PRE_OCD_X32` reader - PRE_OCD_X32"]
pub type PRE_OCD_X32_R = crate::FieldReader;
#[doc = "Field `PRE_OCD_X32` writer - PRE_OCD_X32"]
pub type PRE_OCD_X32_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `DRAM_RSTN_X1024` reader - DRAM_RSTN_X1024"]
pub type DRAM_RSTN_X1024_R = crate::FieldReader<u16>;
#[doc = "Field `DRAM_RSTN_X1024` writer - DRAM_RSTN_X1024"]
pub type DRAM_RSTN_X1024_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 9, O, u16>;
impl R {
    #[doc = "Bits 0:3 - PRE_OCD_X32"]
    #[inline(always)]
    pub fn pre_ocd_x32(&self) -> PRE_OCD_X32_R {
        // Low 4 bits of the register.
        PRE_OCD_X32_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 16:24 - DRAM_RSTN_X1024"]
    #[inline(always)]
    pub fn dram_rstn_x1024(&self) -> DRAM_RSTN_X1024_R {
        // 9-bit field starting at bit 16.
        DRAM_RSTN_X1024_R::new(((self.bits >> 16) & 0x01ff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:3 - PRE_OCD_X32"]
    #[inline(always)]
    #[must_use]
    pub fn pre_ocd_x32(&mut self) -> PRE_OCD_X32_W<DDRCTRL_INIT1_SPEC, 0> {
        PRE_OCD_X32_W::new(self)
    }
    #[doc = "Bits 16:24 - DRAM_RSTN_X1024"]
    #[inline(always)]
    #[must_use]
    pub fn dram_rstn_x1024(&mut self) -> DRAM_RSTN_X1024_W<DDRCTRL_INIT1_SPEC, 16> {
        DRAM_RSTN_X1024_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DDRCTRL SDRAM initialization register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ddrctrl_init1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ddrctrl_init1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DDRCTRL_INIT1_SPEC;
impl crate::RegisterSpec for DDRCTRL_INIT1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ddrctrl_init1::R`](R) reader structure"]
impl crate::Readable for DDRCTRL_INIT1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ddrctrl_init1::W`](W) writer structure"]
impl crate::Writable for DDRCTRL_INIT1_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DDRCTRL_INIT1 to value 0"]
impl crate::Resettable for DDRCTRL_INIT1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
/// No-op placeholder: binds a throwaway local and returns `()`.
pub fn foo() {
    let _ignored = 5;
}
//! Render utilities for graphics backend for game engine. use std::collections::HashSet; use std::iter; use std::sync::Arc; use egui::{ClippedMesh, Texture, TextureId}; use image::RgbaImage; use vulkano::buffer::{BufferUsage, DeviceLocalBuffer}; use vulkano::command_buffer::{ AutoCommandBufferBuilder, CommandBufferUsage, PrimaryAutoCommandBuffer, }; use vulkano::device::physical::PhysicalDevice; use vulkano::device::{Device, DeviceExtensions, Features, Queue}; use vulkano::format::Format; use vulkano::image::view::ImageView; use vulkano::image::{ImageDimensions, ImageUsage, ImmutableImage, MipmapsCount, SwapchainImage}; use vulkano::instance::debug::{DebugCallback, MessageSeverity, MessageType}; use vulkano::instance::Instance; use vulkano::swapchain::{AcquireError, PresentMode, Surface, Swapchain}; use vulkano::sync::{FlushError, GpuFuture, SharingMode}; use vulkano::{swapchain, sync}; use vulkano_win::VkSurfaceBuild; use winit::dpi::LogicalSize; use winit::event_loop::EventLoop; use winit::window::{Window, WindowBuilder}; pub use error::RendererCreationError; use error::{ImageRegisterError, RenderError, ResizeError, TransferCommandBufferCreationError}; use crate::config::Config; use super::{ camera::CameraUBO, frame::{ object_draw::ObjectDrawSystem, system::{FrameSystem, Pass}, ui_draw::UiDrawSystem, }, utils, }; pub mod error; /// System that renders all game objects and UI. 
#[allow(dead_code)]
pub struct Renderer {
    // GPU work submitted for the previous frame; joined with the next frame.
    previous_frame_end: Option<Box<dyn GpuFuture + Send + Sync>>,
    // Set when the swapchain is stale (resize / out-of-date / suboptimal).
    recreate_swapchain: bool,
    // Camera data uploaded into `uniform_buffers` before each frame.
    camera_ubo: CameraUBO,
    ui_draw_system: UiDrawSystem,
    object_draw_system: ObjectDrawSystem,
    frame_system: FrameSystem,
    // One camera UBO per swapchain image.
    uniform_buffers: Vec<Arc<DeviceLocalBuffer<CameraUBO>>>,
    swapchain_images: Vec<Arc<SwapchainImage<Window>>>,
    swapchain: Arc<Swapchain<Window>>,
    graphics_queue: Arc<Queue>,
    present_queue: Arc<Queue>,
    transfer_queue: Arc<Queue>,
    device: Arc<Device>,
    surface: Arc<Surface<Window>>,
    debug_callback: Option<DebugCallback>,
    instance: Arc<Instance>,
}

impl Renderer {
    /// Creates render system: instance, optional validation callback, window
    /// surface, device + queues, swapchain, per-image uniform buffers and the
    /// frame/draw subsystems.
    pub fn new<T>(config: &Config, event_loop: &EventLoop<T>) -> Result<Self, RendererCreationError>
    where
        T: 'static,
    {
        let instance = utils::create_instance(config)?;
        log::info!(
            "max version of Vulkan instance is {}",
            instance.max_api_version(),
        );

        // Validation layers are attached only when enabled in the config.
        let debug_callback = config
            .enable_validation()
            .then(|| {
                use super::debug_callback::create_debug_callback as new;
                let debug_callback = new(&instance, MessageSeverity::all(), MessageType::all())?;
                log::info!("debug callback was attached to the instance");
                Result::<_, RendererCreationError>::Ok(debug_callback)
            })
            .transpose()?;

        // The window starts hidden; the caller is expected to show it later.
        let surface = WindowBuilder::new()
            .with_title(config.name())
            .with_min_inner_size(LogicalSize::new(250, 100))
            .with_visible(false)
            .build_vk_surface(event_loop, instance.clone())?;
        log::info!("window & surface initialized successfully");

        let physical_devices = PhysicalDevice::enumerate(&instance);
        log::info!("enumerated {} physical devices", physical_devices.len());

        let required_extensions = DeviceExtensions {
            khr_swapchain: true,
            ..DeviceExtensions::none()
        };
        let required_features = Features::none();
        let utils::SuitablePhysicalDevice {
            physical_device,
            graphics_family,
            present_family,
            transfer_family,
        } = utils::suitable_physical_device(
            physical_devices,
            &surface,
            &required_extensions,
            &required_features,
        )
        .ok_or_else(|| RendererCreationError::NoSuitablePhysicalDevice)?;
        log::info!(
            r#"using device "{}" of type "{:?}" with Vulkan version {}"#,
            physical_device.properties().device_name,
            physical_device.properties().device_type,
            physical_device.api_version(),
        );

        let (device, mut queues) = {
            let priorities = 1.0;
            // Present/transfer families fall back to the graphics family, so
            // the set may contain 1-3 distinct queue families.
            let unique_queue_families = {
                let unique_queue_families: HashSet<_> = [
                    graphics_family.id(),
                    present_family.unwrap_or(graphics_family).id(),
                    transfer_family.unwrap_or(graphics_family).id(),
                ]
                .iter()
                .cloned()
                .collect();
                unique_queue_families.into_iter().map(|family| {
                    (
                        physical_device.queue_family_by_id(family).unwrap(),
                        priorities,
                    )
                })
            };
            let required_extensions = physical_device
                .required_extensions()
                .union(&required_extensions);
            Device::new(
                physical_device,
                &required_features,
                &required_extensions,
                unique_queue_families,
            )?
        };
        // Queues come back in family order; missing ones alias the graphics queue.
        let graphics_queue = queues.next().unwrap();
        let present_queue = queues.next().unwrap_or_else(|| graphics_queue.clone());
        let transfer_queue = queues.next().unwrap_or_else(|| graphics_queue.clone());

        let (swapchain, swapchain_images) = {
            let capabilities = surface.capabilities(physical_device)?;
            let (format, color_space) = utils::suitable_image_format(&capabilities);
            // Prefer low-latency Mailbox; Fifo is guaranteed to exist.
            let present_mode = capabilities
                .present_modes
                .iter()
                .find(|&mode| mode == PresentMode::Mailbox)
                .unwrap_or(PresentMode::Fifo);
            let dimensions = if let Some(current_extent) = capabilities.current_extent {
                current_extent
            } else {
                // No fixed extent: clamp the window size into the allowed range.
                let window_size = surface.window().inner_size();
                let min_width = capabilities.min_image_extent[0];
                let max_width = capabilities.max_image_extent[0];
                let min_height = capabilities.min_image_extent[1];
                let max_height = capabilities.max_image_extent[1];
                [
                    window_size.width.clamp(min_width, max_width),
                    window_size.height.clamp(min_height, max_height),
                ]
            };
            let image_count = {
                let image_count = capabilities.min_image_count + 1;
                // NOTE(review): `.max(max_image_count)` always picks the
                // device's maximum image count; `.min()` (clamping to the
                // upper bound) looks like what was intended — confirm.
                if let Some(max_image_count) = capabilities.max_image_count {
                    image_count.max(max_image_count)
                } else {
                    image_count
                }
            };
            // Use concurrent sharing only when present and graphics are
            // different families; otherwise exclusive ownership.
            let sharing_mode = present_family
                .as_ref()
                .map(|present_family| {
                    (present_family.id() != graphics_family.id()).then(|| {
                        let queues = [&graphics_queue, &present_queue];
                        SharingMode::from(&queues[..])
                    })
                })
                .flatten()
                .unwrap_or_else(|| SharingMode::from(&graphics_queue));

            Swapchain::start(device.clone(), surface.clone())
                .format(format)
                .color_space(color_space)
                .present_mode(present_mode)
                .dimensions(dimensions)
                .num_images(image_count)
                .transform(capabilities.current_transform)
                .sharing_mode(sharing_mode)
                .usage(ImageUsage::color_attachment())
                .build()?
        };

        // One device-local camera UBO per swapchain image, filled from the
        // transfer queue before each frame (see `transfer_cb`).
        let uniform_buffers = swapchain_images
            .iter()
            .map(|_| {
                DeviceLocalBuffer::new(
                    device.clone(),
                    BufferUsage::uniform_buffer_transfer_destination(),
                    iter::once(transfer_queue.family()),
                )
            })
            .collect::<Result<Vec<_>, _>>()?;

        let frame_system = FrameSystem::new(graphics_queue.clone(), swapchain.format())?;
        let object_draw_system =
            ObjectDrawSystem::new(graphics_queue.clone(), frame_system.object_subpass())?;
        let ui_draw_system = UiDrawSystem::new(graphics_queue.clone(), frame_system.ui_subpass())?;

        let previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<_>);
        Ok(Self {
            instance,
            debug_callback,
            surface,
            device,
            graphics_queue,
            present_queue,
            transfer_queue,
            swapchain,
            swapchain_images,
            uniform_buffers,
            frame_system,
            object_draw_system,
            ui_draw_system,
            camera_ubo: CameraUBO::default(),
            previous_frame_end,
            recreate_swapchain: false,
        })
    }

    /// Underlying window of render system.
    pub fn window(&self) -> &Window {
        self.surface.window()
    }

    /// Resize the underlying window and update Vulkan objects.
    pub fn resize(&mut self) -> Result<(), ResizeError> {
        let dimensions = self.window().inner_size().into();
        // Recreate the swapchain with the new window dimensions.
        let (swapchain, swapchain_images) =
            self.swapchain.recreate().dimensions(dimensions).build()?;
        self.swapchain = swapchain;
        self.swapchain_images = swapchain_images;
        self.recreate_swapchain = false;
        Ok(())
    }

    /// Stores the camera data to be uploaded before the next frame.
    pub fn set_camera_ubo(&mut self, ubo: CameraUBO) {
        self.camera_ubo = ubo;
    }

    /// Create command buffer for transfer operations which will be executed
    /// before actual rendering: uploads `camera_ubo` into the uniform buffer
    /// that belongs to the acquired swapchain image.
    fn transfer_cb(
        &self,
        image_index: usize,
    ) -> Result<PrimaryAutoCommandBuffer, TransferCommandBufferCreationError> {
        let uniform_buffer = self.uniform_buffers[image_index].clone();
        let mut builder = AutoCommandBufferBuilder::primary(
            self.device.clone(),
            self.transfer_queue.family(),
            CommandBufferUsage::OneTimeSubmit,
        )?;
        builder.update_buffer(uniform_buffer, Box::new(self.camera_ubo))?;
        Ok(builder.build()?)
    }

    /// Uploads an RGBA image to the GPU and registers it with the UI draw
    /// system, returning the texture id to reference it by.
    pub fn register_ui_image(
        &mut self,
        image: &RgbaImage,
    ) -> Result<TextureId, ImageRegisterError> {
        // Flatten the pixels into a contiguous RGBA byte stream.
        let pixels: Vec<_> = image.pixels().flat_map(|p| p.0).collect();
        let (image, future) = ImmutableImage::from_iter(
            pixels,
            ImageDimensions::Dim2d {
                width: image.width(),
                height: image.height(),
                array_layers: 1,
            },
            MipmapsCount::One,
            Format::R8G8B8A8_SRGB, // todo: remove hardcoded format
            self.transfer_queue.clone(),
        )?;
        // Block until the upload is finished before exposing the view.
        future.flush()?;
        let image_view = ImageView::new(image)?;
        Ok(self.ui_draw_system.register_texture(image_view)?)
    }

    /// Render new frame into the underlying window: acquire an image, upload
    /// the camera UBO, run the deferred object pass and optional UI pass,
    /// then present. Out-of-date swapchains are flagged for recreation and
    /// reported as success.
    pub fn render(
        &mut self,
        mut ui: Option<(Vec<ClippedMesh>, Arc<Texture>)>,
    ) -> Result<(), RenderError> {
        // Release resources of frames the GPU has already finished.
        self.previous_frame_end.as_mut().unwrap().cleanup_finished();
        if self.recreate_swapchain {
            self.resize()?;
        }

        let (image_index, suboptimal, acquire_future) =
            match swapchain::acquire_next_image(self.swapchain.clone(), None) {
                Ok(r) => r,
                Err(AcquireError::OutOfDate) => {
                    // Stale swapchain: skip this frame and recreate next time.
                    self.recreate_swapchain = true;
                    return Ok(());
                }
                Err(err) => return Err(RenderError::AcquireNextImage(err)),
            };
        self.recreate_swapchain = suboptimal;

        // Upload the camera UBO on the transfer queue before drawing.
        let transfer_command_buffer = self.transfer_cb(image_index)?;
        let previous_frame_end = self.previous_frame_end.take().unwrap();
        let before_future = previous_frame_end
            .join(acquire_future)
            .then_execute(self.transfer_queue.clone(), transfer_command_buffer)?
            .then_signal_semaphore();

        let scale_factor = self.window().scale_factor() as f32;
        let graphics_future = {
            let mut frame = self
                .frame_system
                .frame(before_future, self.swapchain_images[image_index].clone())?;
            let mut graphics_future = Box::new(sync::now(self.device.clone())) as Box<_>;
            // Drive the frame pass by pass until it is finished.
            while let Some(next_pass) = frame.next_pass()? {
                match next_pass {
                    Pass::Deferred(mut draw_pass) => {
                        let uniform_buffer = self.uniform_buffers[image_index].clone();
                        let command_buffer = self
                            .object_draw_system
                            .draw(draw_pass.viewport_size(), uniform_buffer)?;
                        draw_pass.execute(command_buffer)?;
                    }
                    Pass::UI(mut ui_pass) => {
                        // The UI payload is consumed at most once per frame.
                        if let Some((meshes, texture)) = ui.take() {
                            let command_buffer = self.ui_draw_system.draw(
                                ui_pass.viewport_size(),
                                scale_factor,
                                meshes,
                                texture,
                            )?;
                            ui_pass.execute(command_buffer)?;
                        }
                    }
                    Pass::Finished(future) => {
                        graphics_future = future;
                    }
                }
            }
            graphics_future
        };

        let future = graphics_future
            .then_swapchain_present(
                self.present_queue.clone(),
                self.swapchain.clone(),
                image_index,
            )
            .then_signal_fence_and_flush();
        match future {
            Ok(future) => {
                self.previous_frame_end = Some(Box::new(future));
                Ok(())
            }
            Err(FlushError::OutOfDate) => {
                // Presentation raced a resize; recreate and carry on.
                self.recreate_swapchain = true;
                self.previous_frame_end = Some(Box::new(sync::now(self.device.clone())));
                Ok(())
            }
            Err(err) => {
                self.previous_frame_end = Some(Box::new(sync::now(self.device.clone())));
                Err(RenderError::SubmitQueue(err))
            }
        }
    }
}
/*
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT license.
 */
use byteorder::{LittleEndian, ReadBytesExt};
use rand::distributions::{Distribution, Uniform};
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::mem;

use crate::common::{ANNError, ANNResult};
use crate::utils::CachedReader;
use crate::utils::{
    convert_types_u32_usize, convert_types_u64_usize, convert_types_usize_u32,
    convert_types_usize_u64, convert_types_usize_u8, save_bin_f32, save_bin_u32, save_bin_u64,
};
use crate::utils::{file_exists, load_bin, open_file_to_write, METADATA_SIZE};

/// File-backed storage for product-quantization artifacts: the pivot table,
/// the compressed (PQ-encoded) vectors, and the raw training data.
#[derive(Debug)]
pub struct PQStorage {
    /// Pivot table path
    pivot_file: String,
    /// Compressed pivot path
    compressed_pivot_file: String,
    /// Data used to construct PQ table and PQ compressed table
    pq_data_file: String,
    /// PQ data reader
    pq_data_file_reader: File,
}

impl PQStorage {
    /// Opens `pq_data_file` for reading and records the three artifact paths.
    pub fn new(
        pivot_file: &str,
        compressed_pivot_file: &str,
        pq_data_file: &str,
    ) -> std::io::Result<Self> {
        let pq_data_file_reader = File::open(pq_data_file)?;
        Ok(Self {
            pivot_file: pivot_file.to_string(),
            compressed_pivot_file: compressed_pivot_file.to_string(),
            pq_data_file: pq_data_file.to_string(),
            pq_data_file_reader,
        })
    }

    /// Writes the leading (npts, pq_chunk) i32 header of the compressed
    /// pivot file, little-endian.
    pub fn write_compressed_pivot_metadata(&self, npts: i32, pq_chunk: i32) -> std::io::Result<()> {
        let mut writer = open_file_to_write(&self.compressed_pivot_file)?;
        writer.write_all(&npts.to_le_bytes())?;
        writer.write_all(&pq_chunk.to_le_bytes())?;
        Ok(())
    }

    /// Writes `block_size * num_pq_chunks` code entries after the two-i32
    /// header: as raw `usize` bytes when more than 256 centers are used,
    /// otherwise narrowed to one byte per code.
    pub fn write_compressed_pivot_data(
        &self,
        compressed_base: &[usize],
        num_centers: usize,
        block_size: usize,
        num_pq_chunks: usize,
    ) -> std::io::Result<()> {
        let mut writer = open_file_to_write(&self.compressed_pivot_file)?;
        // Skip the (npts, pq_chunk) header written separately.
        writer.seek(SeekFrom::Start((std::mem::size_of::<i32>() * 2) as u64))?;
        if num_centers > 256 {
            // SAFETY: reinterprets the live `compressed_base` slice as raw
            // bytes; the byte length equals block_size * num_pq_chunks
            // elements times size_of::<usize>(), staying inside the slice.
            // NOTE(review): assumes compressed_base has at least
            // block_size * num_pq_chunks elements — confirm at call sites.
            writer.write_all(unsafe {
                std::slice::from_raw_parts(
                    compressed_base.as_ptr() as *const u8,
                    block_size * num_pq_chunks * std::mem::size_of::<usize>(),
                )
            })?;
        } else {
            // <= 256 centers: every code fits in a u8.
            let compressed_base_u8 =
                convert_types_usize_u8(compressed_base, block_size, num_pq_chunks);
            writer.write_all(&compressed_base_u8)?;
        }
        Ok(())
    }

    /// Writes the pivot file: offset table, pivot matrix, centroid vector and
    /// chunk offsets, laid out back to back after `METADATA_SIZE` bytes.
    pub fn write_pivot_data(
        &self,
        full_pivot_data: &[f32],
        centroid: &[f32],
        chunk_offsets: &[usize],
        num_centers: usize,
        dim: usize,
    ) -> std::io::Result<()> {
        // cumul_bytes[i] records where section i starts; the final offset
        // table (as u64) is written at position 0 last.
        let mut cumul_bytes: Vec<usize> = vec![0; 4];
        cumul_bytes[0] = METADATA_SIZE;
        cumul_bytes[1] = cumul_bytes[0]
            + save_bin_f32(
                &self.pivot_file,
                full_pivot_data,
                num_centers,
                dim,
                cumul_bytes[0],
            )?;
        cumul_bytes[2] =
            cumul_bytes[1] + save_bin_f32(&self.pivot_file, centroid, dim, 1, cumul_bytes[1])?;
        // The writer can only emit u32/u64, not usize, so convert first.
        // NOTE(review): the variable name says u64 but the data is u32
        // (written with save_bin_u32, read back as u32 in load_pivot_data).
        let chunk_offsets_u64 = convert_types_usize_u32(chunk_offsets, chunk_offsets.len(), 1);
        cumul_bytes[3] = cumul_bytes[2]
            + save_bin_u32(
                &self.pivot_file,
                &chunk_offsets_u64,
                chunk_offsets.len(),
                1,
                cumul_bytes[2],
            )?;
        let cumul_bytes_u64 = convert_types_usize_u64(&cumul_bytes, 4, 1);
        save_bin_u64(&self.pivot_file, &cumul_bytes_u64, cumul_bytes.len(), 1, 0)?;
        Ok(())
    }

    /// Returns true when the pivot file already exists on disk.
    pub fn pivot_data_exist(&self) -> bool {
        file_exists(&self.pivot_file)
    }

    /// Reads (num_centers, dim) from the pivot file metadata.
    pub fn read_pivot_metadata(&self) -> std::io::Result<(usize, usize)> {
        let (_, file_num_centers, file_dim) = load_bin::<f32>(&self.pivot_file, METADATA_SIZE)?;
        Ok((file_num_centers, file_dim))
    }

    /// Loads the pivot matrix, centroid and chunk offsets, validating each
    /// section's dimensions against the expected values.
    pub fn load_pivot_data(
        &self,
        num_pq_chunks: &usize,
        num_centers: &usize,
        dim: &usize,
    ) -> ANNResult<(Vec<f32>, Vec<f32>, Vec<usize>)> {
        // Load file offset data. File saved as offset data(4*1) -> pivot data(centroid num*dim) -> centroid of dim data(dim*1) -> chunk offset data(chunksize+1*1)
        // Only u64 (not usize) can be written, so the file stores u64 and we
        // convert back to usize on load.
        let (data, offset_num, nc) = load_bin::<u64>(&self.pivot_file, 0)?;
        let file_offset_data = convert_types_u64_usize(&data, offset_num, nc);
        if offset_num != 4 {
            let error_message = format!("Error reading pq_pivots file {}. Offsets don't contain correct metadata, # offsets = {}, but expecting 4.", &self.pivot_file, offset_num);
            return Err(ANNError::log_pq_error(error_message));
        }

        let (data, pivot_num, pivot_dim) = load_bin::<f32>(&self.pivot_file, file_offset_data[0])?;
        let full_pivot_data = data;
        if pivot_num != *num_centers || pivot_dim != *dim {
            let error_message = format!("Error reading pq_pivots file {}. file_num_centers = {}, file_dim = {} but expecting {} centers in {} dimensions.", &self.pivot_file, pivot_num, pivot_dim, num_centers, dim);
            return Err(ANNError::log_pq_error(error_message));
        }

        let (data, centroid_dim, nc) = load_bin::<f32>(&self.pivot_file, file_offset_data[1])?;
        let centroid = data;
        if centroid_dim != *dim || nc != 1 {
            let error_message = format!("Error reading pq_pivots file {}. file_dim = {}, file_cols = {} but expecting {} entries in 1 dimension.", &self.pivot_file, centroid_dim, nc, dim);
            return Err(ANNError::log_pq_error(error_message));
        }

        let (data, chunk_offset_number, nc) = load_bin::<u32>(&self.pivot_file, file_offset_data[2])?;
        let chunk_offsets = convert_types_u32_usize(&data, chunk_offset_number, nc);
        if chunk_offset_number != *num_pq_chunks + 1 || nc != 1 {
            let error_message = format!("Error reading pq_pivots file at chunk offsets; file has nr={}, nc={} but expecting nr={} and nc=1.", chunk_offset_number, nc, num_pq_chunks + 1);
            return Err(ANNError::log_pq_error(error_message));
        }
        Ok((full_pivot_data, centroid, chunk_offsets))
    }

    /// Reads the (num_points, dim) i32 header of the PQ data file,
    /// advancing the internal reader past it.
    pub fn read_pq_data_metadata(&mut self) -> std::io::Result<(usize, usize)> {
        let npts_i32 = self.pq_data_file_reader.read_i32::<LittleEndian>()?;
        let dim_i32 = self.pq_data_file_reader.read_i32::<LittleEndian>()?;
        let num_points = npts_i32 as usize;
        let dim = dim_i32 as usize;
        Ok((num_points, dim))
    }

    /// Reads the next `cur_block_size * dim` values of type `T` from the PQ
    /// data file, reinterpreting the raw bytes.
    pub fn read_pq_block_data<T: Copy>(
        &mut self,
        cur_block_size: usize,
        dim: usize,
    ) -> std::io::Result<Vec<T>> {
        let mut buf = vec![0u8; cur_block_size * dim * std::mem::size_of::<T>()];
        self.pq_data_file_reader.read_exact(&mut buf)?;

        // SAFETY: `buf` holds exactly cur_block_size * dim * size_of::<T>()
        // bytes and stays alive for the duration of the borrow; the slice is
        // copied into an owned Vec before `buf` is dropped.
        // NOTE(review): assumes `T` is valid for any bit pattern (true for
        // the numeric types used here) and that alignment of `T` is
        // satisfied by the Vec<u8> allocation — confirm for wider types.
        let ptr = buf.as_ptr() as *const T;
        let block_data = unsafe { std::slice::from_raw_parts(ptr, cur_block_size * dim) };
        Ok(block_data.to_vec())
    }

    /// Streams the PQ data file and samples each vector with probability
    /// `p_val`, returning the sample as `f32`.
    ///
    /// # Arguments
    /// * `p_val` - probability of keeping each vector (clamped to 1.0)
    ///
    /// # Returns
    /// `(sampled_vectors, slice_size, dim)` where `sampled_vectors` is a
    /// flattened `slice_size * dim` matrix of the sampled rows.
    pub fn gen_random_slice<T: Default + Copy + Into<f32>>(
        &self,
        mut p_val: f64,
    ) -> ANNResult<(Vec<f32>, usize, usize)> {
        let read_blk_size = 64 * 1024 * 1024;
        let mut reader = CachedReader::new(&self.pq_data_file, read_blk_size)?;
        let npts = reader.read_u32()? as usize;
        let dim = reader.read_u32()? as usize;
        let mut sampled_vectors: Vec<f32> = Vec::new();
        let mut slice_size = 0;
        p_val = if p_val < 1f64 { p_val } else { 1f64 };

        let mut generator = rand::thread_rng();
        let distribution = Uniform::from(0.0..1.0);

        for _ in 0..npts {
            let mut cur_vector_bytes = vec![0u8; dim * mem::size_of::<T>()];
            reader.read(&mut cur_vector_bytes)?;
            let random_value = distribution.sample(&mut generator);
            if random_value < p_val {
                // SAFETY: `cur_vector_bytes` holds dim * size_of::<T>() bytes
                // and outlives the borrow; the values are converted to f32
                // immediately. Same bit-validity/alignment caveat as
                // `read_pq_block_data` applies.
                let ptr = cur_vector_bytes.as_ptr() as *const T;
                let cur_vector_t = unsafe { std::slice::from_raw_parts(ptr, dim) };
                sampled_vectors.extend(cur_vector_t.iter().map(|&t| t.into()));
                slice_size += 1;
            }
        }
        Ok((sampled_vectors, slice_size, dim))
    }
}

#[cfg(test)]
mod pq_storage_tests {
    use rand::Rng;

    use super::*;
    use crate::utils::gen_random_slice;

    const DATA_FILE: &str = "tests/data/siftsmall_learn.bin";
    const PQ_PIVOT_PATH: &str = "tests/data/siftsmall_learn.bin_pq_pivots.bin";
    const PQ_COMPRESSED_PATH: &str = "tests/data/empty_pq_compressed.bin";

    #[test]
    fn new_test() {
        let result = PQStorage::new(PQ_PIVOT_PATH, PQ_COMPRESSED_PATH, DATA_FILE);
        assert!(result.is_ok());
    }

    // Round-trips the two-i32 header through a scratch file.
    #[test]
    fn write_compressed_pivot_metadata_test() {
        let compress_pivot_path = "write_compressed_pivot_metadata_test.bin";
        let result = PQStorage::new(PQ_PIVOT_PATH, compress_pivot_path, DATA_FILE).unwrap();
        _ = result.write_compressed_pivot_metadata(100, 20);

        let mut result_reader = File::open(compress_pivot_path).unwrap();
        let npts_i32 = result_reader.read_i32::<LittleEndian>().unwrap();
        let dim_i32 = result_reader.read_i32::<LittleEndian>().unwrap();
        assert_eq!(npts_i32, 100);
        assert_eq!(dim_i32, 20);

        std::fs::remove_file(compress_pivot_path).unwrap();
    }

    // Round-trips random u8-narrowed codes through a scratch file.
    #[test]
    fn write_compressed_pivot_data_test() {
        let compress_pivot_path = "write_compressed_pivot_data_test.bin";
        let result = PQStorage::new(PQ_PIVOT_PATH, compress_pivot_path, DATA_FILE).unwrap();

        let mut rng = rand::thread_rng();
        let num_centers = 256;
        let block_size = 4;
        let num_pq_chunks = 2;
        let compressed_base: Vec<usize> = (0..block_size * num_pq_chunks)
            .map(|_| rng.gen_range(0..num_centers))
            .collect();

        _ = result.write_compressed_pivot_data(
            &compressed_base,
            num_centers,
            block_size,
            num_pq_chunks,
        );

        let mut result_reader = File::open(compress_pivot_path).unwrap();
        _ = result_reader.read_i32::<LittleEndian>().unwrap();
        _ = result_reader.read_i32::<LittleEndian>().unwrap();
        let mut buf = vec![0u8; block_size * num_pq_chunks * std::mem::size_of::<u8>()];
        result_reader.read_exact(&mut buf).unwrap();

        let ptr = buf.as_ptr() as *const u8;
        let block_data = unsafe { std::slice::from_raw_parts(ptr, block_size * num_pq_chunks) };
        for index in 0..block_data.len() {
            assert_eq!(compressed_base[index], block_data[index] as usize);
        }

        std::fs::remove_file(compress_pivot_path).unwrap();
    }

    #[test]
    fn pivot_data_exist_test() {
        let result = PQStorage::new(PQ_PIVOT_PATH, PQ_COMPRESSED_PATH, DATA_FILE).unwrap();
        assert!(result.pivot_data_exist());

        let pivot_path = "not_exist_pivot_path.bin";
        let result = PQStorage::new(pivot_path, PQ_COMPRESSED_PATH, DATA_FILE).unwrap();
assert!(!result.pivot_data_exist()); } #[test] fn read_pivot_metadata_test() { let result = PQStorage::new(PQ_PIVOT_PATH, PQ_COMPRESSED_PATH, DATA_FILE).unwrap(); let (npt, dim) = result.read_pivot_metadata().unwrap(); assert_eq!(npt, 256); assert_eq!(dim, 128); } #[test] fn load_pivot_data_test() { let result = PQStorage::new(PQ_PIVOT_PATH, PQ_COMPRESSED_PATH, DATA_FILE).unwrap(); let (pq_pivot_data, centroids, chunk_offsets) = result.load_pivot_data(&1, &256, &128).unwrap(); assert_eq!(pq_pivot_data.len(), 256 * 128); assert_eq!(centroids.len(), 128); assert_eq!(chunk_offsets.len(), 2); } #[test] fn read_pq_data_metadata_test() { let mut result = PQStorage::new(PQ_PIVOT_PATH, PQ_COMPRESSED_PATH, DATA_FILE).unwrap(); let (npt, dim) = result.read_pq_data_metadata().unwrap(); assert_eq!(npt, 25000); assert_eq!(dim, 128); } #[test] fn gen_random_slice_test() { let file_name = "gen_random_slice_test.bin"; //npoints=2, dim=8 let data: [u8; 72] = [ 2, 0, 0, 0, 8, 0, 0, 0, 0x00, 0x00, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x40, 0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0xc0, 0x40, 0x00, 0x00, 0xe0, 0x40, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x10, 0x41, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x30, 0x41, 0x00, 0x00, 0x40, 0x41, 0x00, 0x00, 0x50, 0x41, 0x00, 0x00, 0x60, 0x41, 0x00, 0x00, 0x70, 0x41, 0x00, 0x00, 0x80, 0x41, ]; std::fs::write(file_name, data).expect("Failed to write sample file"); let (sampled_vectors, slice_size, ndims) = gen_random_slice::<f32>(file_name, 1f64).unwrap(); let mut start = 8; (0..sampled_vectors.len()).for_each(|i| { assert_eq!(sampled_vectors[i].to_le_bytes(), data[start..start + 4]); start += 4; }); assert_eq!(sampled_vectors.len(), 16); assert_eq!(slice_size, 2); assert_eq!(ndims, 8); let (sampled_vectors, slice_size, ndims) = gen_random_slice::<f32>(file_name, 0f64).unwrap(); assert_eq!(sampled_vectors.len(), 0); assert_eq!(slice_size, 0); assert_eq!(ndims, 8); std::fs::remove_file(file_name).expect("Failed to 
delete file"); } }
// NOTE(review): machine-generated register accessors (svd2rust output) for the
// RCC AHB2ENR peripheral-clock-enable register. Do not hand-edit — regenerate
// from the device SVD instead; left byte-for-byte as generated.
#[doc = "Register `AHB2ENR` reader"] pub type R = crate::R<AHB2ENR_SPEC>; #[doc = "Register `AHB2ENR` writer"] pub type W = crate::W<AHB2ENR_SPEC>; #[doc = "Field `GPIOAEN` reader - IO port A clock enable"] pub type GPIOAEN_R = crate::BitReader<GPIOAEN_A>; #[doc = "IO port A clock enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum GPIOAEN_A { #[doc = "0: The selected clock is disabled"] Disabled = 0, #[doc = "1: The selected clock is enabled"] Enabled = 1, } impl From<GPIOAEN_A> for bool { #[inline(always)] fn from(variant: GPIOAEN_A) -> Self { variant as u8 != 0 } } impl GPIOAEN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> GPIOAEN_A { match self.bits { false => GPIOAEN_A::Disabled, true => GPIOAEN_A::Enabled, } } #[doc = "The selected clock is disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == GPIOAEN_A::Disabled } #[doc = "The selected clock is enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == GPIOAEN_A::Enabled } } #[doc = "Field `GPIOAEN` writer - IO port A clock enable"] pub type GPIOAEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, GPIOAEN_A>; impl<'a, REG, const O: u8> GPIOAEN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "The selected clock is disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(GPIOAEN_A::Disabled) } #[doc = "The selected clock is enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(GPIOAEN_A::Enabled) } } #[doc = "Field `GPIOBEN` reader - IO port B clock enable"] pub use GPIOAEN_R as GPIOBEN_R; #[doc = "Field `GPIOCEN` reader - IO port C clock enable"] pub use GPIOAEN_R as GPIOCEN_R; #[doc = "Field `GPIODEN` reader - IO port D clock enable"] pub use GPIOAEN_R as GPIODEN_R; #[doc = "Field `GPIOEEN` reader - IO port E clock enable"] pub use GPIOAEN_R as GPIOEEN_R; #[doc = "Field `GPIOFEN` reader - IO port F clock 
enable"] pub use GPIOAEN_R as GPIOFEN_R; #[doc = "Field `GPIOGEN` reader - IO port G clock enable"] pub use GPIOAEN_R as GPIOGEN_R; #[doc = "Field `GPIOHEN` reader - IO port H clock enable"] pub use GPIOAEN_R as GPIOHEN_R; #[doc = "Field `ADCEN` reader - ADC clock enable"] pub use GPIOAEN_R as ADCEN_R; #[doc = "Field `AESEN` reader - AES accelerator clock enable"] pub use GPIOAEN_R as AESEN_R; #[doc = "Field `HASHEN` reader - HASH clock enable"] pub use GPIOAEN_R as HASHEN_R; #[doc = "Field `RNGEN` reader - Random Number Generator clock enable"] pub use GPIOAEN_R as RNGEN_R; #[doc = "Field `PKAEN` reader - PKAEN"] pub use GPIOAEN_R as PKAEN_R; #[doc = "Field `OTFDEC1EN` reader - OTFDEC1EN"] pub use GPIOAEN_R as OTFDEC1EN_R; #[doc = "Field `SDMMC1EN` reader - SDMMC1 clock enable"] pub use GPIOAEN_R as SDMMC1EN_R; #[doc = "Field `GPIOBEN` writer - IO port B clock enable"] pub use GPIOAEN_W as GPIOBEN_W; #[doc = "Field `GPIOCEN` writer - IO port C clock enable"] pub use GPIOAEN_W as GPIOCEN_W; #[doc = "Field `GPIODEN` writer - IO port D clock enable"] pub use GPIOAEN_W as GPIODEN_W; #[doc = "Field `GPIOEEN` writer - IO port E clock enable"] pub use GPIOAEN_W as GPIOEEN_W; #[doc = "Field `GPIOFEN` writer - IO port F clock enable"] pub use GPIOAEN_W as GPIOFEN_W; #[doc = "Field `GPIOGEN` writer - IO port G clock enable"] pub use GPIOAEN_W as GPIOGEN_W; #[doc = "Field `GPIOHEN` writer - IO port H clock enable"] pub use GPIOAEN_W as GPIOHEN_W; #[doc = "Field `ADCEN` writer - ADC clock enable"] pub use GPIOAEN_W as ADCEN_W; #[doc = "Field `AESEN` writer - AES accelerator clock enable"] pub use GPIOAEN_W as AESEN_W; #[doc = "Field `HASHEN` writer - HASH clock enable"] pub use GPIOAEN_W as HASHEN_W; #[doc = "Field `RNGEN` writer - Random Number Generator clock enable"] pub use GPIOAEN_W as RNGEN_W; #[doc = "Field `PKAEN` writer - PKAEN"] pub use GPIOAEN_W as PKAEN_W; #[doc = "Field `OTFDEC1EN` writer - OTFDEC1EN"] pub use GPIOAEN_W as OTFDEC1EN_W; #[doc = "Field `SDMMC1EN` 
writer - SDMMC1 clock enable"] pub use GPIOAEN_W as SDMMC1EN_W; impl R { #[doc = "Bit 0 - IO port A clock enable"] #[inline(always)] pub fn gpioaen(&self) -> GPIOAEN_R { GPIOAEN_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - IO port B clock enable"] #[inline(always)] pub fn gpioben(&self) -> GPIOBEN_R { GPIOBEN_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - IO port C clock enable"] #[inline(always)] pub fn gpiocen(&self) -> GPIOCEN_R { GPIOCEN_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - IO port D clock enable"] #[inline(always)] pub fn gpioden(&self) -> GPIODEN_R { GPIODEN_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - IO port E clock enable"] #[inline(always)] pub fn gpioeen(&self) -> GPIOEEN_R { GPIOEEN_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 5 - IO port F clock enable"] #[inline(always)] pub fn gpiofen(&self) -> GPIOFEN_R { GPIOFEN_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bit 6 - IO port G clock enable"] #[inline(always)] pub fn gpiogen(&self) -> GPIOGEN_R { GPIOGEN_R::new(((self.bits >> 6) & 1) != 0) } #[doc = "Bit 7 - IO port H clock enable"] #[inline(always)] pub fn gpiohen(&self) -> GPIOHEN_R { GPIOHEN_R::new(((self.bits >> 7) & 1) != 0) } #[doc = "Bit 13 - ADC clock enable"] #[inline(always)] pub fn adcen(&self) -> ADCEN_R { ADCEN_R::new(((self.bits >> 13) & 1) != 0) } #[doc = "Bit 16 - AES accelerator clock enable"] #[inline(always)] pub fn aesen(&self) -> AESEN_R { AESEN_R::new(((self.bits >> 16) & 1) != 0) } #[doc = "Bit 17 - HASH clock enable"] #[inline(always)] pub fn hashen(&self) -> HASHEN_R { HASHEN_R::new(((self.bits >> 17) & 1) != 0) } #[doc = "Bit 18 - Random Number Generator clock enable"] #[inline(always)] pub fn rngen(&self) -> RNGEN_R { RNGEN_R::new(((self.bits >> 18) & 1) != 0) } #[doc = "Bit 19 - PKAEN"] #[inline(always)] pub fn pkaen(&self) -> PKAEN_R { PKAEN_R::new(((self.bits >> 19) & 1) != 0) } #[doc = "Bit 21 - OTFDEC1EN"] #[inline(always)] pub fn otfdec1en(&self) -> OTFDEC1EN_R { 
OTFDEC1EN_R::new(((self.bits >> 21) & 1) != 0) } #[doc = "Bit 22 - SDMMC1 clock enable"] #[inline(always)] pub fn sdmmc1en(&self) -> SDMMC1EN_R { SDMMC1EN_R::new(((self.bits >> 22) & 1) != 0) } } impl W { #[doc = "Bit 0 - IO port A clock enable"] #[inline(always)] #[must_use] pub fn gpioaen(&mut self) -> GPIOAEN_W<AHB2ENR_SPEC, 0> { GPIOAEN_W::new(self) } #[doc = "Bit 1 - IO port B clock enable"] #[inline(always)] #[must_use] pub fn gpioben(&mut self) -> GPIOBEN_W<AHB2ENR_SPEC, 1> { GPIOBEN_W::new(self) } #[doc = "Bit 2 - IO port C clock enable"] #[inline(always)] #[must_use] pub fn gpiocen(&mut self) -> GPIOCEN_W<AHB2ENR_SPEC, 2> { GPIOCEN_W::new(self) } #[doc = "Bit 3 - IO port D clock enable"] #[inline(always)] #[must_use] pub fn gpioden(&mut self) -> GPIODEN_W<AHB2ENR_SPEC, 3> { GPIODEN_W::new(self) } #[doc = "Bit 4 - IO port E clock enable"] #[inline(always)] #[must_use] pub fn gpioeen(&mut self) -> GPIOEEN_W<AHB2ENR_SPEC, 4> { GPIOEEN_W::new(self) } #[doc = "Bit 5 - IO port F clock enable"] #[inline(always)] #[must_use] pub fn gpiofen(&mut self) -> GPIOFEN_W<AHB2ENR_SPEC, 5> { GPIOFEN_W::new(self) } #[doc = "Bit 6 - IO port G clock enable"] #[inline(always)] #[must_use] pub fn gpiogen(&mut self) -> GPIOGEN_W<AHB2ENR_SPEC, 6> { GPIOGEN_W::new(self) } #[doc = "Bit 7 - IO port H clock enable"] #[inline(always)] #[must_use] pub fn gpiohen(&mut self) -> GPIOHEN_W<AHB2ENR_SPEC, 7> { GPIOHEN_W::new(self) } #[doc = "Bit 13 - ADC clock enable"] #[inline(always)] #[must_use] pub fn adcen(&mut self) -> ADCEN_W<AHB2ENR_SPEC, 13> { ADCEN_W::new(self) } #[doc = "Bit 16 - AES accelerator clock enable"] #[inline(always)] #[must_use] pub fn aesen(&mut self) -> AESEN_W<AHB2ENR_SPEC, 16> { AESEN_W::new(self) } #[doc = "Bit 17 - HASH clock enable"] #[inline(always)] #[must_use] pub fn hashen(&mut self) -> HASHEN_W<AHB2ENR_SPEC, 17> { HASHEN_W::new(self) } #[doc = "Bit 18 - Random Number Generator clock enable"] #[inline(always)] #[must_use] pub fn rngen(&mut self) -> 
RNGEN_W<AHB2ENR_SPEC, 18> { RNGEN_W::new(self) } #[doc = "Bit 19 - PKAEN"] #[inline(always)] #[must_use] pub fn pkaen(&mut self) -> PKAEN_W<AHB2ENR_SPEC, 19> { PKAEN_W::new(self) } #[doc = "Bit 21 - OTFDEC1EN"] #[inline(always)] #[must_use] pub fn otfdec1en(&mut self) -> OTFDEC1EN_W<AHB2ENR_SPEC, 21> { OTFDEC1EN_W::new(self) } #[doc = "Bit 22 - SDMMC1 clock enable"] #[inline(always)] #[must_use] pub fn sdmmc1en(&mut self) -> SDMMC1EN_W<AHB2ENR_SPEC, 22> { SDMMC1EN_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "AHB2 peripheral clock enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahb2enr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahb2enr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct AHB2ENR_SPEC; impl crate::RegisterSpec for AHB2ENR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`ahb2enr::R`](R) reader structure"] impl crate::Readable for AHB2ENR_SPEC {} #[doc = "`write(|w| ..)` method takes [`ahb2enr::W`](W) writer structure"] impl crate::Writable for AHB2ENR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets AHB2ENR to value 0"] impl crate::Resettable for AHB2ENR_SPEC { const RESET_VALUE: Self::Ux = 0; }
// lseek(2)-style "whence" values; presumably bindgen-mirrored from a C header,
// so keep the values and u32 type in sync with the upstream definition.
pub const SEEK_SET: u32 = 0; // seek relative to the start of the file
pub const SEEK_CUR: u32 = 1; // seek relative to the current offset
pub const SEEK_END: u32 = 2; // seek relative to the end of the file
pub mod item; // Reexports pub use item::{Debug, Item, Tool, Spell}; use crate::{ comp::HealthSource, effect::Effect, comp::spell::SpellShape }; use specs::{Component, HashMapStorage, NullStorage}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Inventory { pub slots: Vec<Option<Item>>, } impl Inventory { pub fn slots(&self) -> &[Option<Item>] { &self.slots } pub fn len(&self) -> usize { self.slots.len() } /// Adds a new item to the first empty slot of the inventory. Returns the item again if no free /// slot was found. pub fn push(&mut self, item: Item) -> Option<Item> { match self.slots.iter_mut().find(|slot| slot.is_none()) { Some(slot) => { *slot = Some(item); None } None => Some(item), } } /// Replaces an item in a specific slot of the inventory. Returns the old item or the same item again if that slot /// was not found. pub fn insert(&mut self, cell: usize, item: Item) -> Result<Option<Item>, Item> { match self.slots.get_mut(cell) { Some(slot) => { let old = slot.take(); *slot = Some(item); Ok(old) } None => Err(item), } } pub fn is_full(&self) -> bool { self.slots.iter().all(|slot| slot.is_some()) } /// Get content of a slot pub fn get(&self, cell: usize) -> Option<&Item> { self.slots.get(cell).and_then(Option::as_ref) } /// Swap the items inside of two slots pub fn swap_slots(&mut self, a: usize, b: usize) { if a.max(b) < self.slots.len() { self.slots.swap(a, b); } } /// Remove an item from the slot pub fn remove(&mut self, cell: usize) -> Option<Item> { self.slots.get_mut(cell).and_then(|item| item.take()) } } impl Default for Inventory { fn default() -> Inventory { let mut inventory = Inventory { slots: vec![None; 24], }; inventory.push(Item::Debug(Debug::Boost)); inventory.push(Item::Debug(Debug::Possess)); inventory.push(Item::Spell { effect: Effect::Health(20, HealthSource::Item), shape: SpellShape::Sphere(40.0), }); inventory.push(Item::Tool { kind: Tool::Bow, power: 10, stamina: 0, strength: 0, dexterity: 0, 
intelligence: 0, }); inventory.push(Item::Tool { kind: Tool::Dagger, power: 10, stamina: 0, strength: 0, dexterity: 0, intelligence: 0, }); inventory.push(Item::Tool { kind: Tool::Sword, power: 10, stamina: 0, strength: 0, dexterity: 0, intelligence: 0, }); inventory.push(Item::Tool { kind: Tool::Axe, power: 10, stamina: 0, strength: 0, dexterity: 0, intelligence: 0, }); inventory.push(Item::Tool { kind: Tool::Hammer, power: 10, stamina: 0, strength: 0, dexterity: 0, intelligence: 0, }); inventory } } impl Component for Inventory { type Storage = HashMapStorage<Self>; } // ForceUpdate #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)] pub struct InventoryUpdate; impl Component for InventoryUpdate { type Storage = NullStorage<Self>; }
use wrapper_sys;

/// Safe Rust wrapper over the FFI binding `wrapper_sys::add_numbers`.
pub fn add_numbers(a: i32, b: i32) -> i32 {
    unsafe {
        // SAFETY: presumably the C implementation takes two plain i32s with no
        // other preconditions — TODO confirm against the wrapper_sys header.
        wrapper_sys::add_numbers(a, b)
    }
}

#[cfg(test)]
mod tests {
    use super::add_numbers;

    #[test]
    fn adds_small_numbers() {
        assert_eq!(add_numbers(1, 2), 3);
    }

    #[test]
    fn adds_larger_numbers() {
        assert_eq!(add_numbers(4, 5), 9);
    }
}
use crate::Point3D;
use crate::static_optional::{StaticNone, StaticOptional, StaticSome};

/// Type-state builder for [`Point3D`].
///
/// Each coordinate's presence is tracked in the type system: a field starts as
/// `StaticNone<f64>` and its setter rewrites that type parameter to
/// `StaticSome<f64>`. Because `build` only exists for the all-`StaticSome`
/// state, forgetting a coordinate — or setting one twice — is a compile-time
/// error rather than a runtime panic.
#[derive(Default)]
pub struct Point3DBuilder<X, Y, Z>
where
    X: StaticOptional<Element = f64>,
    Y: StaticOptional<Element = f64>,
    Z: StaticOptional<Element = f64>,
{
    x: X,
    y: Y,
    z: Z,
}

// `x` is only callable while the x coordinate is still unset.
impl<Y, Z> Point3DBuilder<StaticNone<f64>, Y, Z>
where
    Y: StaticOptional<Element = f64>,
    Z: StaticOptional<Element = f64>,
{
    /// Set the x coordinate, moving the builder into the "x present" state.
    pub fn x(self, x: f64) -> Point3DBuilder<StaticSome<f64>, Y, Z> {
        Point3DBuilder {
            x: StaticSome(x),
            y: self.y,
            z: self.z,
        }
    }
}

// `y` is only callable while the y coordinate is still unset.
impl<X, Z> Point3DBuilder<X, StaticNone<f64>, Z>
where
    X: StaticOptional<Element = f64>,
    Z: StaticOptional<Element = f64>,
{
    /// Set the y coordinate, moving the builder into the "y present" state.
    pub fn y(self, y: f64) -> Point3DBuilder<X, StaticSome<f64>, Z> {
        Point3DBuilder {
            x: self.x,
            y: StaticSome(y),
            z: self.z,
        }
    }
}

// `z` is only callable while the z coordinate is still unset.
impl<X, Y> Point3DBuilder<X, Y, StaticNone<f64>>
where
    X: StaticOptional<Element = f64>,
    Y: StaticOptional<Element = f64>,
{
    /// Set the z coordinate, moving the builder into the "z present" state.
    pub fn z(self, z: f64) -> Point3DBuilder<X, Y, StaticSome<f64>> {
        Point3DBuilder {
            x: self.x,
            y: self.y,
            z: StaticSome(z),
        }
    }
}

// `build` exists only once all three coordinates have been provided.
impl Point3DBuilder<StaticSome<f64>, StaticSome<f64>, StaticSome<f64>> {
    /// Consume the builder and produce the finished point.
    pub fn build(self) -> Point3D {
        Point3D {
            x: self.x.0,
            y: self.y.0,
            z: self.z.0,
        }
    }
}
// NOTE(review): single-threaded HTTP-ish server built on the pre-0.7 mio API
// (`Poll::register`, `UnixReady`, slab keyed directly by `Token`).
use std::io::{self, ErrorKind};
use std::rc::Rc;

use mio::net::TcpListener;
use mio::unix::UnixReady;
use mio::{Events, Poll, PollOpt, Ready, Token};
use slab;

use connection::Connection;

// Slab keyed by mio `Token` (old slab API with a caller-chosen index type).
type Slab<T> = slab::Slab<T, Token>;

pub struct Server {
    // main socket for our server
    sock: TcpListener,

    // token of our server. we keep track of it here instead of doing `const SERVER = Token(_)`.
    token: Token,

    // a list of connections _accepted_ by our server
    conns: Slab<Connection>,
}

const READ_WRITE_CAP: usize = 65536;

impl Server {
    pub fn new(sock: TcpListener) -> Server {
        Server {
            sock: sock,
            // Give our server token a number much larger than our slab capacity. The slab used to
            // track an internal offset, but does not anymore.
            token: Token(10_000_000),
            // We will handle a max of READ_WRITE_CAP connections
            conns: Slab::with_capacity(READ_WRITE_CAP),
        }
    }

    /// Run the event loop forever: poll, then dispatch every readiness event.
    pub fn run(&mut self, poll: &mut Poll) -> io::Result<()> {
        self.register(poll)?;

        info!("SERVER RUN LOOP STARTING...");
        loop {
            let mut events = Events::with_capacity(1024);
            poll.poll(&mut events, None)?;
            for event in events.iter() {
                trace!("");
                trace!("------>>>>>> EVENT={:?}", event);
                self.ready(poll, event.token(), event.readiness());
                trace!("^^^^^^");
            }
        }
    }

    /// Register Server with the poller.
    ///
    /// This keeps the registration details neatly tucked away inside of our implementation.
    pub fn register(&mut self, poll: &mut Poll) -> io::Result<()> {
        poll.register(&self.sock, self.token, Ready::readable(), PollOpt::edge())
            .or_else(|e| {
                error!("Failed to register server {:?}, {:?}", self.token, e);
                Err(e)
            })
    }

    /// Remove a token from the slab
    fn remove_token(&mut self, token: Token) {
        match self.conns.remove(token) {
            Some(_c) => {
                debug!("reset connection; token={:?}", token);
            }
            None => {
                warn!("Unable to remove connection for {:?}", token);
            }
        }
    }

    fn report_slab_size(&mut self) {
        debug!("slab size: {}", self.conns.len());
        // let slsize = self.conns.count();
        // if slsize % 200 == 0 {
        //     println!("now at {} connections", slsize);
        // }
    }

    // Tear a connection down after the peer hung up, then drop its slab entry.
    fn conn_hup(&mut self, poll: &mut Poll, token: Token) -> io::Result<()> {
        debug!("SERVER CONN HUP; tok={:?}", token);
        let res = self.connection(token).hup(poll, true);
        // TODO: hup res ok?
        self.remove_token(token);
        self.report_slab_size();
        res
    }

    // Dispatch a single readiness event (error / hup / writable / readable)
    // for either the listening socket or one of the accepted connections.
    fn ready(&mut self, poll: &mut Poll, token: Token, event: Ready) {
        //debug!("GOT {:?} EVENT = {:?}", token, event);
        if self.token != token && self.conns.contains(token) == false {
            debug!("Failed to find connection for {:?}", token);
            return;
        }

        let event = UnixReady::from(event);
        if event.is_error() {
            warn!("Error event for {:?}", token);
            self.remove_token(token);
            return;
        }

        if event.is_hup() {
            trace!("HUP EVENT FOR {:?}", token);
            let _ = self.conn_hup(poll, token);
            return;
        }

        let event = Ready::from(event);

        // We never expect a write event for our `Server` token . A write event for any other token
        // should be handed off to that connection.
        if event.is_writable() {
            trace!("### WRITE EVENT FOR {:?}", token);
            assert!(self.token != token, "Received writable event for Server");

            // NOTE(review): the lines below were `///` doc comments placed in front
            // of an expression, which rustc rejects (E0585); kept as plain comments.
            // Forward a readable event to an established connection.
            //
            // Connections are identified by the token provided to us from the poller. Once a read has
            // finished, push the receive buffer into the all the existing connections so we can
            // broadcast.
            match self.connection(token).writable(poll) {
                Ok(()) => {
                    //trace!("Server.rs: Recive writable FOR TOKEN:{:?}", token);
                    // Canned HTTP response sent to every writable connection.
                    let message = b"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n<html><body>Hello world</body></html>\r\n";
                    let rc_message = Rc::new(message.to_vec());
                    self.connection(token)
                        .send_message(rc_message.clone())
                        .unwrap();
                    // while let Some(message) = self.connection(token).readable()? {
                    //     let rc_message = Rc::new(message);
                    //     self.connection(token).send_message(rc_message.clone())?;
                    //     Echo the message too all connected clients.
                    //     for c in self.conns.iter_mut() {
                    //         c.send_message(rc_message.clone())?;
                    //     }
                    // }
                    // println!("SHOULD HUP FOR HTTP {:?}", token);

                    // Plain HTTP/1.0-style behaviour: respond once, then hang up.
                    let _ = self.conn_hup(poll, token);
                    return;
                }
                Err(e) => {
                    warn!("Write event failed for {:?}, {:?}", token, e);
                    self.remove_token(token);
                    return;
                }
            }
        }

        // A read event for our `Server` token means we are establishing a new connection. A read
        // event for any other token should be handed off to that connection.
        if event.is_readable() {
            trace!("### READ EVENT FOR {:?}", token);
            if self.token == token {
                self.accept(poll);
            } else {
                let conn = self.connection(token);
                match conn.readable(poll).unwrap() {
                    // should return empty message and keep it inside connection to reduce data movement as it unused
                    Some(message) => {
                        // println!("GOT MESSAGE {}", String::from_utf8_lossy(&message));
                        //let rc_message = Rc::new(message);
                        //conn.send_message(rc_message.clone()).unwrap();
                    }
                    None => {}
                }
            }
        }

        /*
        trace!(" .....ready rerigster...................");
        if self.token != token {
            match self.connection(token).reregister(poll) {
                Ok(()) => {}
                Err(e) => {
                    warn!("Reregister failed {:?}", e);
                    self.remove_token(token);
                    return;
                }
            }
        }
        trace!(" .....ready end...................");
        */
    }

    /// Accept a _new_ client connection.
    ///
    /// The server will keep track of the new connection and forward any events from the poller
    /// to this connection.
    fn accept(&mut self, poll: &mut Poll) {
        // NOTE(review): log strings below contain typos ("SERVET", "socker");
        // left untouched since this pass only adds comments.
        debug!("SERVET ACCEPTING NEW SOCKET");
        loop {
            // Log an error if there is no socket, but otherwise move on so we do not tear down the
            // entire server.
            let sock = match self.sock.accept() {
                Ok((sock, _)) => {
                    trace!("accept new socker {:?}", sock);
                    sock
                },
                Err(e) => {
                    if e.kind() == ErrorKind::WouldBlock {
                        // Edge-triggered accept loop drained: nothing left to take.
                        debug!("accept encountered WouldBlock");
                    } else {
                        error!("Failed to accept new socket, {:?}", e);
                    }
                    return;
                }
            };

            // Claim a free slab slot; its index becomes the connection's token.
            let token = match self.conns.vacant_entry() {
                Some(entry) => {
                    let c = Connection::new(sock, entry.index());
                    entry.insert(c).index()
                }
                None => {
                    error!("Failed to insert connection into slab");
                    return;
                }
            };

            debug!("REGISTERING {:?} WITH POLLER", token);
            match self.connection(token).register(poll) {
                Ok(_) => {}
                Err(e) => {
                    error!(
                        "Failed to register {:?} connection with poller, {:?}",
                        token, e
                    );
                    self.remove_token(token);
                }
            }
        }
    }

    /// Find a connection in the slab using the given token.
    ///
    /// This function will panic if the token does not exist. Use self.conns.contains(token)
    /// before using this function.
    fn connection(&mut self, token: Token) -> &mut Connection {
        &mut self.conns[token]
    }
}
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::str::Chars;

/// Advent of Code 2020, day 5: decode boarding passes via binary space
/// partitioning, report the highest seat id, then locate the one free seat
/// whose two neighbouring ids are both present in the input.
pub fn exercise() {
    // 128 rows x 8 columns; '_' marks an unseen seat, 'X' one from the input.
    let mut floor_plan: Vec<Vec<char>> = vec![vec!['_'; 8]; 128];
    let mut seat_ids: Vec<i32> = Vec::new();
    let data = load_data();

    for ticket in data {
        // First 7 characters (F/B) narrow the row within 0..=127.
        let mut row_position = (0, 127);
        row_position = find(ticket[..7].chars(), row_position.0, row_position.1);
        let row = row_position.0;

        // Remaining characters (L/R) narrow the column within 0..=7.
        let mut col_position = (0, 7);
        col_position = find(ticket[7..].chars(), col_position.0, col_position.1);
        let col = col_position.0;

        seat_ids.push((row * 8) + col);
        let _ignore = std::mem::replace(&mut floor_plan[row as usize][col as usize], 'X');
    }

    println!("Highest seat ID value is {}.", seat_ids.iter().max().unwrap());

    // My seat is the only unoccupied cell with both neighbour ids occupied.
    for (i, row) in floor_plan.iter_mut().enumerate() {
        for (y, col) in row.iter_mut().enumerate() {
            if *col != 'X' {
                let id = (i * 8) + y;
                // `id > 0` guards the `id - 1` subtraction on usize.
                if id > 0
                    && seat_ids.iter().any(|i| *i == ((id - 1) as i32))
                    && seat_ids.iter().any(|i| *i == ((id + 1) as i32))
                {
                    println!("My seat ID is {}.", id);
                }
            }
        }
    }
}

/// Binary-space-partition search: each 'F'/'L' keeps the lower half of
/// `[lower, upper]`, each 'B'/'R' keeps the upper half. Returns the final
/// `(lower, upper)` pair, which is `(n, n)` once the ticket is exhausted.
/// Unknown indicators are reported and stop the search at the current range.
fn find(mut ticket: Chars, lower: i32, upper: i32) -> (i32, i32) {
    if ticket.as_str() == "" {
        return (lower, upper);
    }
    let indicator = ticket.next().unwrap();
    if indicator == 'F' || indicator == 'L' {
        find(ticket, lower, floor_division((upper + lower) as f64, 2 as f64))
    } else if indicator == 'B' || indicator == 'R' {
        find(ticket, ceil_division((upper + lower) as f64, 2 as f64), upper)
    } else {
        println!("Unsupported indicator '{}'.", indicator);
        (lower, upper)
    }
}

/// `ceil(divisor / dividend)` as i32. Uses std's `f64::ceil` — equivalent to
/// the previously used `math::round::ceil(x, 0)` (round to 0 decimal places)
/// without pulling in an external crate.
fn ceil_division(divisor: f64, dividend: f64) -> i32 {
    (divisor / dividend).ceil() as i32
}

/// `floor(divisor / dividend)` as i32 (std `f64::floor`, see above).
fn floor_division(divisor: f64, dividend: f64) -> i32 {
    (divisor / dividend).floor() as i32
}

/// Read the puzzle input, one boarding pass per line.
fn load_data() -> Vec<String> {
    let input = File::open("./data/day5.data").unwrap();
    let reader = BufReader::new(input);
    reader.lines().map(|l| l.unwrap()).collect()
}
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE(review): pre-1.0 Rust (`box` expressions, `try!`, bare trait objects);
// documented as-is rather than modernised.

use std::mem;
use libc;

use green::Callback;
use {raw, uvll, EventLoop, UvResult};
use raw::Handle;
use homing::{HomingIO, HomeHandle};

// A libuv signal watcher (`uv_signal_t`) homed on a particular event loop.
pub struct Signal {
    handle: raw::Signal,
    home: HomeHandle,
}

// Heap payload stashed in the uv handle's user-data pointer; owns the user
// callback for the currently watched signal (if any).
struct Data {
    callback: Option<Box<Callback + Send>>,
}

impl Signal {
    // Create a signal watcher on the current borrowed event loop.
    pub fn new() -> UvResult<Signal> {
        Signal::new_on(&mut *try!(EventLoop::borrow()))
    }

    // Create a signal watcher on the given event loop.
    pub fn new_on(eloop: &mut EventLoop) -> UvResult<Signal> {
        unsafe {
            let mut ret = Signal {
                handle: try!(raw::Signal::new(&eloop.uv_loop())),
                home: eloop.make_handle(),
            };
            // Hand ownership of the boxed callback slot to the uv handle's
            // data pointer; reclaimed (and freed) in Drop via the reverse
            // transmute.
            let data = box Data { callback: None };
            ret.handle.set_data(mem::transmute(data));
            Ok(ret)
        }
    }

    /// Attempts to start listening for the signal `signal`.
    ///
    /// When the process receives the specified signal, the callback `cb` will
    /// be invoked on the event loop. This function will cancel any previous
    /// signal being listened for.
    ///
    /// For more information, see `uv_signal_start`.
    pub fn start(&mut self, signal: libc::c_int, cb: Box<Callback + Send>) -> UvResult<()> {
        // Be sure to run user destructors outside the homing missile, not
        // inside.
        let _prev = {
            let _m = self.fire_homing_missile();
            try!(self.handle.start(signal, signal_cb));
            let data: &mut Data = unsafe { mem::transmute(self.handle.get_data()) };
            mem::replace(&mut data.callback, Some(cb))
        };
        Ok(())
    }

    /// Stop listening for the signal previously registered in `start`.
    pub fn stop(&mut self) -> UvResult<()> {
        // As in `start`: take the old callback out while homed, drop it after.
        let _prev = {
            let _m = self.fire_homing_missile();
            try!(self.handle.stop());
            let data: &mut Data = unsafe { mem::transmute(self.handle.get_data()) };
            data.callback.take()
        };
        Ok(())
    }

    /// Gain access to the underlying raw signal handle.
    ///
    /// This function is unsafe as there is no guarantee that any safe
    /// modifications to the signal handle are actually safe to perform given the
    /// assumptions of this object.
    pub unsafe fn raw(&self) -> raw::Signal {
        self.handle
    }
}

// C callback invoked by libuv when the watched signal fires; recovers the
// `Data` payload from the handle and runs the stored user callback.
extern fn signal_cb(handle: *mut uvll::uv_signal_t, _signum: libc::c_int) {
    unsafe {
        let raw: raw::Signal = Handle::from_raw(handle);
        let data: &mut Data = mem::transmute(raw.get_data());
        assert!(data.callback.is_some());
        data.callback.as_mut().unwrap().call();
    }
}

impl HomingIO for Signal {
    fn home(&self) -> &HomeHandle {
        &self.home
    }
}

impl Drop for Signal {
    fn drop(&mut self) {
        // Reclaim ownership of the boxed `Data` (freed when `_data` goes out
        // of scope) after stopping and closing the uv handle.
        let _data: Box<Data> = unsafe {
            let _m = self.fire_homing_missile();
            self.handle.stop().unwrap();
            self.handle.close_and_free();
            mem::transmute(self.handle.get_data())
        };
    }
}
use crypto_hash::{hex_digest, Algorithm};
use hyper::{Body, Client, Method, Request};
use hyper_tls::HttpsConnector;
use serde::{Deserialize, Serialize};
use std::error::Error;

/// Error envelope returned by the Mixin API.
#[derive(Serialize, Deserialize)]
pub struct ErrorData {
    pub error: ErrorInfo,
}

#[derive(Serialize, Deserialize)]
pub struct ErrorInfo {
    pub status: usize,
    pub code: usize,
    pub description: String,
}

/// Success envelope: every successful response wraps its payload in `data`.
#[derive(Serialize, Deserialize)]
pub struct SuccessData<T> {
    data: T,
}

#[derive(Serialize, Deserialize)]
pub struct Version {
    pub build: String,
    pub developers: String,
    pub timestamp: String,
}

const BASE_URL: &str = "https://api.mixin.one";

/// Shared request path for `get`/`post`: sends `method` to `BASE_URL + url`
/// with a bearer token and deserializes the `SuccessData` envelope.
///
/// FIX: the request builder and JSON parse previously used `.unwrap()`, which
/// panicked on malformed responses (e.g. an `ErrorData` body) instead of
/// returning `Err`; both now propagate with `?`. Also drops the redundant
/// `&body.slice(..)` copy — `from_slice` takes the bytes directly.
async fn send<T>(token: String, method: Method, url: &str, body: Body) -> Result<T, Box<dyn Error>>
where
    T: serde::de::DeserializeOwned,
{
    let https = HttpsConnector::new();
    let client = Client::builder().build::<_, Body>(https);
    let res = client
        .request(
            Request::builder()
                .method(method)
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {}", token))
                .uri(format!("{}{}", BASE_URL, url))
                .body(body)?,
        )
        .await?;
    let bytes = hyper::body::to_bytes(res.into_body()).await?;
    let parsed: SuccessData<T> = serde_json::from_slice(&bytes)?;
    Ok(parsed.data)
}

/// GET `BASE_URL + url` with a bearer `token`, returning the `data` payload.
pub async fn get<T>(token: String, url: &str) -> Result<T, Box<dyn Error>>
where
    T: serde::de::DeserializeOwned,
{
    send(token, Method::GET, url, Body::empty()).await
}

/// POST `body` to `BASE_URL + url` with a bearer `token`, returning `data`.
pub async fn post<T>(token: String, url: &str, body: Body) -> Result<T, Box<dyn Error>>
where
    T: serde::de::DeserializeOwned,
{
    send(token, Method::POST, url, body).await
}

/// Hex-encoded SHA-256 over `METHOD + url + body`, as used for Mixin request
/// signatures.
pub fn sign_request(method: &str, url: &str, body: &str) -> String {
    let payload = format!("{}{}{}", method.to_uppercase(), url, body);
    hex_digest(Algorithm::SHA256, payload.as_bytes())
}
// svd2rust-generated accessor API for the UCPD `CFGR3` trim register.
// Field layout (grounded in the shifts/masks below): CC1 trims occupy bits
// 0..=12 and CC2 trims bits 16..=28; bits 13..=15 and 29..=31 are unused.
#[doc = "Register `CFGR3` reader"]
pub type R = crate::R<CFGR3_SPEC>;
#[doc = "Register `CFGR3` writer"]
pub type W = crate::W<CFGR3_SPEC>;
#[doc = "Field `TRIM1_NG_CCRPD` reader - SW trim value for RPD resistors on the CC1 line"]
pub type TRIM1_NG_CCRPD_R = crate::FieldReader;
#[doc = "Field `TRIM1_NG_CCRPD` writer - SW trim value for RPD resistors on the CC1 line"]
pub type TRIM1_NG_CCRPD_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `TRIM1_NG_CC1A5` reader - SW trim value for RP1A5 resistors on the CC1 line"]
pub type TRIM1_NG_CC1A5_R = crate::FieldReader;
#[doc = "Field `TRIM1_NG_CC1A5` writer - SW trim value for RP1A5 resistors on the CC1 line"]
pub type TRIM1_NG_CC1A5_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 5, O>;
#[doc = "Field `TRIM1_NG_CC3A0` reader - SW trim value for RP3A0 resistors on the CC1 line"]
pub type TRIM1_NG_CC3A0_R = crate::FieldReader;
#[doc = "Field `TRIM1_NG_CC3A0` writer - SW trim value for RP3A0 resistors on the CC1 line"]
pub type TRIM1_NG_CC3A0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `TRIM2_NG_CCRPD` reader - SW trim value for RPD resistors on the CC2 line"]
pub type TRIM2_NG_CCRPD_R = crate::FieldReader;
#[doc = "Field `TRIM2_NG_CCRPD` writer - SW trim value for RPD resistors on the CC2 line"]
pub type TRIM2_NG_CCRPD_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `TRIM2_NG_CC1A5` reader - SW trim value for RP1A5 resistors on the CC2 line"]
pub type TRIM2_NG_CC1A5_R = crate::FieldReader;
#[doc = "Field `TRIM2_NG_CC1A5` writer - SW trim value for RP1A5 resistors on the CC2 line"]
pub type TRIM2_NG_CC1A5_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 5, O>;
#[doc = "Field `TRIM2_NG_CC3A0` reader - SW trim value for RP3A0 resistors on the CC2 line"]
pub type TRIM2_NG_CC3A0_R = crate::FieldReader;
#[doc = "Field `TRIM2_NG_CC3A0` writer - SW trim value for RP3A0 resistors on the CC2 line"]
pub type TRIM2_NG_CC3A0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
impl R {
    #[doc = "Bits 0:3 - SW trim value for RPD resistors on the CC1 line"]
    #[inline(always)]
    pub fn trim1_ng_ccrpd(&self) -> TRIM1_NG_CCRPD_R {
        TRIM1_NG_CCRPD_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 4:8 - SW trim value for RP1A5 resistors on the CC1 line"]
    #[inline(always)]
    pub fn trim1_ng_cc1a5(&self) -> TRIM1_NG_CC1A5_R {
        TRIM1_NG_CC1A5_R::new(((self.bits >> 4) & 0x1f) as u8)
    }
    #[doc = "Bits 9:12 - SW trim value for RP3A0 resistors on the CC1 line"]
    #[inline(always)]
    pub fn trim1_ng_cc3a0(&self) -> TRIM1_NG_CC3A0_R {
        TRIM1_NG_CC3A0_R::new(((self.bits >> 9) & 0x0f) as u8)
    }
    #[doc = "Bits 16:19 - SW trim value for RPD resistors on the CC2 line"]
    #[inline(always)]
    pub fn trim2_ng_ccrpd(&self) -> TRIM2_NG_CCRPD_R {
        TRIM2_NG_CCRPD_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bits 20:24 - SW trim value for RP1A5 resistors on the CC2 line"]
    #[inline(always)]
    pub fn trim2_ng_cc1a5(&self) -> TRIM2_NG_CC1A5_R {
        TRIM2_NG_CC1A5_R::new(((self.bits >> 20) & 0x1f) as u8)
    }
    #[doc = "Bits 25:28 - SW trim value for RP3A0 resistors on the CC2 line"]
    #[inline(always)]
    pub fn trim2_ng_cc3a0(&self) -> TRIM2_NG_CC3A0_R {
        TRIM2_NG_CC3A0_R::new(((self.bits >> 25) & 0x0f) as u8)
    }
}
impl W {
    #[doc = "Bits 0:3 - SW trim value for RPD resistors on the CC1 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim1_ng_ccrpd(&mut self) -> TRIM1_NG_CCRPD_W<CFGR3_SPEC, 0> {
        TRIM1_NG_CCRPD_W::new(self)
    }
    #[doc = "Bits 4:8 - SW trim value for RP1A5 resistors on the CC1 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim1_ng_cc1a5(&mut self) -> TRIM1_NG_CC1A5_W<CFGR3_SPEC, 4> {
        TRIM1_NG_CC1A5_W::new(self)
    }
    #[doc = "Bits 9:12 - SW trim value for RP3A0 resistors on the CC1 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim1_ng_cc3a0(&mut self) -> TRIM1_NG_CC3A0_W<CFGR3_SPEC, 9> {
        TRIM1_NG_CC3A0_W::new(self)
    }
    #[doc = "Bits 16:19 - SW trim value for RPD resistors on the CC2 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim2_ng_ccrpd(&mut self) -> TRIM2_NG_CCRPD_W<CFGR3_SPEC, 16> {
        TRIM2_NG_CCRPD_W::new(self)
    }
    #[doc = "Bits 20:24 - SW trim value for RP1A5 resistors on the CC2 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim2_ng_cc1a5(&mut self) -> TRIM2_NG_CC1A5_W<CFGR3_SPEC, 20> {
        TRIM2_NG_CC1A5_W::new(self)
    }
    #[doc = "Bits 25:28 - SW trim value for RP3A0 resistors on the CC2 line"]
    #[inline(always)]
    #[must_use]
    pub fn trim2_ng_cc3a0(&mut self) -> TRIM2_NG_CC3A0_W<CFGR3_SPEC, 25> {
        TRIM2_NG_CC3A0_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "UCPD configuration register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CFGR3_SPEC;
impl crate::RegisterSpec for CFGR3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cfgr3::R`](R) reader structure"]
impl crate::Readable for CFGR3_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cfgr3::W`](W) writer structure"]
impl crate::Writable for CFGR3_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CFGR3 to value 0"]
impl crate::Resettable for CFGR3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
#![allow(dead_code)]

use std::path::Path;
use std::{env, fs};

// const SETTINGS_FILE: &str = "Settings.toml";
const LOG4RS_FILE: &str = "log4rs.yaml";

/// Build script: copies runtime config files next to the compiled outputs so
/// examples pick them up from their working directory.
fn main() {
    // OUT_DIR is always set by cargo for build scripts; panic otherwise.
    let target_dir_path = env::var("OUT_DIR").unwrap();
    println!("Out dir = {}", target_dir_path);

    copy_to_examples(&target_dir_path, LOG4RS_FILE);
    // copy(&target_dir_path, SETTINGS_FILE);
}

/// Copy `file_name` from the crate root into `<OUT_DIR>/../../../examples`.
///
/// FIX: dropped the redundant `.as_path()` call on a temporary `PathBuf`
/// (it only re-borrowed the value before the next `join`) and the
/// over-constrained `Copy` bound on the file-name parameter.
fn copy_to_examples<S: AsRef<std::ffi::OsStr> + ?Sized, P: AsRef<Path>>(
    target_dir_path: &S,
    file_name: P,
) {
    let file_name = file_name.as_ref();
    let path_to_target = Path::new(&target_dir_path)
        .join("../../../examples")
        .join(file_name);
    println!("Out 'examples' target dir = {:?}", path_to_target);
    fs::copy(file_name, path_to_target).unwrap();
}

/// Copy `file_name` into the target (debug/release) directory itself,
/// `<OUT_DIR>/../../..`. Same fixes as `copy_to_examples`.
fn copy_to_debug<S: AsRef<std::ffi::OsStr> + ?Sized, P: AsRef<Path>>(
    target_dir_path: &S,
    file_name: P,
) {
    let file_name = file_name.as_ref();
    let path_to_target = Path::new(&target_dir_path)
        .join("../../..")
        .join(file_name);
    println!("Out 'target debug' dir = {:?}", path_to_target);
    fs::copy(file_name, path_to_target).unwrap();
}
use super::{PacketEncoder, PacketEncoderExt, SlotData}; use crate::player::Gamemode; use crate::utils::NBTMap; use serde::Serialize; use std::collections::HashMap; pub trait ClientBoundPacket { fn encode(&self) -> PacketEncoder; } // Server List Ping Packets pub struct CResponse { pub json_response: String, } impl ClientBoundPacket for CResponse { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_string(32767, &self.json_response); PacketEncoder::new(buf, 0x00) } } // Login Packets pub struct CDisconnectLogin { pub reason: String, } impl ClientBoundPacket for CDisconnectLogin { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_string(32767, &self.reason); PacketEncoder::new(buf, 0x00) } } pub struct CPong { pub payload: i64, } impl ClientBoundPacket for CPong { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_long(self.payload); PacketEncoder::new(buf, 0x01) } } pub struct CLoginSuccess { pub uuid: u128, pub username: String, } impl ClientBoundPacket for CLoginSuccess { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_uuid(self.uuid); buf.write_string(16, &self.username); PacketEncoder::new(buf, 0x02) } } pub struct CSetCompression { pub threshold: i32, } impl ClientBoundPacket for CSetCompression { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.threshold); PacketEncoder::new(buf, 0x03) } } pub struct CSpawnEntity { pub entity_id: i32, pub object_uuid: u128, pub entity_type: i32, pub x: f64, pub y: f64, pub z: f64, pub pitch: f32, pub yaw: f32, pub data: i32, pub velocity_x: i16, pub velocity_y: i16, pub velocity_z: i16, } impl ClientBoundPacket for CSpawnEntity { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_uuid(self.object_uuid); buf.write_varint(self.entity_type); buf.write_double(self.x); buf.write_double(self.y); buf.write_double(self.z); buf.write_byte(((self.yaw / 360f32 * 
256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_int(self.data); buf.write_short(self.velocity_x); buf.write_short(self.velocity_y); buf.write_short(self.velocity_z); PacketEncoder::new(buf, 0x00) } } pub struct CSpawnLivingEntity { pub entity_id: i32, pub entity_uuid: u128, pub entity_type: i32, pub x: f64, pub y: f64, pub z: f64, pub yaw: f32, pub pitch: f32, pub head_pitch: f32, pub velocity_x: i16, pub velocity_y: i16, pub velocity_z: i16, } impl ClientBoundPacket for CSpawnLivingEntity { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_uuid(self.entity_uuid); buf.write_varint(self.entity_type); buf.write_double(self.x); buf.write_double(self.y); buf.write_double(self.z); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.head_pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_short(self.velocity_x); buf.write_short(self.velocity_y); buf.write_short(self.velocity_z); PacketEncoder::new(buf, 0x02) } } pub struct CSpawnPlayer { pub entity_id: i32, pub uuid: u128, pub x: f64, pub y: f64, pub z: f64, pub yaw: f32, pub pitch: f32, } impl ClientBoundPacket for CSpawnPlayer { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_uuid(self.uuid); buf.write_double(self.x); buf.write_double(self.y); buf.write_double(self.z); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); PacketEncoder::new(buf, 0x04) } } // Play Packets pub struct CEntityAnimation { pub entity_id: i32, pub animation: u8, } impl ClientBoundPacket for CEntityAnimation { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_unsigned_byte(self.animation); PacketEncoder::new(buf, 0x06) } } pub 
struct CBlockEntityData { pub x: i32, pub y: i32, pub z: i32, pub action: u8, pub nbt: nbt::Blob, } impl ClientBoundPacket for CBlockEntityData { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_position(self.x, self.y, self.z); buf.write_unsigned_byte(self.action); buf.write_nbt_blob(&self.nbt); PacketEncoder::new(buf, 0x0A) } } pub struct CBlockChange { pub x: i32, pub y: i32, pub z: i32, pub block_id: i32, } impl ClientBoundPacket for CBlockChange { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_position(self.x, self.y, self.z); buf.write_varint(self.block_id); PacketEncoder::new(buf, 0x0C) } } pub struct CChatMessage { pub message: String, pub position: i8, pub sender: u128, } impl ClientBoundPacket for CChatMessage { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_string(32767, &self.message); buf.write_byte(self.position); buf.write_uuid(self.sender); PacketEncoder::new(buf, 0x0F) } } pub enum CDeclareCommandsNodeParser { Entity(i8), Vec2, Vec3, Integer(i32, i32), Float(f32, f32), BlockPos, BlockState, } impl CDeclareCommandsNodeParser { fn write(&self, buf: &mut Vec<u8>) { use CDeclareCommandsNodeParser::*; match self { Entity(flags) => { buf.write_string(32767, "minecraft:entity"); buf.write_byte(*flags); } Vec2 => buf.write_string(32767, "minecraft:vec2"), Vec3 => buf.write_string(32767, "minecraft:vec3"), BlockPos => buf.write_string(32767, "minecraft:block_pos"), BlockState => buf.write_string(32767, "minecraft:block_state"), Integer(min, max) => { buf.write_string(32767, "brigadier:integer"); buf.write_byte(3); // Supply min and max value buf.write_int(*min); buf.write_int(*max); } Float(min, max) => { buf.write_string(32767, "brigadier:float"); buf.write_byte(3); buf.write_float(*min); buf.write_float(*max); } } } } pub struct CDeclareCommandsNode<'a> { pub flags: i8, pub children: &'a [i32], pub redirect_node: Option<i32>, pub name: Option<&'static str>, pub parser: 
Option<CDeclareCommandsNodeParser>, } pub struct CDeclareCommands<'a> { pub nodes: &'a [CDeclareCommandsNode<'a>], pub root_index: i32, } impl<'a> ClientBoundPacket for CDeclareCommands<'a> { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.nodes.len() as i32); for node in self.nodes { buf.write_byte(node.flags); buf.write_varint(node.children.len() as i32); for child in node.children { buf.write_varint(*child); } if let Some(redirect_node) = node.redirect_node { buf.write_varint(redirect_node); } if let Some(name) = node.name { buf.write_string(32767, name); } if let Some(parser) = &node.parser { parser.write(&mut buf); } } buf.write_varint(self.root_index); PacketEncoder::new(buf, 0x12) } } pub struct CWindowItems { pub window_id: u8, pub state_id: i32, pub slot_data: Vec<Option<SlotData>>, pub carried_item: Option<SlotData>, } impl ClientBoundPacket for CWindowItems { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_unsigned_byte(self.window_id); buf.write_varint(self.state_id); buf.write_varint(self.slot_data.len() as i32); for slot_data in &self.slot_data { buf.write_slot_data(slot_data); } buf.write_slot_data(&self.carried_item); PacketEncoder::new(buf, 0x14) } } pub struct CSetSlot { pub window_id: u8, pub slot: i16, pub slot_data: Option<SlotData>, } impl ClientBoundPacket for CSetSlot { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_slot_data(&self.slot_data); PacketEncoder::new(buf, 0x16) } } pub struct CPluginMessage { pub channel: String, pub data: Vec<u8>, } impl ClientBoundPacket for CPluginMessage { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_string(32767, &self.channel); buf.write_bytes(&self.data); PacketEncoder::new(buf, 0x18) } } pub struct CDisconnect { pub reason: String, } impl ClientBoundPacket for CDisconnect { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_string(32767, &self.reason); 
PacketEncoder::new(buf, 0x1A) } } #[derive(Debug)] pub struct CUnloadChunk { pub chunk_x: i32, pub chunk_z: i32, } impl ClientBoundPacket for CUnloadChunk { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_int(self.chunk_x); buf.write_int(self.chunk_z); PacketEncoder::new(buf, 0x1D) } } pub enum CChangeGameStateReason { ChangeGamemode, } pub struct CChangeGameState { pub reason: CChangeGameStateReason, pub value: f32, } impl ClientBoundPacket for CChangeGameState { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); match self.reason { CChangeGameStateReason::ChangeGamemode => buf.write_unsigned_byte(3), } buf.write_float(self.value); PacketEncoder::new(buf, 0x1E) } } pub struct CKeepAlive { pub id: i64, } impl ClientBoundPacket for CKeepAlive { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_long(self.id); PacketEncoder::new(buf, 0x21) } } pub struct CChunkDataSection { pub block_count: i16, pub bits_per_block: u8, pub palette: Option<Vec<i32>>, pub data_array: Vec<u64>, } pub struct CChunkData { pub chunk_x: i32, pub chunk_z: i32, pub primary_bit_mask: Vec<i64>, pub heightmaps: nbt::Blob, pub biomes: Vec<i32>, pub chunk_sections: Vec<CChunkDataSection>, pub block_entities: Vec<nbt::Blob>, } impl ClientBoundPacket for CChunkData { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_int(self.chunk_x); buf.write_int(self.chunk_z); buf.write_varint(self.primary_bit_mask.len() as i32); for long in &self.primary_bit_mask { buf.write_long(*long); } let mut heightmaps = Vec::new(); self.heightmaps.to_writer(&mut heightmaps).unwrap(); buf.write_bytes(&heightmaps); buf.write_varint(self.biomes.len() as i32); for biome in &self.biomes { buf.write_varint(*biome); } let mut data = Vec::new(); for chunk_section in &self.chunk_sections { data.write_short(chunk_section.block_count); data.write_unsigned_byte(chunk_section.bits_per_block); if let Some(palette) = &chunk_section.palette { 
data.write_varint(palette.len() as i32); for palette_entry in palette { data.write_varint(*palette_entry); } } data.write_varint(chunk_section.data_array.len() as i32); for long in &chunk_section.data_array { data.write_long(*long as i64); } } buf.write_varint(data.len() as i32); buf.write_bytes(&data); // Number of block entities buf.write_varint(self.block_entities.len() as i32); for block_entity in &self.block_entities { buf.write_nbt_blob(block_entity); } PacketEncoder::new(buf, 0x22) } } pub struct CEffect { pub effect_id: i32, pub x: i32, pub y: i32, pub z: i32, pub data: i32, pub disable_relative_volume: bool, } impl ClientBoundPacket for CEffect { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_int(self.effect_id); buf.write_position(self.x, self.y, self.z); buf.write_int(self.data); buf.write_bool(self.disable_relative_volume); PacketEncoder::new(buf, 0x23) } } #[derive(Serialize, Clone)] pub struct CJoinGameDimensionElement { pub natural: i8, pub ambient_light: f32, pub has_ceiling: i8, pub has_skylight: i8, pub fixed_time: i64, pub shrunk: i8, pub ultrawarm: i8, pub has_raids: i8, pub min_y: i32, pub height: i32, pub respawn_anchor_works: i8, pub bed_works: i8, pub piglin_safe: i8, pub coordinate_scale: f32, pub logical_height: i32, pub infiniburn: String, } #[derive(Serialize, Clone)] pub struct CJoinGameBiomeEffectsMoodSound { pub tick_delay: i32, pub offset: f32, pub sound: String, pub block_search_extent: i32, } #[derive(Serialize, Clone)] pub struct CJoinGameBiomeEffects { pub sky_color: i32, pub water_fog_color: i32, pub fog_color: i32, pub water_color: i32, pub mood_sound: CJoinGameBiomeEffectsMoodSound, } #[derive(Serialize, Clone)] pub struct CJoinGameBiomeElement { pub depth: f32, pub temperature: f32, pub downfall: f32, pub precipitation: String, pub category: String, pub scale: f32, pub effects: CJoinGameBiomeEffects, } pub struct CJoinGameDimensionCodec { pub dimensions: HashMap<String, CJoinGameDimensionElement>, pub 
biomes: HashMap<String, CJoinGameBiomeElement>, } #[derive(Serialize)] struct CJoinGameDimensionCodecInner { #[serde(rename = "minecraft:dimension_type")] pub dimensions: NBTMap<CJoinGameDimensionElement>, #[serde(rename = "minecraft:worldgen/biome")] pub biomes: NBTMap<CJoinGameBiomeElement>, } impl CJoinGameDimensionCodec { fn encode(&self, buf: &mut Vec<u8>) { let mut dimension_map: NBTMap<CJoinGameDimensionElement> = NBTMap::new("minecraft:dimension_type".to_owned()); for (name, element) in &self.dimensions { dimension_map.push_element(name.clone(), element.clone()); } let mut biome_map = NBTMap::new("minecraft:worldgen/biome".to_owned()); for (name, element) in &self.biomes { biome_map.push_element(name.clone(), element.clone()); } let codec = CJoinGameDimensionCodecInner { dimensions: dimension_map, biomes: biome_map, }; buf.write_nbt(&codec); } } pub struct CJoinGame { pub entity_id: i32, pub is_hardcore: bool, pub gamemode: u8, pub previous_gamemode: u8, pub world_count: i32, pub world_names: Vec<String>, pub dimension_codec: CJoinGameDimensionCodec, pub dimension: CJoinGameDimensionElement, pub world_name: String, pub hashed_seed: i64, pub max_players: i32, pub view_distance: i32, pub reduced_debug_info: bool, pub enable_respawn_screen: bool, pub is_debug: bool, pub is_flat: bool, } impl ClientBoundPacket for CJoinGame { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_int(self.entity_id); buf.write_bool(self.is_hardcore); buf.write_unsigned_byte(self.gamemode); buf.write_unsigned_byte(self.previous_gamemode); buf.write_varint(self.world_count); for world_name in &self.world_names { buf.write_string(32767, world_name); } self.dimension_codec.encode(&mut buf); buf.write_nbt(&self.dimension); buf.write_string(32767, &self.world_name); buf.write_long(self.hashed_seed); buf.write_varint(self.max_players); buf.write_varint(self.view_distance); buf.write_boolean(self.reduced_debug_info); buf.write_boolean(self.enable_respawn_screen); 
buf.write_boolean(self.is_debug); buf.write_boolean(self.is_flat); PacketEncoder::new(buf, 0x26) } } pub struct COpenSignEditor { pub pos_x: i32, pub pos_y: i32, pub pos_z: i32, } impl ClientBoundPacket for COpenSignEditor { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_position(self.pos_x, self.pos_y, self.pos_z); PacketEncoder::new(buf, 0x2F) } } pub struct CEntityPosition { pub entity_id: i32, pub delta_x: i16, pub delta_y: i16, pub delta_z: i16, pub on_ground: bool, } impl ClientBoundPacket for CEntityPosition { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_short(self.delta_x); buf.write_short(self.delta_y); buf.write_short(self.delta_z); buf.write_bool(self.on_ground); PacketEncoder::new(buf, 0x29) } } pub struct CEntityPositionAndRotation { pub entity_id: i32, pub delta_x: i16, pub delta_y: i16, pub delta_z: i16, pub yaw: f32, pub pitch: f32, pub on_ground: bool, } impl ClientBoundPacket for CEntityPositionAndRotation { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_short(self.delta_x); buf.write_short(self.delta_y); buf.write_short(self.delta_z); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_bool(self.on_ground); PacketEncoder::new(buf, 0x2A) } } pub struct CEntityRotation { pub entity_id: i32, pub yaw: f32, pub pitch: f32, pub on_ground: bool, } impl ClientBoundPacket for CEntityRotation { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_bool(self.on_ground); PacketEncoder::new(buf, 0x2B) } } pub struct COpenWindow { pub window_id: i32, pub window_type: i32, pub window_title: String, } impl ClientBoundPacket for COpenWindow { 
fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.window_id); buf.write_varint(self.window_type); buf.write_string(32767, &self.window_title); PacketEncoder::new(buf, 0x2E) } } pub struct CPlayerAbilities { pub flags: u8, pub fly_speed: f32, pub fov_modifier: f32, } impl ClientBoundPacket for CPlayerAbilities { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_unsigned_byte(self.flags); buf.write_float(self.fly_speed); buf.write_float(self.fov_modifier); PacketEncoder::new(buf, 0x32) } } pub struct CPlayerInfoAddPlayerProperty { name: String, value: String, signature: Option<String>, } pub struct CPlayerInfoAddPlayer { pub uuid: u128, pub name: String, pub properties: Vec<CPlayerInfoAddPlayerProperty>, pub gamemode: i32, pub ping: i32, pub display_name: Option<String>, } pub enum CPlayerInfo { AddPlayer(Vec<CPlayerInfoAddPlayer>), RemovePlayer(Vec<u128>), UpdateGamemode(u128, Gamemode), } impl ClientBoundPacket for CPlayerInfo { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); match self { CPlayerInfo::AddPlayer(ps) => { buf.write_varint(0); buf.write_varint(ps.len() as i32); for p in ps { buf.write_uuid(p.uuid); buf.write_string(16, &p.name); buf.write_varint(p.properties.len() as i32); for prop in &p.properties { buf.write_string(32767, &prop.name); buf.write_string(32767, &prop.value); buf.write_boolean(prop.signature.is_some()); if let Some(signature) = &prop.signature { buf.write_string(32767, signature); } } buf.write_varint(p.gamemode); buf.write_varint(p.ping); buf.write_boolean(p.display_name.is_some()); if let Some(display_name) = &p.display_name { buf.write_string(32767, display_name); } } } CPlayerInfo::UpdateGamemode(uuid, gamemode) => { buf.write_varint(1); buf.write_varint(1); buf.write_uuid(*uuid); buf.write_varint(gamemode.get_id() as i32); } CPlayerInfo::RemovePlayer(uuids) => { buf.write_varint(4); buf.write_varint(uuids.len() as i32); for uuid in uuids { buf.write_uuid(*uuid); 
} } } PacketEncoder::new(buf, 0x36) } } pub struct CPlayerPositionAndLook { pub x: f64, pub y: f64, pub z: f64, pub yaw: f32, pub pitch: f32, pub flags: u8, pub teleport_id: i32, pub dismount_vehicle: bool, } impl ClientBoundPacket for CPlayerPositionAndLook { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_double(self.x); buf.write_double(self.y); buf.write_double(self.z); buf.write_float(self.yaw); buf.write_float(self.pitch); buf.write_unsigned_byte(self.flags); buf.write_varint(self.teleport_id); buf.write_bool(self.dismount_vehicle); PacketEncoder::new(buf, 0x38) } } pub struct CDestroyEntity { pub entity_id: i32, } impl ClientBoundPacket for CDestroyEntity { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); PacketEncoder::new(buf, 0x3A) } } pub struct CEntityHeadLook { pub entity_id: i32, pub yaw: f32, } impl ClientBoundPacket for CEntityHeadLook { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); PacketEncoder::new(buf, 0x3E) } } #[derive(Debug)] pub struct C3BMultiBlockChangeRecord { pub x: u8, pub y: u8, pub z: u8, pub block_id: u32, } #[derive(Debug)] pub struct CMultiBlockChange { pub chunk_x: i32, pub chunk_z: i32, pub chunk_y: u32, pub records: Vec<C3BMultiBlockChangeRecord>, } impl ClientBoundPacket for CMultiBlockChange { fn encode(&self) -> PacketEncoder { let mut buf = Vec::with_capacity(self.records.len() * 8 + 12); let pos = ((self.chunk_x as i64 & 0x3FFFFF) << 42) | ((self.chunk_z as i64 & 0x3FFFFF) << 20) | (self.chunk_y as i64 & 0xFFFFF); buf.write_long(pos); buf.write_bool(true); // Always inverse the preceding Update Light packet's "Trust Edges" bool buf.write_varint(self.records.len() as i32); // Length of record array for record in &self.records { let long = ((record.block_id as u64) << 12) | ((record.x as u64) << 8) | ((record.z as u64) << 4) | (record.y as 
u64); buf.write_varlong(long as i64); } PacketEncoder::new(buf, 0x3F) } } pub struct CHeldItemChange { pub slot: i8, } impl ClientBoundPacket for CHeldItemChange { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_byte(self.slot); PacketEncoder::new(buf, 0x48) } } pub struct CUpdateViewPosition { pub chunk_x: i32, pub chunk_z: i32, } impl ClientBoundPacket for CUpdateViewPosition { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.chunk_x); buf.write_varint(self.chunk_z); PacketEncoder::new(buf, 0x49) } } pub struct CEntityMetadataEntry { pub index: u8, pub metadata_type: i32, pub value: Vec<u8>, } pub struct CEntityMetadata { pub entity_id: i32, pub metadata: Vec<CEntityMetadataEntry>, } impl ClientBoundPacket for CEntityMetadata { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); for entry in &self.metadata { buf.write_unsigned_byte(entry.index); buf.write_varint(entry.metadata_type); buf.write_bytes(&entry.value); } buf.write_byte(-1); // 0xFF PacketEncoder::new(buf, 0x4D) } } pub struct CEntityEquipmentEquipment { pub slot: i32, pub item: Option<SlotData>, } pub struct CEntityEquipment { pub entity_id: i32, pub equipment: Vec<CEntityEquipmentEquipment>, } impl ClientBoundPacket for CEntityEquipment { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); for slot in &self.equipment { buf.write_varint(slot.slot); buf.write_slot_data(&slot.item); } PacketEncoder::new(buf, 0x50) } } pub struct CTimeUpdate { pub world_age: i64, pub time_of_day: i64, } impl ClientBoundPacket for CTimeUpdate { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_long(self.world_age); buf.write_long(self.time_of_day); PacketEncoder::new(buf, 0x58) } } pub struct CEntityTeleport { pub entity_id: i32, pub x: f64, pub y: f64, pub z: f64, pub yaw: f32, pub pitch: f32, pub on_ground: bool, } impl ClientBoundPacket for 
CEntityTeleport { fn encode(&self) -> PacketEncoder { let mut buf = Vec::new(); buf.write_varint(self.entity_id); buf.write_double(self.x); buf.write_double(self.y); buf.write_double(self.z); buf.write_byte(((self.yaw / 360f32 * 256f32) as i32 % 256) as i8); buf.write_byte(((self.pitch / 360f32 * 256f32) as i32 % 256) as i8); buf.write_bool(self.on_ground); PacketEncoder::new(buf, 0x61) } }
use super::common::*;
use std::path::{Path, PathBuf};
use anyhow::{Result, anyhow, bail};
use std::collections::{BTreeMap, BTreeSet};
use serde::Deserialize;

/// An index over one or more man-page directory trees, with section and
/// subsection titles supplied by a TOML catalogue.
#[derive(Debug)]
pub struct Mandir {
    cat: Catalogue,
    manpath: Vec<PathBuf>,
    // Path to the mandoc binary; stored here for use by other modules.
    mandoc: PathBuf,
    // Top-level sections seen so far (e.g. "3"), and full subsection
    // suffixes (e.g. "3c"), accumulated by add_mandir().
    sections: BTreeSet<String>,
    subsections: BTreeSet<String>,
}

/// Human-readable titles for sections and subsections, deserialised from
/// the catalogue file.
#[derive(Debug, Deserialize)]
struct Catalogue {
    sections: BTreeMap<String, String>,
    subsections: BTreeMap<String, String>,
}

/// Table-of-contents entry for a top-level manual section.
#[derive(Debug)]
pub struct TocSection {
    pub name: String,
    pub title: Option<String>,
    pub subsections: Vec<TocSubsection>,
}

/// Table-of-contents entry for a subsection within a section.
#[derive(Debug)]
pub struct TocSubsection {
    pub name: String,
    pub title: Option<String>,
}

impl Mandir {
    /// Creates a Mandir backed by the catalogue file at `cat`, using the
    /// mandoc binary at `mandoc`.  Man directories are added separately
    /// via [`Mandir::add_mandir`].
    ///
    /// # Errors
    /// Fails if the catalogue file cannot be read or is absent.
    pub fn new<P1, P2>(cat: P1, mandoc: P2) -> Result<Mandir>
    where
        P1: AsRef<Path>,
        P2: AsRef<Path>,
    {
        let catpath = cat.as_ref();
        // ok_or_else: only build the error (and its formatted path) when
        // the catalogue is actually missing.
        let cat: Catalogue = jmclib::toml::read_file(catpath)?
            .ok_or_else(|| anyhow!("catalogue file {}", catpath.display()))?;

        Ok(Mandir {
            cat,
            manpath: Vec::new(),
            mandoc: mandoc.as_ref().to_path_buf(),
            sections: BTreeSet::new(),
            subsections: BTreeSet::new(),
        })
    }

    /// Scans `manpath` for "manN..." subdirectories, recording each
    /// section (first character after "man") and subsection (the whole
    /// suffix), then appends the directory to the search path.
    pub fn add_mandir<P: AsRef<Path>>(&mut self, manpath: P) -> Result<()> {
        let manpath = manpath.as_ref();

        let mut rd = std::fs::read_dir(manpath)?;
        while let Some(ent) = rd.next().transpose()? {
            if !ent.file_type()?.is_dir() {
                continue;
            }

            // Skip names that are not valid UTF-8 rather than panicking
            // (the previous unwrap() aborted on such entries).
            let n = match ent.file_name().to_str() {
                Some(n) => n.to_string(),
                None => continue,
            };

            // Only "manN..." directories matter, and the suffix must be
            // non-empty: a directory named exactly "man" used to panic on
            // the n[3..4] byte slice.
            let suffix = match n.strip_prefix("man") {
                Some(s) if !s.is_empty() => s,
                _ => continue,
            };

            // First character is the top-level section; char-based access
            // avoids panicking on a multi-byte first character.
            let first = suffix.chars().next().expect("suffix is non-empty");
            self.sections.insert(first.to_string());
            self.subsections.insert(suffix.to_string());
        }

        self.manpath.push(manpath.to_path_buf());
        Ok(())
    }

    /// Lists page names in section `sect` across all man directories,
    /// sorted, with the ".<sect>" trailer stripped from file names.
    ///
    /// # Errors
    /// Fails if any "man<sect>" directory cannot be read.
    pub fn pages(&self, sect: &str) -> Result<Vec<String>> {
        let sect = sect.trim().to_lowercase();
        let trailer = format!(".{}", sect);

        let mut pagelist: Vec<String> = Vec::new();
        for mandir in self.manpath.iter() {
            let mut d = mandir.clone();
            d.push(&format!("man{}", sect));

            let mut rd = std::fs::read_dir(&d)?;
            while let Some(ent) = rd.next().transpose()? {
                // Skip non-UTF-8 names instead of panicking.
                if let Some(n) = ent.file_name().to_str() {
                    pagelist.push(n.trim_end_matches(&trailer).to_string());
                }
            }
        }

        pagelist.sort();
        Ok(pagelist)
    }

    /// Builds a table of contents: every known section with its catalogue
    /// title (if any) and the subsections that share its leading name.
    pub fn index(&self) -> Result<Vec<TocSection>> {
        let mut out = Vec::new();
        for sect in self.sections.iter() {
            let name = sect.to_string();
            let title = self.cat.sections.get(&name).map(|s| s.to_string());
            let subsections = self.subsections.iter()
                .filter(|ss| ss.starts_with(sect))
                .map(|ss| TocSubsection {
                    name: ss.to_string(),
                    title: self.cat.subsections.get(ss)
                        .map(|s| s.to_string()),
                })
                .collect();
            out.push(TocSection { name, title, subsections });
        }
        Ok(out)
    }

    /// Locates the file for `page`, searching the given subsection or,
    /// when none is given, every known subsection, across all man
    /// directories in order.
    ///
    /// # Errors
    /// Fails on an unknown section, a page name containing '/', or when
    /// no matching regular file is found.
    pub fn lookup(&self, sect: Option<&str>, page: &str) -> Result<PathBuf> {
        /*
         * First, validate the section if one was provided.
         */
        let sects = if let Some(sect) = &sect {
            if !self.subsections.contains(*sect) {
                bail!("unknown section: {}", sect);
            }
            vec![sect.to_string()]
        } else {
            self.subsections.iter().map(|s| s.to_string()).collect()
        };

        // Reject path separators so a page name cannot escape the tree.
        if page.contains('/') {
            bail!("invalid page: {}", page);
        }

        for mandir in self.manpath.iter() {
            for sect in sects.iter() {
                let mut fp = mandir.clone();
                fp.push(&format!("man{}", sect));
                fp.push(&format!("{}.{}", page, sect));

                match std::fs::metadata(&fp) {
                    Ok(st) if st.is_file() => return Ok(fp),
                    _ => continue,
                }
            }
        }

        bail!("page not found");
    }
}
// NOTE(review): svd2rust-style auto-generated register accessors for
// I3C_TIMINGR0; regenerate from the SVD description rather than editing by hand.
#[doc = "Register `I3C_TIMINGR0` reader"] pub type R = crate::R<I3C_TIMINGR0_SPEC>; #[doc = "Register `I3C_TIMINGR0` writer"] pub type W = crate::W<I3C_TIMINGR0_SPEC>; #[doc = "Field `SCLL_PP` reader - SCL low duration in I3C push-pull phases, in number of kernel clocks cycles: tSCLL_PP = (SCLL_PP + 1) x tI3CCLK SCLL_PP is used to generate tLOW (I3C) timing."] pub type SCLL_PP_R = crate::FieldReader; #[doc = "Field `SCLL_PP` writer - SCL low duration in I3C push-pull phases, in number of kernel clocks cycles: tSCLL_PP = (SCLL_PP + 1) x tI3CCLK SCLL_PP is used to generate tLOW (I3C) timing."] pub type SCLL_PP_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>; #[doc = "Field `SCLH_I3C` reader - SCL high duration, used for I3C messages (both in push-pull and open-drain phases), in number of kernel clocks cycles: tSCLH_I3C = (SCLH_I3C + 1) x tI3CCLK SCLH_I3C is used to generate both tHIGH (I3C) and tHIGH_MIXED timings."] pub type SCLH_I3C_R = crate::FieldReader; #[doc = "Field `SCLH_I3C` writer - SCL high duration, used for I3C messages (both in push-pull and open-drain phases), in number of kernel clocks cycles: tSCLH_I3C = (SCLH_I3C + 1) x tI3CCLK SCLH_I3C is used to generate both tHIGH (I3C) and tHIGH_MIXED timings."] pub type SCLH_I3C_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>; #[doc = "Field `SCLL_OD` reader - SCL low duration in open-drain phases, used for legacy I2C commands and for I3C open-drain phases (address header phase following a START, not a Repeated START), in number of kernel clocks cycles: tSCLL_OD = (SCLL_OD + 1) x tI3CCLK SCLL_OD is used to generate both tLOW (I2C) and tLOW_OD timings (max. of the two)."] pub type SCLL_OD_R = crate::FieldReader; #[doc = "Field `SCLL_OD` writer - SCL low duration in open-drain phases, used for legacy I2C commands and for I3C open-drain phases (address header phase following a START, not a Repeated START), in number of kernel clocks cycles: tSCLL_OD = (SCLL_OD + 1) x tI3CCLK SCLL_OD is used to generate both tLOW (I2C) and tLOW_OD timings (max. of the two)."] pub type SCLL_OD_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>; #[doc = "Field `SCLH_I2C` reader - SCL high duration, used for legacy I2C commands, in number of kernel clocks cycles: tSCLH_I2C = (SCLH_I2C + 1) x tI3CCLK SCLH_I2C is used to generate tHIGH (I2C) timing."] pub type SCLH_I2C_R = crate::FieldReader; #[doc = "Field `SCLH_I2C` writer - SCL high duration, used for legacy I2C commands, in number of kernel clocks cycles: tSCLH_I2C = (SCLH_I2C + 1) x tI3CCLK SCLH_I2C is used to generate tHIGH (I2C) timing."] pub type SCLH_I2C_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>; impl R { #[doc = "Bits 0:7 - SCL low duration in I3C push-pull phases, in number of kernel clocks cycles: tSCLL_PP = (SCLL_PP + 1) x tI3CCLK SCLL_PP is used to generate tLOW (I3C) timing."] #[inline(always)] pub fn scll_pp(&self) -> SCLL_PP_R { SCLL_PP_R::new((self.bits & 0xff) as u8) } #[doc = "Bits 8:15 - SCL high duration, used for I3C messages (both in push-pull and open-drain phases), in number of kernel clocks cycles: tSCLH_I3C = (SCLH_I3C + 1) x tI3CCLK SCLH_I3C is used to generate both tHIGH (I3C) and tHIGH_MIXED timings."] #[inline(always)] pub fn sclh_i3c(&self) -> SCLH_I3C_R { SCLH_I3C_R::new(((self.bits >> 8) & 0xff) as u8) } #[doc = "Bits 16:23 - SCL low duration in open-drain phases, used for legacy I2C commands and for I3C open-drain phases (address header phase following a START, not a Repeated START), in number of kernel clocks cycles: tSCLL_OD = (SCLL_OD + 1) x tI3CCLK SCLL_OD is used to generate both tLOW (I2C) and tLOW_OD timings (max. of the two)."] #[inline(always)] pub fn scll_od(&self) -> SCLL_OD_R { SCLL_OD_R::new(((self.bits >> 16) & 0xff) as u8) } #[doc = "Bits 24:31 - SCL high duration, used for legacy I2C commands, in number of kernel clocks cycles: tSCLH_I2C = (SCLH_I2C + 1) x tI3CCLK SCLH_I2C is used to generate tHIGH (I2C) timing."] #[inline(always)] pub fn sclh_i2c(&self) -> SCLH_I2C_R { SCLH_I2C_R::new(((self.bits >> 24) & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - SCL low duration in I3C push-pull phases, in number of kernel clocks cycles: tSCLL_PP = (SCLL_PP + 1) x tI3CCLK SCLL_PP is used to generate tLOW (I3C) timing."] #[inline(always)] #[must_use] pub fn scll_pp(&mut self) -> SCLL_PP_W<I3C_TIMINGR0_SPEC, 0> { SCLL_PP_W::new(self) } #[doc = "Bits 8:15 - SCL high duration, used for I3C messages (both in push-pull and open-drain phases), in number of kernel clocks cycles: tSCLH_I3C = (SCLH_I3C + 1) x tI3CCLK SCLH_I3C is used to generate both tHIGH (I3C) and tHIGH_MIXED timings."] #[inline(always)] #[must_use] pub fn sclh_i3c(&mut self) -> SCLH_I3C_W<I3C_TIMINGR0_SPEC, 8> { SCLH_I3C_W::new(self) } #[doc = "Bits 16:23 - SCL low duration in open-drain phases, used for legacy I2C commands and for I3C open-drain phases (address header phase following a START, not a Repeated START), in number of kernel clocks cycles: tSCLL_OD = (SCLL_OD + 1) x tI3CCLK SCLL_OD is used to generate both tLOW (I2C) and tLOW_OD timings (max. of the two)."] #[inline(always)] #[must_use] pub fn scll_od(&mut self) -> SCLL_OD_W<I3C_TIMINGR0_SPEC, 16> { SCLL_OD_W::new(self) } #[doc = "Bits 24:31 - SCL high duration, used for legacy I2C commands, in number of kernel clocks cycles: tSCLH_I2C = (SCLH_I2C + 1) x tI3CCLK SCLH_I2C is used to generate tHIGH (I2C) timing."] #[inline(always)] #[must_use] pub fn sclh_i2c(&mut self) -> SCLH_I2C_W<I3C_TIMINGR0_SPEC, 24> { SCLH_I2C_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "I3C timing register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`i3c_timingr0::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`i3c_timingr0::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct I3C_TIMINGR0_SPEC; impl crate::RegisterSpec for I3C_TIMINGR0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`i3c_timingr0::R`](R) reader structure"] impl crate::Readable for I3C_TIMINGR0_SPEC {} #[doc = "`write(|w| ..)` method takes [`i3c_timingr0::W`](W) writer structure"] impl crate::Writable for I3C_TIMINGR0_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets I3C_TIMINGR0 to value 0"] impl crate::Resettable for I3C_TIMINGR0_SPEC { const RESET_VALUE: Self::Ux = 0; }
use embedded_hal::blocking::delay::DelayMs; use embedded_hal::blocking::spi; use embedded_hal::digital::v2::{InputPin, OutputPin}; struct EpdInterface<SPI, RST, BUSY, ECS, DC> { spi: SPI, rst: RST, busy: BUSY, ecs: ECS, dc: DC, } impl<SPI, RST, BUSY, ECS, DC> EpdInterface<SPI, RST, BUSY, ECS, DC> where SPI: spi::Write<u8>, RST: OutputPin, BUSY: InputPin, ECS: OutputPin, DC: OutputPin, { const CMD_PSR: u8 = 0x00; const CMD_PWR: u8 = 0x01; const CMD_PON: u8 = 0x04; const CMD_BTST: u8 = 0x06; const CMD_DTM1: u8 = 0x10; const CMD_DRF: u8 = 0x12; const CMD_DTM2: u8 = 0x13; const CMD_PLL: u8 = 0x30; const CMD_CDI: u8 = 0x50; const CMD_TRES: u8 = 0x61; const CMD_VDCS: u8 = 0x82; const DISPLAY_WIDTH: usize = 152; const DISPLAY_HEIGHT: usize = 152; const DISPLAY_HORIZONTAL_BANKS: usize = (Self::DISPLAY_WIDTH + 7) / 8; const DISPLAY_VERTICAL_BANKS: usize = (Self::DISPLAY_HEIGHT + 7) / 8; const FRAME_BYTES: usize = Self::DISPLAY_WIDTH * Self::DISPLAY_HEIGHT / 8; pub fn new<DELAY: DelayMs<u8>>( spi: SPI, mut rst: RST, busy: BUSY, mut ecs: ECS, dc: DC, delay: &mut DELAY, ) -> Self { let mut sself = Self { spi, rst, busy, ecs, dc, }; sself.reset(delay); sself.cmd(Self::CMD_PWR, &[0x03, 0x00, 0x2b, 0x2b, 0x09]); sself.cmd(Self::CMD_BTST, &[0x17, 0x17, 0x17]); sself.cmd(Self::CMD_PON, &[]); // T_{pwr_on} = 80ms from datasheet delay.delay_ms(100); sself.cmd(Self::CMD_PSR, &[0xcf]); sself.cmd(Self::CMD_CDI, &[0x37]); sself.cmd(Self::CMD_PLL, &[0x29]); sself.cmd(Self::CMD_VDCS, &[0x0a]); // Adafruit library has a bit of a delay here for some reason // (stabilising?) 
delay.delay_ms(10); let display_height_bytes = (Self::DISPLAY_HEIGHT as u16).to_be_bytes(); sself.cmd( Self::CMD_TRES, &[ Self::DISPLAY_WIDTH as u8, display_height_bytes[0], display_height_bytes[1], ], ); sself } fn reset<DELAY: DelayMs<u8>>(&mut self, delay: &mut DELAY) { self.rst.set_low().ok(); delay.delay_ms(100); self.rst.set_high().ok(); } pub fn refresh(&mut self, black: &[u8; 2888], red: &[u8; 2888]) { self.cmd(Self::CMD_DTM1, black); self.cmd(Self::CMD_DTM2, red); self.cmd(Self::CMD_DRF, &[]); while self.busy.is_high().ok().unwrap() {} } fn cmd(&mut self, cmd: u8, bytes: &[u8]) { // select self.ecs.set_low().ok(); // command phase self.dc.set_low().ok(); self.spi.write(&[cmd]).ok(); // data phase if bytes.len() != 0 { self.dc.set_high().ok(); self.spi.write(bytes).ok(); } // deselect self.ecs.set_high().ok(); } } pub struct Epd<SPI, RST, BUSY, ECS, DC> { interface: EpdInterface<SPI, RST, BUSY, ECS, DC>, pub framebuffer_black: [u8; 2888], pub framebuffer_red: [u8; 2888], } impl<SPI, RST, BUSY, ECS, DC> Epd<SPI, RST, BUSY, ECS, DC> where SPI: spi::Write<u8>, RST: OutputPin, BUSY: InputPin, ECS: OutputPin, DC: OutputPin, { pub fn new<DELAY: DelayMs<u8>>( spi: SPI, mut rst: RST, busy: BUSY, mut ecs: ECS, dc: DC, delay: &mut DELAY, ) -> Self { Self { interface: EpdInterface::new(spi, rst, busy, ecs, dc, delay), framebuffer_black: [0xff; 2888], framebuffer_red: [0xff; 2888], } } pub fn refresh(&mut self) { self.interface .refresh(&self.framebuffer_black, &self.framebuffer_red); } }
// NOTE(review): svd2rust-style auto-generated register accessors for WKUPCR;
// regenerate from the SVD description rather than editing by hand.
#[doc = "Register `WKUPCR` reader"] pub type R = crate::R<WKUPCR_SPEC>; #[doc = "Register `WKUPCR` writer"] pub type W = crate::W<WKUPCR_SPEC>; #[doc = "Field `WKUPC` reader - Clear Wakeup pin flag for WKUP. These bits are always read as 0."] pub type WKUPC_R = crate::FieldReader; #[doc = "Field `WKUPC` writer - Clear Wakeup pin flag for WKUP. These bits are always read as 0."] pub type WKUPC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 6, O>; impl R { #[doc = "Bits 0:5 - Clear Wakeup pin flag for WKUP. These bits are always read as 0."] #[inline(always)] pub fn wkupc(&self) -> WKUPC_R { WKUPC_R::new((self.bits & 0x3f) as u8) } } impl W { #[doc = "Bits 0:5 - Clear Wakeup pin flag for WKUP. These bits are always read as 0."] #[inline(always)] #[must_use] pub fn wkupc(&mut self) -> WKUPC_W<WKUPCR_SPEC, 0> { WKUPC_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "reset only by system reset, not reset by wakeup from Standby mode5 wait states are required when writing this register (when clearing a WKUPF bit in PWR_WKUPFR, the AHB write access will complete after the WKUPF has been cleared).\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`wkupcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`wkupcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct WKUPCR_SPEC; impl crate::RegisterSpec for WKUPCR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`wkupcr::R`](R) reader structure"] impl crate::Readable for WKUPCR_SPEC {} #[doc = "`write(|w| ..)` method takes [`wkupcr::W`](W) writer structure"] impl crate::Writable for WKUPCR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets WKUPCR to value 0"] impl crate::Resettable for WKUPCR_SPEC { const RESET_VALUE: Self::Ux = 0; }
use crate::{Entity, Log, OnRemove}; use attached::Var; use parking_lot::RwLock; pub type Deps = RwLock<Vec<Box<dyn Fn(&mut Vars) + Send + Sync>>>; pub type LogVar<T> = Var<T, vars::Log>; pub type TblVar<T> = Var<T, vars::Tbl>; pub trait Accessor: Sized + 'static { fn var() -> &'static TblVar<Self>; fn deps() -> &'static Deps; fn clear(ctx: &mut Vars) { ctx.clear(Self::var()); Self::clear_deps(ctx); } fn clear_deps(ctx: &mut Vars) { clear_deps(Self::deps(), ctx); } fn register_deps<F: Fn(&mut Vars) + Send + Sync + 'static>(f: F) { register_deps(Self::deps(), Box::new(f)); } } fn clear_deps(deps: &'static Deps, ctx: &mut Vars) { deps.read().iter().for_each(|f| f(ctx)); } fn register_deps(deps: &'static Deps, f: Box<dyn Fn(&mut Vars) + Send + Sync + 'static>) { deps.write().push(f); } pub trait EntityAccessor: Entity + Sized + 'static { type Tbl: Send + Sync; fn entity_var() -> &'static TblVar<Self::Tbl>; fn entity_deps() -> &'static Deps; fn on_remove() -> &'static OnRemove<Self>; } pub trait LogAccessor: Entity + Sized + 'static { fn log_var() -> &'static LogVar<Log<Self>>; } // typed variable contexts pub type LogsVar = attached::Vars<vars::Log>; pub type Vars = attached::Vars<vars::Tbl>; pub(crate) mod vars { use attached::var_ctx; var_ctx!(pub Tbl); var_ctx!(pub Log); }
fn main() { let test = vec!["one", "two", "three"]; let index = test.iter().position(|&r| r == "two").unwrap(); println!("{}", index); }
extern crate iron; extern crate router; use std::io::Read; use iron::prelude::*; use iron::status; use router::Router; use std::io::prelude::*; use std::fs::File; use std::io::BufWriter; fn main(){ MainLoop(); } fn MainLoop(){ let mut router = Router::new(); router.post("/", catchIP, "catchIP"); Iron::new(router).http("localhost:80").unwrap(); fn catchIP(req: &mut Request) -> IronResult<Response>{ let mut body = String::new(); req.body.read_to_string(&mut body) .expect("Failed to read line"); println!("{}", body); //FileOutPut(body); let res = "catch ".to_string() + &body; Ok(Response::with((status::Ok, res))) } } /* fn FileOutPut(ipaddress: String){ let file = File::open("database.txt").unwrap(); let mut w = BufWriter::new(file); write!(w, "ipaddress").unwrap(); w.flush().unwrap(); } */
fn main() { // let my_vector: Vec<i8> = Vec::new(); let mut my_vector = vec![1, 2, 3, 4, 5]; println!("Vector at 2: {}", my_vector[2]); my_vector.push(6); my_vector.remove(2); for n in my_vector { println!("{}", n); } }
// NOTE(review): svd2rust-style auto-generated register accessors for the RTC
// MISR register; regenerate from the SVD description rather than editing by hand.
#[doc = "Register `MISR` reader"] pub type R = crate::R<MISR_SPEC>; #[doc = "Field `ALRAMF` reader - Alarm A masked flag"] pub type ALRAMF_R = crate::BitReader<ALRAMF_A>; #[doc = "Alarm A masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRAMF_A { #[doc = "1: This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm A register (RTC_ALRMAR)"] Match = 1, } impl From<ALRAMF_A> for bool { #[inline(always)] fn from(variant: ALRAMF_A) -> Self { variant as u8 != 0 } } impl ALRAMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<ALRAMF_A> { match self.bits { true => Some(ALRAMF_A::Match), _ => None, } } #[doc = "This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm A register (RTC_ALRMAR)"] #[inline(always)] pub fn is_match(&self) -> bool { *self == ALRAMF_A::Match } } #[doc = "Field `ALRBMF` reader - Alarm B masked flag"] pub type ALRBMF_R = crate::BitReader<ALRBMF_A>; #[doc = "Alarm B masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ALRBMF_A { #[doc = "1: This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm B register (RTC_ALRMBR)"] Match = 1, } impl From<ALRBMF_A> for bool { #[inline(always)] fn from(variant: ALRBMF_A) -> Self { variant as u8 != 0 } } impl ALRBMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<ALRBMF_A> { match self.bits { true => Some(ALRBMF_A::Match), _ => None, } } #[doc = "This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the Alarm B register (RTC_ALRMBR)"] #[inline(always)] pub fn is_match(&self) -> bool { *self == ALRBMF_A::Match } } #[doc = "Field `WUTMF` reader - Wakeup timer masked flag"] pub type WUTMF_R = crate::BitReader<WUTMF_A>; #[doc = "Wakeup timer masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WUTMF_A { #[doc = "1: This flag is set by hardware when the wakeup auto-reload counter reaches 0"] Zero = 1, } impl From<WUTMF_A> for bool { #[inline(always)] fn from(variant: WUTMF_A) -> Self { variant as u8 != 0 } } impl WUTMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<WUTMF_A> { match self.bits { true => Some(WUTMF_A::Zero), _ => None, } } #[doc = "This flag is set by hardware when the wakeup auto-reload counter reaches 0"] #[inline(always)] pub fn is_zero(&self) -> bool { *self == WUTMF_A::Zero } } #[doc = "Field `TSMF` reader - Timestamp masked flag"] pub type TSMF_R = crate::BitReader<TSMF_A>; #[doc = "Timestamp masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSMF_A { #[doc = "1: This flag is set by hardware when a time-stamp event occurs"] TimestampEvent = 1, } impl From<TSMF_A> for bool { #[inline(always)] fn from(variant: TSMF_A) -> Self { variant as u8 != 0 } } impl TSMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<TSMF_A> { match self.bits { true => Some(TSMF_A::TimestampEvent), _ => None, } } #[doc = "This flag is set by hardware when a time-stamp event occurs"] #[inline(always)] pub fn is_timestamp_event(&self) -> bool { *self == TSMF_A::TimestampEvent } } #[doc = "Field `TSOVMF` reader - Timestamp overflow masked flag"] pub type TSOVMF_R = crate::BitReader<TSOVMF_A>; #[doc = "Timestamp overflow masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TSOVMF_A { #[doc = "1: This flag is set by hardware when a time-stamp event occurs while TSF is already set"] Overflow = 1, } impl From<TSOVMF_A> for bool { #[inline(always)] fn from(variant: TSOVMF_A) -> Self { variant as u8 != 0 } } impl TSOVMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<TSOVMF_A> { match self.bits { true => Some(TSOVMF_A::Overflow), _ => None, } } #[doc = "This flag is set by hardware when a time-stamp event occurs while TSF is already set"] #[inline(always)] pub fn is_overflow(&self) -> bool { *self == TSOVMF_A::Overflow } } #[doc = "Field `ITSMF` reader - Internal timestamp masked flag"] pub type ITSMF_R = crate::BitReader<ITSMF_A>; #[doc = "Internal timestamp masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum ITSMF_A { #[doc = "1: This flag is set by hardware when a timestamp on the internal event occurs"] TimestampEvent = 1, } impl From<ITSMF_A> for bool { #[inline(always)] fn from(variant: ITSMF_A) -> Self { variant as u8 != 0 } } impl ITSMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<ITSMF_A> { match self.bits { true => Some(ITSMF_A::TimestampEvent), _ => None, } } #[doc = "This flag is set by hardware when a timestamp on the internal event occurs"] #[inline(always)] pub fn is_timestamp_event(&self) -> bool { *self == ITSMF_A::TimestampEvent } } #[doc = "Field `SSRUMF` reader - SSR underflow masked flag"] pub type SSRUMF_R = crate::BitReader<SSRUMF_A>; #[doc = "SSR underflow masked flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum SSRUMF_A { #[doc = "1: This flag is set by hardware when the SSR rolls under 0. SSRUF is not set when SSCLR=1"] Underflow = 1, } impl From<SSRUMF_A> for bool { #[inline(always)] fn from(variant: SSRUMF_A) -> Self { variant as u8 != 0 } } impl SSRUMF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<SSRUMF_A> { match self.bits { true => Some(SSRUMF_A::Underflow), _ => None, } } #[doc = "This flag is set by hardware when the SSR rolls under 0. SSRUF is not set when SSCLR=1"] #[inline(always)] pub fn is_underflow(&self) -> bool { *self == SSRUMF_A::Underflow } } impl R { #[doc = "Bit 0 - Alarm A masked flag"] #[inline(always)] pub fn alramf(&self) -> ALRAMF_R { ALRAMF_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - Alarm B masked flag"] #[inline(always)] pub fn alrbmf(&self) -> ALRBMF_R { ALRBMF_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - Wakeup timer masked flag"] #[inline(always)] pub fn wutmf(&self) -> WUTMF_R { WUTMF_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - Timestamp masked flag"] #[inline(always)] pub fn tsmf(&self) -> TSMF_R { TSMF_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - Timestamp overflow masked flag"] #[inline(always)] pub fn tsovmf(&self) -> TSOVMF_R { TSOVMF_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 5 - Internal timestamp masked flag"] #[inline(always)] pub fn itsmf(&self) -> ITSMF_R { ITSMF_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bit 6 - SSR underflow masked flag"] #[inline(always)] pub fn ssrumf(&self) -> SSRUMF_R { SSRUMF_R::new(((self.bits >> 6) & 1) != 0) } } #[doc = "Masked interrupt status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`misr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct MISR_SPEC; impl crate::RegisterSpec for MISR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`misr::R`](R) reader structure"] impl crate::Readable for MISR_SPEC {} #[doc = "`reset()` method sets MISR to value 0"] impl crate::Resettable for MISR_SPEC { const RESET_VALUE: Self::Ux = 0; }
use libbeaglebone::enums::DeviceState; use libbeaglebone::pwm::PWM; use libbeaglebone::pwm::PWMState; use crate::pinouts::analog::output::AnalogOutput; use crate::pinouts::analog::output::PwmOutput; pub struct LibBeagleBonePwm { pwm: PWM, period: u32, } impl AnalogOutput for LibBeagleBonePwm { fn set_value(&mut self, val: f32) { if let Err(e) = self.pwm.set_duty_cycle((val * self.period as f32) as u32) { error!("{}", e); } } } impl PwmOutput for LibBeagleBonePwm { fn set_pulse_duty_cycle(&mut self, val: u32) { if let Err(e) = self.pwm.set_duty_cycle(val) { error!("{}", e); } if let Err(e) = self.pwm.set_period(val) { error!("{}", e); } } fn set_period(&mut self, val: u32) { if let Err(e) = self.pwm.set_period(val) { error!("{}", e); } self.period = val; } } impl LibBeagleBonePwm { pub fn new(chip: u8, num: u8) -> Self { let mut pwm = PWM::new(chip, num); if let Err(e) = pwm.set_period(20_000) { error!("{}", e); } if let Err(e) = pwm.set_export(DeviceState::Exported) { error!("{}", e); } if let Err(e) = pwm.set_state(PWMState::Enabled) { error!("{}", e); } Self { pwm, period: 20_000, } } }
use advent20::input_string; use anyhow::*; use itertools::iproduct; fn part1(input: &[u32]) -> Option<u32> { iproduct!(input, input) .filter(|(&x, &y)| x + y == 2020) .next() .map(|(x, y)| x * y) } fn part2(input: &[u32]) -> Option<u32> { iproduct!(input, input, input) .filter(|(&x, &y, &z)| x + y + z == 2020) .next() .map(|(x, y, z)| x * y * z) } fn main() -> Result<()> { let code = input_string()?; let nums = code .lines() .map(|l| l.parse::<u32>().context("Failed to parse input!")) .collect::<Result<Vec<_>>>()?; println!( "part 1: {}", part1(&nums).ok_or(format_err!("no result found!"))? ); println!( "part 2: {}", part2(&nums).ok_or(format_err!("no result found!"))? ); Ok(()) }
use specs::Join;

/// Steers `Attracted` entities toward the player whenever they have an
/// unobstructed ray to it.
pub struct AttractedSystem {
    // Scratch buffer of (entity, time-of-impact) ray hits, reused across
    // runs to avoid per-frame allocation.
    collided: Vec<(::specs::Entity, f32)>,
}

impl AttractedSystem {
    pub fn new() -> Self {
        AttractedSystem { collided: vec![] }
    }
}

impl<'a> ::specs::System<'a> for AttractedSystem {
    type SystemData = (
        ::specs::ReadStorage<'a, ::component::Player>,
        ::specs::ReadStorage<'a, ::component::PhysicBody>,
        ::specs::WriteStorage<'a, ::component::Attracted>,
        ::specs::WriteStorage<'a, ::component::Momentum>,
        ::specs::Fetch<'a, ::resource::PhysicWorld>,
        ::specs::Fetch<'a, ::resource::UpdateTime>,
        ::specs::Fetch<'a, ::resource::Audio>,
    );

    fn run(&mut self, (players, bodies, mut attracteds, mut momentums, physic_world, update_time, audio): Self::SystemData) {
        // Position of the first (assumed only) player entity; panics if
        // no player with a physics body exists — presumably guaranteed
        // elsewhere. TODO confirm.
        let player_pos = {
            let (_, player_body) = (&players, &bodies).join().next().unwrap();
            player_body.get(&physic_world).position().clone()
        };

        for (attracted, momentum, body) in (&mut attracteds, &mut momentums, &bodies).join() {
            let pos = body.get(&physic_world).position();
            // Fixed-rate accumulator: add the frame's elapsed time, then
            // run one line-of-sight check per `attracted_update_time`
            // still owed (the while condition is on >= 0.0, so the
            // accumulator goes negative between re-checks).
            attracted.last_update += update_time.0;
            while attracted.last_update >= 0.0 {
                attracted.last_update -= ::CONFIG.attracted_update_time;

                // Ray from this entity toward the player.
                let ray = ::ncollide::query::Ray {
                    origin: ::na::Point3::from_coordinates(pos.translation.vector),
                    dir: player_pos.translation.vector - pos.translation.vector,
                };

                // Only player and wall bodies can block/receive the ray.
                let mut group = ::nphysics::object::RigidBodyCollisionGroups::new_dynamic();
                group.set_membership(&[::entity::ATTRACTED_VISION_GROUP]);
                group.set_whitelist(&[::entity::PLAYER_GROUP, ::entity::WALL_GROUP]);

                // Collect every rigid-body hit with its time of impact.
                self.collided.clear();
                for (other_body, collision) in physic_world
                    .collision_world()
                    .interferences_with_ray(&ray, &group.as_collision_groups())
                {
                    if let ::nphysics::object::WorldObject::RigidBody(other_body) = other_body.data {
                        let other_entity =
                            ::component::PhysicBody::entity(physic_world.rigid_body(other_body));
                        self.collided.push((other_entity, collision.toi));
                    }
                }
                // Sort by time of impact so .first() is the nearest hit.
                self.collided.sort_by(|a, b| (a.1).partial_cmp(&b.1).unwrap());

                // If the nearest hit is the player, head toward it and
                // play the attraction sound; otherwise stop moving.
                if self.collided.first().iter().any(|&&(e, _)| players.get(e).is_some()) {
                    momentum.direction = ray.dir;
                    audio.play(::audio::Sound::Attracted, pos.translation.vector.into());
                } else {
                    momentum.direction = ::na::zero();
                }
            }
        }
    }
}
use crate::camera::{Camera, CameraConfig}; use crate::color::color; use crate::hittable::{ box3d::Box3D, bvh::BvhNode, hittable_list::HittableList, rect::{XyRect, XzRect, YzRect}, rotate::{RotateX, RotateY, RotateZ}, Hittables, }; use crate::material::{diffuse::Diffuse, lambertian::Lambertian}; use crate::scenes::Scene; use crate::texture::solidcolor::SolidColor; use crate::vec::vec3; use std::sync::Arc; #[allow(dead_code)] pub fn rotate_test(t0: f64, t1: f64, aspect_ratio: f64) -> Scene { let camera = Camera::new(CameraConfig { lookfrom: vec3(278.0, 278.0, -800.0), lookat: vec3(278.0, 278.0, 0.0), vup: vec3(0.0, 1.0, 0.0), vfov: 40.0, aspect_ratio: aspect_ratio, aperture: 0.1, focus_dist: 10.0, time0: t0, time1: t1, background: color(0.0, 0.0, 0.0), }); // Put a box on each axis, rotate by 30 degrees let red = Lambertian::new(SolidColor::new(0.65, 0.05, 0.05)); let white = Lambertian::new(SolidColor::new(0.73, 0.73, 0.73)); let green = Lambertian::new(SolidColor::new(0.12, 0.45, 0.15)); let light = Diffuse::new(SolidColor::new(15.0, 15.0, 15.0)); let wall1 = YzRect::new(0.0, 600.0, 0.0, 600.0, 600.0, green.clone()); let wall2 = YzRect::new(0.0, 600.0, 0.0, 600.0, 0.0, red.clone()); let wall3 = XzRect::new(0.0, 600.0, 0.0, 600.0, 0.0, white.clone()); let wall4 = XzRect::new(0.0, 600.0, 0.0, 600.0, 600.0, white.clone()); let wall5 = XyRect::new(0.0, 600.0, 0.0, 600.0, 600.0, white.clone()); let box1 = Box3D::new( vec3(0.0, 250.0, 0.0), vec3(100.0, 350.0, 100.0), white.clone(), ); let box2 = Box3D::new( vec3(0.0, 0.0, 250.0), vec3(100.0, 100.0, 350.0), green.clone(), ); let box3 = Box3D::new( vec3(250.0, 0.0, 0.0), vec3(350.0, 100.0, 100.0), green.clone(), ); let box1 = RotateY::new(Arc::new(box1), 30.0); let box2 = RotateZ::new(Arc::new(box2), 30.0); let box3 = RotateX::new(Arc::new(box3), 30.0); let light = XzRect::new(213.0, 343.0, 227.0, 332.0, 599.0, light.clone()); let mut world = HittableList { hittables: Vec::new(), }; world.add(wall1); world.add(wall2); 
world.add(wall3); world.add(wall4); world.add(wall5); world.add(box1); world.add(box2); world.add(box3); world.add(light); return Scene { camera: camera, hittables: Hittables::from(BvhNode::new(world, t0, t1)), lights: Hittables::from(HittableList { hittables: Vec::new(), }), }; }
extern crate proc_macro; use sde_specs::Schema; use sde_specs_macro::Schema; #[derive(Schema)] pub struct Point { pub x: u32, pub y: u32, } fn main() { Point::to_schema(); let sample = syn::parse_str::<syn::Item>("pub struct Point { pub x: u32, pub y: u32, }").unwrap(); dbg!("Sample {:?}", sample); }
//! Process and store MARC data. //! //! This module provides support for parsing MARC data from XML (in both //! Library of Congress and VIAF formats), and for storing MARC data in //! Parquet files as a flat table of MARC fields. pub mod book_fields; pub mod flat_fields; pub mod parse; pub mod record; pub use record::MARCRecord;
use super::constants::*; pub fn sine_wave(freq: f32, t: f32, offset: f32) -> f32 { ((t * freq + offset) * PI * 2.0).sin() } pub fn square_wave(freq: f32, t: f32, offset: f32) -> f32 { sine_wave(freq, t, offset).signum() } pub fn sawtooth_wave(freq: f32, t: f32, offset: f32) -> f32 { (2.0 * ((t * freq + offset) - (t * freq + offset + 0.5).floor())) } pub fn triangle_wave(freq: f32, t: f32, offset: f32) -> f32 { sawtooth_wave(freq, t, offset).abs() * 2.0 - 1.0 }
// NOTE(review): pre-1.0-style Rust (bare `Box<Trait>` objects, `mio::tcp`).
// Comments only — no code changes.
use std::io::prelude::*;
use mio::tcp::*;
use http::*;
use app_server::*;
pub use regex::Regex;

/// A request handler that can be registered against a URI pattern.
pub trait Handler : Send + 'static {
    /// Handles `request`, writing the reply into `response`.
    fn process(&mut self, request: Request, response: &mut Response);
    /// Returns an independent copy of this handler — a manual "clone" for
    /// boxed trait objects, which cannot require `Clone` directly.
    fn duplicate(&self) -> Box<Handler>;
}

/// A compiled route: the URI regex plus the handler invoked on a match.
struct HandlerRule(Regex, Box<Handler>);
/// A route definition as supplied by the caller: regex source string + handler.
pub struct HandlerRoute(pub String, pub Box<Handler>);

/// An app that parses HTTP requests and dispatches them to regex-matched handlers.
pub struct HandlerApp {
    // Rules are checked in registration order on each request.
    handlers: Vec<HandlerRule>,
    // Incremental request parser; fed raw bytes in `handle`.
    builder: RequestBuilder,
}

impl HandlerApp {
    /// Compiles each route's regex and duplicates its handler into dispatch rules.
    ///
    /// Panics (`unwrap`) if any route pattern is not a valid regex.
    pub fn new(handler_defs: Vec<HandlerRoute>) -> HandlerApp {
        let mut handlers = Vec::new();
        for &HandlerRoute(ref s, ref h) in &handler_defs {
            handlers.push(HandlerRule(Regex::new(&s).unwrap(), h.duplicate()));
        }
        return HandlerApp {
            handlers: handlers,
            builder: RequestBuilder::new(),
        };
    }
}

impl App for HandlerApp {
    /// Reads available bytes from `stream`, and once the builder yields a
    /// complete request, runs the first handler whose regex matches the URI.
    fn handle(&mut self, stream: &mut TcpStream) {
        let mut data = Vec::new();
        // Read result is discarded; the builder below decides whether the
        // bytes received so far form a complete request.
        let _ = stream.read_to_end(&mut data);
        if let Some(r) = self.builder.read(&data) {
            let resp = &mut Response::new(stream);
            for &mut HandlerRule(ref regex, ref mut handler) in &mut self.handlers {
                if regex.is_match(&r.uri) {
                    // First match wins; `r` is moved into the handler, so we
                    // must return immediately after dispatch.
                    handler.process(r, resp);
                    return;
                }
            }
            // No rule matched — reply with the not-found response.
            resp.set_not_found().send();
        }
    }
    /// Deep-copies the app: handlers are duplicated, but the parser state is
    /// NOT cloned — the copy starts with a fresh `RequestBuilder`.
    fn duplicate(&self) -> Box<App> {
        let mut handlers = Vec::new();
        for &HandlerRule(ref r, ref h) in &self.handlers {
            handlers.push(HandlerRule(r.clone(), h.duplicate()));
        }
        Box::new(HandlerApp {
            handlers: handlers,
            builder: RequestBuilder::new(),
        })
    }
}
fn main() {
    fizzbuzz(50);
}

/// Returns the FizzBuzz word for `i`: "Fizz" for multiples of 3, "Buzz" for
/// multiples of 5, "FizzBuzz" for multiples of both, otherwise the number itself.
fn fizz_buzz_label(i: usize) -> String {
    match (i % 3 == 0, i % 5 == 0) {
        (true, true) => "FizzBuzz".to_string(),
        (true, false) => "Fizz".to_string(),
        (false, true) => "Buzz".to_string(),
        (false, false) => i.to_string(),
    }
}

/// Prints the FizzBuzz sequence for 1..=count, one entry per line.
///
/// BUG FIX: the previous version printed "FizzBuzz" for every multiple of 5
/// (e.g. 5, 10, 20) and never printed "Buzz" at all; it also mislabelled
/// multiples of 15, which happened to work only because the 5-arm shadowed
/// the 3-arm. Plain multiples of 5 now print "Buzz" and only multiples of
/// both 3 and 5 print "FizzBuzz".
fn fizzbuzz(count: usize) {
    for i in 1..=count {
        println!("{}", fizz_buzz_label(i));
    }
}
// MIT License // // Copyright (c) 2018-2021 Hans-Martin Will // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
use ast; use schema; use session; use Error; lalrpop_mod!(pub sql); use csv::StringRecord; /// We are using the StringRecord type provided by the CSV library as row representation pub type Row = StringRecord; pub type RowResult<'a> = Result<&'a Row, Error>; /// Query plan representation pub struct QueryPlan {} pub trait RowSet { fn reset(&mut self) -> Result<(), Error>; fn next<'a>(&'a mut self) -> Option<RowResult<'a>>; fn meta<'a>(&'a self) -> &'a schema::RowSet; } /// An empty row set that is returned when no result is needed #[derive(Debug)] struct EmptyRowSet { meta_data: schema::RowSet, } impl EmptyRowSet { fn new() -> Self { EmptyRowSet { meta_data: schema::RowSet::empty(), } } } impl RowSet for EmptyRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } /// A meta data row set that describes schema objects struct MetaDataRowSet { index: usize, meta_data: schema::RowSet, columns: schema::RowSet, result_buffer: Row, } impl MetaDataRowSet { fn new(columns: schema::RowSet) -> Self { MetaDataRowSet { index: 0, columns, meta_data: schema::RowSet::meta_data(), result_buffer: Row::new(), } } } impl RowSet for MetaDataRowSet { fn reset(&mut self) -> Result<(), Error> { self.index = 0; Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { let columns = &self.columns.columns; if self.index < columns.len() { let column = &columns[self.index]; self.index += 1; { let result = &mut self.result_buffer; result.clear(); result.push_field(column.name.as_str()); result.push_field(if column.not_null { "1" } else { "0" }); result.push_field(if column.primary_key { "1" } else { "0" }); result.push_field(&format!("{}", column.data_type)); } Some(Ok(&self.result_buffer)) } else { None } } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct FullTableScanRowSet { table_name: String, meta_data: schema::RowSet, } impl 
FullTableScanRowSet { fn new(table_name: String, meta_data: schema::RowSet) -> Self { FullTableScanRowSet { table_name, meta_data, } } } impl RowSet for FullTableScanRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct LiteralRowSet { meta_data: schema::RowSet, } impl LiteralRowSet { fn new(meta_data: schema::RowSet) -> Self { LiteralRowSet { meta_data } } } impl RowSet for LiteralRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct FilterRowSet { meta_data: schema::RowSet, } impl FilterRowSet { fn new(meta_data: schema::RowSet) -> Self { FilterRowSet { meta_data } } } impl RowSet for FilterRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct ProjectRowSet { meta_data: schema::RowSet, } impl ProjectRowSet { fn new(meta_data: schema::RowSet) -> Self { ProjectRowSet { meta_data } } } impl RowSet for ProjectRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct AggregateRowSet { meta_data: schema::RowSet, } impl AggregateRowSet { fn new(meta_data: schema::RowSet) -> Self { AggregateRowSet { meta_data } } } impl RowSet for AggregateRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct GroupByRowSet { meta_data: schema::RowSet, } impl GroupByRowSet { fn new(meta_data: schema::RowSet) -> Self { GroupByRowSet { meta_data } } } impl RowSet for GroupByRowSet { fn reset(&mut self) -> 
Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct SortRowSet { meta_data: schema::RowSet, } impl SortRowSet { fn new(meta_data: schema::RowSet) -> Self { SortRowSet { meta_data } } } impl RowSet for SortRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } struct LimitRowSet { nested: Box<dyn RowSet>, counter: usize, offset: usize, limit: usize, } impl LimitRowSet { fn new(nested: Box<dyn RowSet>, limit: usize, offset: usize) -> Self { LimitRowSet { nested, counter: 0, limit, offset, } } } impl RowSet for LimitRowSet { fn reset(&mut self) -> Result<(), Error> { self.counter = 0; self.nested.reset() } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { // skip until the first row to return while self.counter < self.offset { let result = self.nested.next(); if result.is_none() { return None; } self.counter += 1; } // have we reached the overall limit of rows to return? 
if self.counter >= self.offset + self.limit { return None; } let result = self.nested.next(); if result.is_some() { self.counter += 1; } result } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.nested.meta() } } struct SortedJoinRowSet { meta_data: schema::RowSet, } impl SortedJoinRowSet { fn new(meta_data: schema::RowSet) -> Self { SortedJoinRowSet { meta_data } } } impl RowSet for SortedJoinRowSet { fn reset(&mut self) -> Result<(), Error> { Ok(()) } fn next<'a>(&'a mut self) -> Option<RowResult<'a>> { None } fn meta<'a>(&'a self) -> &'a schema::RowSet { &self.meta_data } } /// An evaluation engine for SQL statements pub struct Evaluator<'a> { session: &'a mut session::Session, } impl<'a> Evaluator<'a> { pub fn new(session: &'a mut session::Session) -> Evaluator<'a> { Evaluator { session } } pub fn eval(&mut self, command: &str) -> Result<Box<dyn RowSet>, Error> { let ast = self.parse(command)?; self.interpret(ast) } fn parse(&self, command: &str) -> Result<ast::SqlStatement, Error> { let parse_result = sql::SqlStatementParser::new().parse(command); match parse_result { Ok(statement) => Ok(statement), Err(err) => Err(Error::from(format!( "Input `{}`: parse error {:?}", command, err ))), } } fn interpret(&mut self, statement: ast::SqlStatement) -> Result<Box<dyn RowSet>, Error> { match statement { ast::SqlStatement::Statement(statement) => self.compile(statement), ast::SqlStatement::ExplainQueryPlan(statement) => { let _ = self.compile(statement)?; Err(Error::from("Explain not implemented yet!")) } ast::SqlStatement::Attach(info) => self.attach(info), ast::SqlStatement::Describe(info) => self.describe(info), ast::SqlStatement::AlterDomain(_) => unimplemented!("interpret for ALTER DOMAIN"), ast::SqlStatement::CreateDomain(_) => unimplemented!("interpret for CREATE DOMAIN"), ast::SqlStatement::DropDomain(_) => unimplemented!("interpret for DROP DOMAIN"), ast::SqlStatement::AlterTable(_) => unimplemented!("interpret for ALTER TABLE"), 
ast::SqlStatement::CreateTable(_) => unimplemented!("interpret for CREATE TABLE"), ast::SqlStatement::DropTable(_) => unimplemented!("interpret for DROP TABLE"), ast::SqlStatement::CreateSchema(_) => unimplemented!("interpret for CREATE SCHEMA"), ast::SqlStatement::DropSchema(_) => unimplemented!("interpret for DROP SCHEMA"), ast::SqlStatement::CreateView(_) => unimplemented!("interpret for CREATE VIEW"), ast::SqlStatement::DropView(_) => unimplemented!("interpret for DROP VIEW"), } } fn compile(&self, dml: ast::Statement) -> Result<Box<dyn RowSet>, Error> { match dml { ast::Statement::Select(select) => self.compile_select(select), _ => Err(Error::from( "Compile not implemented yet for these statement types!", )), } } fn compile_select(&self, select: ast::SelectStatement) -> Result<Box<dyn RowSet>, Error> { // compile the expression let rowset = self.compile_set_expression(&select.expr); // do we have a sorting clause? if !select.order_by.is_empty() { return Err(Error::from("Sorting not implemented yet")); } // do we have a limit clause? 
if select.limit.is_some() { let _limit = select.limit.unwrap(); let offset = 0; // TODO implement evaluation of expression let row_count = 0; // TODO implement evaluation of expression Ok(Box::new(LimitRowSet::new(rowset?, offset, row_count))) } else { rowset } } fn compile_set_expression(&self, expr: &ast::SetExpression) -> Result<Box<dyn RowSet>, Error> { match expr { &ast::SetExpression::Values(ref _values) => unimplemented!(), &ast::SetExpression::Op { op: ref _op, left: ref _left, right: ref _right, } => unimplemented!(), &ast::SetExpression::Query { ref mode, ref columns, ref from, ref where_expr, ref group_by, } => { assert!(match mode { &ast::SelectMode::All => true, _ => false, }); assert!(match columns { &ast::ResultColumns::All => true, _ => false, }); assert!(where_expr.is_none()); assert!(group_by.is_none()); assert!(from.len() == 1); self.compile_table_expression(from[0].as_ref()) } } } fn compile_table_expression( &self, expr: &ast::TableExpression, ) -> Result<Box<dyn RowSet>, Error> { match expr { &ast::TableExpression::Named { name: ref _name, alias: ref _alias, } => unimplemented!(), &ast::TableExpression::Select { select: ref _select, alias: ref _alias, } => unimplemented!(), &ast::TableExpression::Join { left: ref _left, right: ref _right, op: ref _op, constraint: ref _constraint, } => unimplemented!(), } } fn attach(&mut self, info: ast::AttachStatement) -> Result<Box<dyn RowSet>, Error> { self.session.database.attach_file( info.schema_name().unwrap_or(&self.session.default_schema), info.table_name(), &info.path, )?; Ok(Box::new(EmptyRowSet::new())) } fn describe(&mut self, info: ast::DescribeStatement) -> Result<Box<dyn RowSet>, Error> { let rowset = self.session.database.describe( info.schema_name().unwrap_or(&self.session.default_schema), info.table_name(), )?; Ok(Box::new(MetaDataRowSet::new(rowset))) } } /// An operator that used to construct query pipelines. 
trait Operator {} /* Operator types: - full table scan - map - filter - sort - accumulate - group by - sorted join - limit */
// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>. // This file may not be copied, modified, or distributed except according to those terms. /// Creates an enum where each variant contains a `Future`. The created enum impls `Future`. /// Useful when a fn needs to return possibly many different types of futures. #[macro_export] macro_rules! future_enum { { $(#[$attr:meta])* pub enum $name:ident<$($tp:ident),*> { $(#[$attrv:meta])* $($variant:ident($inner:ty)),* } } => { $(#[$attr])* pub enum $name<$($tp),*> { $(#[$attrv])* $($variant($inner)),* } impl<__T, __E, $($tp),*> $crate::futures::Future for $name<$($tp),*> where __T: Send + 'static, $($inner: $crate::futures::Future<Item=__T, Error=__E>),* { type Item = __T; type Error = __E; fn poll(&mut self) -> $crate::futures::Poll<Self::Item, Self::Error> { match *self { $($name::$variant(ref mut f) => $crate::futures::Future::poll(f)),* } } } }; { $(#[$attr:meta])* enum $name:ident<$($tp:ident),*> { $(#[$attrv:meta])* $($variant:ident($inner:ty)),* } } => { $(#[$attr])* enum $name<$($tp),*> { $(#[$attrv])* $($variant($inner)),* } impl<__T, __E, $($tp),*> $crate::futures::Future for $name<$($tp),*> where __T: Send + 'static, $($inner: $crate::futures::Future<Item=__T, Error=__E>),* { type Item = __T; type Error = __E; fn poll(&mut self) -> $crate::futures::Poll<Self::Item, Self::Error> { match *self { $($name::$variant(ref mut f) => $crate::futures::Future::poll(f)),* } } } } } #[doc(hidden)] #[macro_export] macro_rules! as_item { ($i:item) => {$i}; } #[doc(hidden)] #[macro_export] macro_rules! impl_serialize { ($impler:ident, { $($lifetime:tt)* }, $(@($name:ident $n:expr))* -- #($_n:expr) ) => { as_item! 
{ impl$($lifetime)* $crate::serde::Serialize for $impler$($lifetime)* { #[inline] fn serialize<S>(&self, serializer: &mut S) -> ::std::result::Result<(), S::Error> where S: $crate::serde::Serializer { match *self { $( $impler::$name(ref field) => $crate::serde::Serializer::serialize_newtype_variant( serializer, stringify!($impler), $n, stringify!($name), field, ) ),* } } } } }; // All args are wrapped in a tuple so we can use the newtype variant for each one. ($impler:ident, { $($lifetime:tt)* }, $(@$finished:tt)* -- #($n:expr) $name:ident($field:ty) $($req:tt)*) => ( impl_serialize!($impler, { $($lifetime)* }, $(@$finished)* @($name $n) -- #($n + 1) $($req)*); ); // Entry ($impler:ident, { $($lifetime:tt)* }, $($started:tt)*) => (impl_serialize!($impler, { $($lifetime)* }, -- #(0) $($started)*);); } #[doc(hidden)] #[macro_export] macro_rules! impl_deserialize { ($impler:ident, $(@($name:ident $n:expr))* -- #($_n:expr) ) => ( impl $crate::serde::Deserialize for $impler { #[inline] fn deserialize<D>(deserializer: &mut D) -> ::std::result::Result<$impler, D::Error> where D: $crate::serde::Deserializer { #[allow(non_camel_case_types, unused)] enum Field { $($name),* } impl $crate::serde::Deserialize for Field { #[inline] fn deserialize<D>(deserializer: &mut D) -> ::std::result::Result<Field, D::Error> where D: $crate::serde::Deserializer { struct FieldVisitor; impl $crate::serde::de::Visitor for FieldVisitor { type Value = Field; #[inline] fn visit_usize<E>(&mut self, value: usize) -> ::std::result::Result<Field, E> where E: $crate::serde::de::Error, { $( if value == $n { return ::std::result::Result::Ok(Field::$name); } )* ::std::result::Result::Err( $crate::serde::de::Error::custom( format!("No variants have a value of {}!", value)) ) } } deserializer.deserialize_struct_field(FieldVisitor) } } struct Visitor; impl $crate::serde::de::EnumVisitor for Visitor { type Value = $impler; #[inline] fn visit<V>(&mut self, mut visitor: V) -> ::std::result::Result<$impler, 
V::Error> where V: $crate::serde::de::VariantVisitor { match try!(visitor.visit_variant()) { $( Field::$name => { let val = try!(visitor.visit_newtype()); ::std::result::Result::Ok($impler::$name(val)) } ),* } } } const VARIANTS: &'static [&'static str] = &[ $( stringify!($name) ),* ]; deserializer.deserialize_enum(stringify!($impler), VARIANTS, Visitor) } } ); // All args are wrapped in a tuple so we can use the newtype variant for each one. ($impler:ident, $(@$finished:tt)* -- #($n:expr) $name:ident($field:ty) $($req:tt)*) => ( impl_deserialize!($impler, $(@$finished)* @($name $n) -- #($n + 1) $($req)*); ); // Entry ($impler:ident, $($started:tt)*) => (impl_deserialize!($impler, -- #(0) $($started)*);); } /// The main macro that creates RPC services. /// /// Rpc methods are specified, mirroring trait syntax: /// /// ``` /// # #![feature(conservative_impl_trait, plugin)] /// # #![plugin(snake_to_camel)] /// # #[macro_use] extern crate tarpc; /// # fn main() {} /// # service! { /// /// Say hello /// rpc hello(name: String) -> String; /// # } /// ``` /// /// Attributes can be attached to each rpc. These attributes /// will then be attached to the generated service traits' /// corresponding `fn`s, as well as to the client stubs' RPCs. /// /// The following items are expanded in the enclosing module: /// /// * `FutureService` -- the trait defining the RPC service via a `Future` API. /// * `SyncService` -- a service trait that provides a synchronous API for when /// spawning a thread per request is acceptable. /// * `FutureServiceExt` -- provides the methods for starting a service. There is an umbrella impl /// for all implers of `FutureService`. It's a separate trait to prevent /// name collisions with RPCs. /// * `SyncServiceExt` -- same as `FutureServiceExt` but for `SyncService`. /// * `FutureClient` -- a client whose RPCs return `Future`s. /// * `SyncClient` -- a client whose RPCs block until the reply is available. 
Easiest /// interface to use, as it looks the same as a regular function call. /// #[macro_export] macro_rules! service { // Entry point ( $( $(#[$attr:meta])* rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) $(-> $out:ty)* $(| $error:ty)*; )* ) => { service! {{ $( $(#[$attr])* rpc $fn_name( $( $arg : $in_ ),* ) $(-> $out)* $(| $error)*; )* }} }; // Pattern for when the next rpc has an implicit unit return type and no error type. ( { $(#[$attr:meta])* rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ); $( $unexpanded:tt )* } $( $expanded:tt )* ) => { service! { { $( $unexpanded )* } $( $expanded )* $(#[$attr])* rpc $fn_name( $( $arg : $in_ ),* ) -> () | $crate::util::Never; } }; // Pattern for when the next rpc has an explicit return type and no error type. ( { $(#[$attr:meta])* rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) -> $out:ty; $( $unexpanded:tt )* } $( $expanded:tt )* ) => { service! { { $( $unexpanded )* } $( $expanded )* $(#[$attr])* rpc $fn_name( $( $arg : $in_ ),* ) -> $out | $crate::util::Never; } }; // Pattern for when the next rpc has an implicit unit return type and an explicit error type. ( { $(#[$attr:meta])* rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) | $error:ty; $( $unexpanded:tt )* } $( $expanded:tt )* ) => { service! { { $( $unexpanded )* } $( $expanded )* $(#[$attr])* rpc $fn_name( $( $arg : $in_ ),* ) -> () | $error; } }; // Pattern for when the next rpc has an explicit return type and an explicit error type. ( { $(#[$attr:meta])* rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) -> $out:ty | $error:ty; $( $unexpanded:tt )* } $( $expanded:tt )* ) => { service! { { $( $unexpanded )* } $( $expanded )* $(#[$attr])* rpc $fn_name( $( $arg : $in_ ),* ) -> $out | $error; } }; // Pattern for when all return types have been expanded ( { } // none left to expand $( $(#[$attr:meta])* rpc $fn_name:ident ( $( $arg:ident : $in_:ty ),* ) -> $out:ty | $error:ty; )* ) => { /// Defines the `Future` RPC service. 
Implementors must be `Clone`, `Send`, and `'static`, /// as required by `tokio_proto::NewService`. This is required so that the service can be used /// to respond to multiple requests concurrently. pub trait FutureService: ::std::marker::Send + ::std::clone::Clone + 'static { $( snake_to_camel! { /// The type of future returned by the fn of the same name. type $fn_name: $crate::futures::Future<Item=$out, Error=$error> + Send; } $(#[$attr])* fn $fn_name(&self, $($arg:$in_),*) -> ty_snake_to_camel!(Self::$fn_name); )* } /// Provides a function for starting the service. This is a separate trait from /// `FutureService` to prevent collisions with the names of RPCs. pub trait FutureServiceExt: FutureService { /// Registers the service with the given registry, listening on the given address. fn listen<L>(self, addr: L) -> ::std::io::Result<$crate::tokio_proto::server::ServerHandle> where L: ::std::net::ToSocketAddrs { return $crate::listen(addr, __AsyncServer(self)); #[derive(Clone)] struct __AsyncServer<S>(S); impl<S> ::std::fmt::Debug for __AsyncServer<S> { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(fmt, "__AsyncServer {{ .. 
}}") } } #[allow(non_camel_case_types)] enum Reply<TyParamS: FutureService> { DeserializeError($crate::SerializeFuture), $($fn_name($crate::futures::Then<$crate::futures::MapErr<ty_snake_to_camel!(TyParamS::$fn_name), fn($error) -> $crate::WireError<$error>>, $crate::SerializeFuture, fn(::std::result::Result<$out, $crate::WireError<$error>>) -> $crate::SerializeFuture>)),* } impl<S: FutureService> $crate::futures::Future for Reply<S> { type Item = $crate::SerializedReply; type Error = ::std::io::Error; fn poll(&mut self) -> $crate::futures::Poll<Self::Item, Self::Error> { match *self { Reply::DeserializeError(ref mut f) => $crate::futures::Future::poll(f), $(Reply::$fn_name(ref mut f) => $crate::futures::Future::poll(f)),* } } } impl<S> $crate::tokio_service::Service for __AsyncServer<S> where S: FutureService { type Req = ::std::vec::Vec<u8>; type Resp = $crate::SerializedReply; type Error = ::std::io::Error; type Fut = Reply<S>; fn call(&self, req: Self::Req) -> Self::Fut { #[allow(non_camel_case_types, unused)] #[derive(Debug)] enum __ServerSideRequest { $( $fn_name(( $($in_,)* )) ),* } impl_deserialize!(__ServerSideRequest, $($fn_name(($($in_),*)))*); let request = $crate::deserialize(&req); let request: __ServerSideRequest = match request { ::std::result::Result::Ok(request) => request, ::std::result::Result::Err(e) => { return Reply::DeserializeError(deserialize_error(e)); } }; match request {$( __ServerSideRequest::$fn_name(( $($arg,)* )) => { const SERIALIZE: fn(::std::result::Result<$out, $crate::WireError<$error>>) -> $crate::SerializeFuture = $crate::serialize_reply; const TO_APP: fn($error) -> $crate::WireError<$error> = $crate::WireError::App; let reply = FutureService::$fn_name(&self.0, $($arg),*); let reply = $crate::futures::Future::map_err(reply, TO_APP); let reply = $crate::futures::Future::then(reply, SERIALIZE); return Reply::$fn_name(reply); } )*} #[inline] fn deserialize_error<E: ::std::error::Error>(e: E) -> $crate::SerializeFuture { let err 
= $crate::WireError::ServerDeserialize::<$crate::util::Never>(e.to_string()); $crate::serialize_reply(::std::result::Result::Err::<(), _>(err)) } } } } } /// Defines the blocking RPC service. Must be `Clone`, `Send`, and `'static`, /// as required by `tokio_proto::NewService`. This is required so that the service can be used /// to respond to multiple requests concurrently. pub trait SyncService: ::std::marker::Send + ::std::clone::Clone + 'static { $( $(#[$attr])* fn $fn_name(&self, $($arg:$in_),*) -> ::std::result::Result<$out, $error>; )* } /// Provides a function for starting the service. This is a separate trait from /// `SyncService` to prevent collisions with the names of RPCs. pub trait SyncServiceExt: SyncService { /// Registers the service with the given registry, listening on the given address. fn listen<L>(self, addr: L) -> ::std::io::Result<$crate::tokio_proto::server::ServerHandle> where L: ::std::net::ToSocketAddrs { let service = __SyncServer { service: self, }; return service.listen(addr); #[derive(Clone)] struct __SyncServer<S> { service: S, } impl<S> FutureService for __SyncServer<S> where S: SyncService { $( impl_snake_to_camel! { type $fn_name = $crate::futures::Flatten< $crate::futures::MapErr< $crate::futures::Oneshot< $crate::futures::Done<$out, $error>>, fn($crate::futures::Canceled) -> $error>>; } fn $fn_name(&self, $($arg:$in_),*) -> ty_snake_to_camel!(Self::$fn_name) { fn unimplemented(_: $crate::futures::Canceled) -> $error { // TODO(tikue): what do do if SyncService panics? 
unimplemented!() } let (c, p) = $crate::futures::oneshot(); let service = self.clone(); ::std::thread::spawn(move || { let reply = SyncService::$fn_name(&service.service, $($arg),*); c.complete($crate::futures::IntoFuture::into_future(reply)); }); let p = $crate::futures::Future::map_err(p, unimplemented as fn($crate::futures::Canceled) -> $error); $crate::futures::Future::flatten(p) } )* } } } impl<A> FutureServiceExt for A where A: FutureService {} impl<S> SyncServiceExt for S where S: SyncService {} #[allow(unused)] #[derive(Clone, Debug)] /// The client stub that makes RPC calls to the server. Exposes a blocking interface. pub struct SyncClient(FutureClient); impl $crate::sync::Connect for SyncClient { fn connect<A>(addr: A) -> ::std::result::Result<Self, ::std::io::Error> where A: ::std::net::ToSocketAddrs, { let mut addrs = try!(::std::net::ToSocketAddrs::to_socket_addrs(&addr)); let addr = if let ::std::option::Option::Some(a) = ::std::iter::Iterator::next(&mut addrs) { a } else { return ::std::result::Result::Err( ::std::io::Error::new( ::std::io::ErrorKind::AddrNotAvailable, "`ToSocketAddrs::to_socket_addrs` returned an empty iterator.")); }; let client = <FutureClient as $crate::future::Connect>::connect(&addr); let client = $crate::futures::Future::wait(client); let client = SyncClient(try!(client)); ::std::result::Result::Ok(client) } } impl SyncClient { $( #[allow(unused)] $(#[$attr])* #[inline] pub fn $fn_name(&self, $($arg: &$in_),*) -> ::std::result::Result<$out, $crate::Error<$error>> { let rpc = (self.0).$fn_name($($arg),*); $crate::futures::Future::wait(rpc) } )* } #[allow(unused)] #[derive(Clone, Debug)] /// The client stub that makes RPC calls to the server. Exposes a Future interface. 
pub struct FutureClient($crate::Client); impl $crate::future::Connect for FutureClient { type Fut = $crate::futures::Map<$crate::ClientFuture, fn($crate::Client) -> Self>; fn connect(addr: &::std::net::SocketAddr) -> Self::Fut { let client = <$crate::Client as $crate::future::Connect>::connect(addr); $crate::futures::Future::map(client, FutureClient) } } #[allow(non_camel_case_types, unused)] #[derive(Debug)] enum __ClientSideRequest<'a> { $( $fn_name(&'a ( $(&'a $in_,)* )) ),* } impl_serialize!(__ClientSideRequest, { <'__a> }, $($fn_name(($($in_),*)))*); impl FutureClient { $( #[allow(unused)] $(#[$attr])* #[inline] pub fn $fn_name(&self, $($arg: &$in_),*) -> impl $crate::futures::Future<Item=$out, Error=$crate::Error<$error>> + Send + 'static { future_enum! { enum Fut<C, F> { Called(C), Failed(F) } } let args = ($($arg,)*); let req = &__ClientSideRequest::$fn_name(&args); let req = match $crate::Packet::serialize(&req) { ::std::result::Result::Err(e) => return Fut::Failed($crate::futures::failed($crate::Error::ClientSerialize(e))), ::std::result::Result::Ok(req) => req, }; let fut = $crate::tokio_service::Service::call(&self.0, req); Fut::Called($crate::futures::Future::then(fut, move |msg| { let msg: Vec<u8> = try!(msg); let msg: ::std::result::Result<::std::result::Result<$out, $crate::WireError<$error>>, _> = $crate::deserialize(&msg); match msg { ::std::result::Result::Ok(msg) => ::std::result::Result::Ok(try!(msg)), ::std::result::Result::Err(e) => ::std::result::Result::Err($crate::Error::ClientDeserialize(e)), } })) } )* } } } // allow dead code; we're just testing that the macro expansion compiles #[allow(dead_code)] #[cfg(test)] mod syntax_test { use util::Never; service! 
{ rpc hello() -> String; #[doc="attr"] rpc attr(s: String) -> String; rpc no_args_no_return(); rpc no_args() -> (); rpc one_arg(foo: String) -> i32; rpc two_args_no_return(bar: String, baz: u64); rpc two_args(bar: String, baz: u64) -> String; rpc no_args_ret_error() -> i32 | Never; rpc one_arg_ret_error(foo: String) -> String | Never; rpc no_arg_implicit_return_error() | Never; #[doc="attr"] rpc one_arg_implicit_return_error(foo: String) | Never; } } #[cfg(test)] mod functional_test { use futures::{Future, failed}; extern crate env_logger; service! { rpc add(x: i32, y: i32) -> i32; rpc hey(name: String) -> String; } mod sync { use sync::Connect; use util::Never; use super::env_logger; use super::{SyncClient, SyncService, SyncServiceExt}; #[derive(Clone, Copy)] struct Server; impl SyncService for Server { fn add(&self, x: i32, y: i32) -> Result<i32, Never> { Ok(x + y) } fn hey(&self, name: String) -> Result<String, Never> { Ok(format!("Hey, {}.", name)) } } #[test] fn simple() { let _ = env_logger::init(); let handle = Server.listen("localhost:0").unwrap(); let client = SyncClient::connect(handle.local_addr()).unwrap(); assert_eq!(3, client.add(&1, &2).unwrap()); assert_eq!("Hey, Tim.", client.hey(&"Tim".to_string()).unwrap()); } #[test] fn clone() { let handle = Server.listen("localhost:0").unwrap(); let client1 = SyncClient::connect(handle.local_addr()).unwrap(); let client2 = client1.clone(); assert_eq!(3, client1.add(&1, &2).unwrap()); assert_eq!(3, client2.add(&1, &2).unwrap()); } #[test] fn other_service() { let _ = env_logger::init(); let handle = Server.listen("localhost:0").unwrap(); let client = super::other_service::SyncClient::connect(handle.local_addr()).unwrap(); match client.foo().err().unwrap() { ::Error::ServerDeserialize(_) => {} // good bad => panic!("Expected Error::ServerDeserialize but got {}", bad), } } } mod future { use future::Connect; use util::Never; use futures::{Finished, Future, finished}; use super::env_logger; use 
super::{FutureClient, FutureService, FutureServiceExt}; #[derive(Clone)] struct Server; impl FutureService for Server { type Add = Finished<i32, Never>; fn add(&self, x: i32, y: i32) -> Self::Add { finished(x + y) } type Hey = Finished<String, Never>; fn hey(&self, name: String) -> Self::Hey { finished(format!("Hey, {}.", name)) } } #[test] fn simple() { let _ = env_logger::init(); let handle = Server.listen("localhost:0").unwrap(); let client = FutureClient::connect(handle.local_addr()).wait().unwrap(); assert_eq!(3, client.add(&1, &2).wait().unwrap()); assert_eq!("Hey, Tim.", client.hey(&"Tim".to_string()).wait().unwrap()); } #[test] fn clone() { let _ = env_logger::init(); let handle = Server.listen("localhost:0").unwrap(); let client1 = FutureClient::connect(handle.local_addr()).wait().unwrap(); let client2 = client1.clone(); assert_eq!(3, client1.add(&1, &2).wait().unwrap()); assert_eq!(3, client2.add(&1, &2).wait().unwrap()); } #[test] fn other_service() { let _ = env_logger::init(); let handle = Server.listen("localhost:0").unwrap(); let client = super::other_service::FutureClient::connect(handle.local_addr()).wait().unwrap(); match client.foo().wait().err().unwrap() { ::Error::ServerDeserialize(_) => {} // good bad => panic!(r#"Expected Error::ServerDeserialize but got "{}""#, bad), } } } pub mod error_service { service! 
{ rpc bar() -> u32 | ::util::Message; } } #[derive(Clone)] struct ErrorServer; impl error_service::FutureService for ErrorServer { type Bar = ::futures::Failed<u32, ::util::Message>; fn bar(&self) -> Self::Bar { info!("Called bar"); failed("lol jk".into()) } } #[test] fn error() { use future::Connect as Fc; use sync::Connect as Sc; use std::error::Error as E; use self::error_service::*; let _ = env_logger::init(); let handle = ErrorServer.listen("localhost:0").unwrap(); let client = FutureClient::connect(handle.local_addr()).wait().unwrap(); client.bar() .then(move |result| { match result.err().unwrap() { ::Error::App(e) => { assert_eq!(e.description(), "lol jk"); Ok::<_, ()>(()) } // good bad => panic!("Expected Error::App but got {:?}", bad), } }) .wait() .unwrap(); let client = SyncClient::connect(handle.local_addr()).unwrap(); match client.bar().err().unwrap() { ::Error::App(e) => { assert_eq!(e.description(), "lol jk"); } // good bad => panic!("Expected Error::App but got {:?}", bad), } } pub mod other_service { service! { rpc foo(); } } }
// NOTE(review): register accessor boilerplate in svd2rust-generated style —
// presumably machine-generated; comments only, no code changes.
#[doc = "Reader of register BUFF_CTL"]
pub type R = crate::R<u32, super::BUFF_CTL>;
#[doc = "Writer for register BUFF_CTL"]
pub type W = crate::W<u32, super::BUFF_CTL>;
#[doc = "Register BUFF_CTL `reset()`'s with value 0x01"]
impl crate::ResetValue for super::BUFF_CTL {
    type Type = u32;
    // Hardware reset value of the register: WRITE_BUFF bit set.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x01
    }
}
#[doc = "Reader of field `WRITE_BUFF`"]
pub type WRITE_BUFF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `WRITE_BUFF`"]
pub struct WRITE_BUFF_W<'a> {
    w: &'a mut W,
}
impl<'a> WRITE_BUFF_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 0 only; all other bits are preserved.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
impl R {
    #[doc = "Bit 0 - Specifies if write transfer can be buffered in the bus infrastructure bridges: '0': Write transfers are not buffered, independent of the transfer's bufferable attribute. '1': Write transfers can be buffered, if the transfer's bufferable attribute indicates that the transfer is a bufferable/posted write."]
    #[inline(always)]
    pub fn write_buff(&self) -> WRITE_BUFF_R {
        // Extract bit 0 as a bool reader.
        WRITE_BUFF_R::new((self.bits & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Specifies if write transfer can be buffered in the bus infrastructure bridges: '0': Write transfers are not buffered, independent of the transfer's bufferable attribute. '1': Write transfers can be buffered, if the transfer's bufferable attribute indicates that the transfer is a bufferable/posted write."]
    #[inline(always)]
    pub fn write_buff(&mut self) -> WRITE_BUFF_W {
        WRITE_BUFF_W { w: self }
    }
}
// Copyright 2018 (c) rust-themis developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Secure Message system. //! //! **Secure Message** provides a sequence-independent, stateless, contextless messaging system. //! This may be preferred in cases that do not require frequent sequential message exchange and/or //! in low-bandwidth contexts. It is secure enough to exchange messages from time to time, but if //! you would like to have Perfect Forward Secrecy and higher security guarantees, please consider //! using [Secure Session] instead. //! //! Secure Message offers two modes of operation: //! //! - In **[Sign]/[Verify]** mode the message is signed using the private key //! of the sender and is verified by the receiver using the public key of //! the sender. The message is packed in a suitable container and ECDSA is //! used by default to sign the message (when RSA key is used, RSA+PSS+PKCS#7 //! digital signature is used). //! //! - In **[Encrypt/Decrypt]** mode the message will be encrypted with a randomly //! generated key (in RSA) or a key derived by ECDH (in ECDSA), via symmetric //! algorithm with Secure Cell in seal mode (keys are 256 bits long). //! //! [Here you can read more][wiki] about cryptographic internals of Secure Messages. //! //! [Secure Session]: ../secure_session/index.html //! [Sign]: struct.SecureSign.html //! [Verify]: struct.SecureVerify.html //! [Encrypt/Decrypt]: struct.SecureMessage.html //! 
[wiki]: https://github.com/cossacklabs/themis/wiki/Secure-Message-cryptosystem //! //! # Examples //! //! Basic operation of Secure Message looks like this: //! //! ``` //! # fn main() -> Result<(), themis::Error> { //! use themis::secure_message::SecureMessage; //! use themis::keygen::gen_ec_key_pair; //! //! let key_pair = gen_ec_key_pair(); //! //! let secure = SecureMessage::new(key_pair); //! //! let encrypted = secure.wrap(b"message")?; //! let decrypted = secure.unwrap(&encrypted)?; //! assert_eq!(decrypted, b"message"); //! # Ok(()) //! # } //! ``` //! //! You can find more examples for each operation mode in their respective documentation. use std::ptr; use bindings::{themis_secure_message_unwrap, themis_secure_message_wrap}; use crate::error::{Error, ErrorKind, Result}; use crate::keys::{KeyPair, PublicKey, SecretKey}; use crate::utils::into_raw_parts; /// Secure Message encryption and decryption. /// /// **Encrypted message** is useful when you need the full stack of protection for your data — /// in most cases you will be using this flavor. Encrypted messages currently use Secure Cell /// in sealing mode for data protection. /// /// # Examples /// /// In order to use Secure Message in encrypting mode you will need to have both public and /// secret keys available on both peers. Typical usage of Secure Message looks like this: /// /// ``` /// # fn main() -> Result<(), themis::Error> { /// use themis::secure_message::SecureMessage; /// use themis::keygen::gen_ec_key_pair; /// /// // Generate and share this key pair between peers. /// let key_pair = gen_ec_key_pair(); /// /// // Alice uses her own Secure Message instance to wrap (encrypt) messages. /// let secure_a = SecureMessage::new(key_pair.clone()); /// let encrypted = secure_a.wrap(b"message")?; /// /// // Bob uses his Secure Message instance to unwrap (decrypt) received messages. 
/// let secure_b = SecureMessage::new(key_pair.clone());
/// let decrypted = secure_b.unwrap(&encrypted)?;
///
/// assert_eq!(decrypted, b"message");
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct SecureMessage {
    key_pair: KeyPair,
}

impl SecureMessage {
    /// Makes a new Secure Message using given key pair.
    ///
    /// Both ECDSA and RSA key pairs are supported.
    pub fn new<K: Into<KeyPair>>(key_pair: K) -> Self {
        let key_pair = key_pair.into();
        SecureMessage { key_pair }
    }

    /// Wraps the provided message into a secure encrypted message.
    ///
    /// # Examples
    ///
    /// You can use anything convertible into a byte slice as a message: a byte slice or an array,
    /// a `Vec<u8>`, or a `String`.
    ///
    /// ```
    /// # fn main() -> Result<(), themis::Error> {
    /// use themis::secure_message::SecureMessage;
    /// use themis::keygen::gen_ec_key_pair;
    ///
    /// let secure = SecureMessage::new(gen_ec_key_pair());
    ///
    /// secure.wrap(b"byte string")?;
    /// secure.wrap(&[1, 2, 3, 4, 5])?;
    /// secure.wrap(vec![6, 7, 8, 9])?;
    /// secure.wrap(format!("owned string"))?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn wrap<M: AsRef<[u8]>>(&self, message: M) -> Result<Vec<u8>> {
        // Encryption mode uses both halves of the key pair.
        let secret = self.key_pair.secret_key_bytes();
        let public = self.key_pair.public_key_bytes();
        wrap(secret, public, message.as_ref())
    }

    /// Unwraps an encrypted message back into its original form.
    pub fn unwrap<M: AsRef<[u8]>>(&self, wrapped: M) -> Result<Vec<u8>> {
        // Decryption likewise needs both halves of the key pair.
        let secret = self.key_pair.secret_key_bytes();
        let public = self.key_pair.public_key_bytes();
        unwrap(secret, public, wrapped.as_ref())
    }
}

/// Secure Message signing.
///
/// **Signed message** is useful for cases where you don’t need data confidentiality. It allows
/// the receiver to verify the origin and integrity of the data while still allowing intermediate
/// nodes to process it accordingly (for example, route data based on its type).
///
/// Signatures can be checked with [`SecureVerify`].
///
/// [`SecureVerify`]: struct.SecureVerify.html
///
/// # Examples
///
/// In order to sign messages you need only the secret part of a key pair.
It does not need to be /// shared with your peer for verification. /// /// ``` /// # fn main() -> Result<(), themis::Error> { /// use themis::secure_message::SecureSign; /// use themis::secure_message::SecureVerify; /// use themis::keygen::gen_rsa_key_pair; /// /// // Alice generates a key pair and shares `public` part with Bob /// let (secret, public) = gen_rsa_key_pair().split(); /// /// // Alice is able to sign her messages with her secret key. /// let secure_a = SecureSign::new(secret); /// let signed_message = secure_a.sign(b"important message")?; /// /// // Bob is able to verify that signature on the message matches. /// let secure_b = SecureVerify::new(public); /// let received_message = secure_b.verify(&signed_message)?; /// assert_eq!(received_message, b"important message"); /// # Ok(()) /// # } /// ``` /// /// Note that the signed message is _not encrypted_ and contains the original data as plain text: /// /// ``` /// # use themis::secure_message::SecureSign; /// # use themis::secure_message::SecureVerify; /// # use themis::keygen::gen_rsa_key_pair; /// # /// # let (secret, _) = gen_rsa_key_pair().split(); /// # let secure = SecureSign::new(secret); /// # let signed_message = secure.sign(b"important message").unwrap(); /// # /// let message = b"important message"; /// /// assert!(signed_message.windows(message.len()).any(|subslice| subslice == message)); /// ``` #[derive(Clone)] pub struct SecureSign { secret_key: SecretKey, } impl SecureSign { /// Makes a new Secure Message using given secret key. /// /// Both ECDSA and RSA keys are supported. pub fn new<S: Into<SecretKey>>(secret_key: S) -> Self { Self { secret_key: secret_key.into(), } } /// Securely signs a message and returns it with signature attached. /// /// # Examples /// /// You can use anything convertible into a byte slice as a message: a byte slice or an array, /// a `Vec<u8>`, or a `String`. 
/// /// ``` /// # fn main() -> Result<(), themis::Error> { /// use themis::secure_message::SecureSign; /// use themis::keygen::gen_ec_key_pair; /// /// let secure = SecureSign::new(gen_ec_key_pair().split().0); /// /// secure.sign(b"byte string")?; /// secure.sign(&[1, 2, 3, 4, 5])?; /// secure.sign(vec![6, 7, 8, 9])?; /// secure.sign(format!("owned string"))?; /// # Ok(()) /// # } /// ``` pub fn sign<M: AsRef<[u8]>>(&self, message: M) -> Result<Vec<u8>> { wrap(self.secret_key.as_ref(), &[], message.as_ref()) } } // TODO: provide a way to inspect signed messages // // It would be nice to be able to get access to plaintext data in messages returned by SecureSign. // Consider returning something like SignedMessage which is a newtype over Vec<u8> with additional // utility methods and impls like AsRef<[u8]> and Into<Vec<u8>>. /// Secure Message verification. /// /// **Signed message** is useful for cases where you don’t need data confidentiality. It allows /// the receiver to verify the origin and integrity of the data while still allowing intermediate /// nodes to process it accordingly (for example, route data based on its type). /// /// Verifies signatures produced by [`SecureSign`]. /// /// [`SecureSign`]: struct.SecureSign.html /// /// # Examples /// /// In order to verify signed messages you need the public part of a key pair corresponding to the /// secret key used by your peer to sign messages. /// /// ``` /// # fn main() -> Result<(), themis::Error> { /// use themis::secure_message::SecureSign; /// use themis::secure_message::SecureVerify; /// use themis::keygen::gen_ec_key_pair; /// /// // Alice generates a key pair and shares `public` part with Bob /// let (secret, public) = gen_ec_key_pair().split(); /// /// // Alice is able to sign her messages with her secret key. /// let secure_a = SecureSign::new(secret); /// let signed_message = secure_a.sign(b"important message")?; /// /// // Bob is able to verify that signature on the message matches. 
/// let secure_b = SecureVerify::new(public);
/// let received_message = secure_b.verify(&signed_message)?;
/// assert_eq!(received_message, b"important message");
/// # Ok(())
/// # }
/// ```
///
/// Secure Message guarantees integrity of the message and identity of its author.
///
/// ```
/// # use themis::secure_message::SecureSign;
/// # use themis::secure_message::SecureVerify;
/// # use themis::keygen::gen_ec_key_pair;
/// #
/// # let (secret, public) = gen_ec_key_pair().split();
/// # let secure_a = SecureSign::new(secret);
/// # let secure_b = SecureVerify::new(public);
/// # let signed_message = secure_a.sign(b"important message").unwrap();
/// #
/// // Let's flip some bits somewhere.
/// let mut corrupted_message = signed_message.clone();
/// corrupted_message[20] = !corrupted_message[20];
///
/// // Bob is able to see that the message has been tampered.
/// assert!(secure_b.verify(&corrupted_message).is_err());
///
/// // Only Alice's public key verifies the message, any other key won't do.
/// let (_, carol_public_key) = gen_ec_key_pair().split();
/// let secure_c = SecureVerify::new(carol_public_key);
///
/// assert!(secure_c.verify(&signed_message).is_err());
/// ```
#[derive(Clone)]
pub struct SecureVerify {
    public_key: PublicKey,
}

impl SecureVerify {
    /// Makes a new Secure Message using given public key.
    ///
    /// Both ECDSA and RSA keys are supported.
    pub fn new<P: Into<PublicKey>>(public_key: P) -> Self {
        let public_key = public_key.into();
        SecureVerify { public_key }
    }

    /// Verifies the signature and returns the original message.
    pub fn verify<M: AsRef<[u8]>>(&self, message: M) -> Result<Vec<u8>> {
        // Verification uses the public key only — no secret key attached.
        let data = message.as_ref();
        unwrap(&[], self.public_key.as_ref(), data)
    }
}

/// Wrap a message into a secure message.
// Internal helper shared by SecureMessage::wrap and SecureSign::sign.
// Either key slice may be empty: signing passes an empty public key.
fn wrap(secret_key: &[u8], public_key: &[u8], message: &[u8]) -> Result<Vec<u8>> {
    let (secret_key_ptr, secret_key_len) = into_raw_parts(secret_key);
    let (public_key_ptr, public_key_len) = into_raw_parts(public_key);
    let (message_ptr, message_len) = into_raw_parts(message);

    let mut wrapped = Vec::new();
    let mut wrapped_len = 0;
    unsafe {
        // Probe call with a null output buffer: on success Themis stores the
        // required buffer size into `wrapped_len` and returns BufferTooSmall.
        // Any other status is a genuine failure and is propagated.
        let status = themis_secure_message_wrap(
            secret_key_ptr,
            secret_key_len,
            public_key_ptr,
            public_key_len,
            message_ptr,
            message_len,
            ptr::null_mut(),
            &mut wrapped_len,
        );
        let error = Error::from_themis_status(status);
        if error.kind() != ErrorKind::BufferTooSmall {
            return Err(error);
        }
    }

    wrapped.reserve(wrapped_len);
    unsafe {
        // Second call writes the wrapped message into the buffer reserved above.
        let status = themis_secure_message_wrap(
            secret_key_ptr,
            secret_key_len,
            public_key_ptr,
            public_key_len,
            message_ptr,
            message_len,
            wrapped.as_mut_ptr(),
            &mut wrapped_len,
        );
        let error = Error::from_themis_status(status);
        if error.kind() != ErrorKind::Success {
            return Err(error);
        }
        // SAFETY: on Success Themis has initialized exactly `wrapped_len` bytes
        // of the buffer, and `reserve` guaranteed at least that much capacity.
        debug_assert!(wrapped_len <= wrapped.capacity());
        wrapped.set_len(wrapped_len as usize);
    }

    Ok(wrapped)
}
/// Unwrap a secure message into a message.
// Internal helper shared by SecureMessage::unwrap and SecureVerify::verify.
// Either key slice may be empty: verification passes an empty secret key.
fn unwrap(secret_key: &[u8], public_key: &[u8], wrapped: &[u8]) -> Result<Vec<u8>> {
    let (secret_key_ptr, secret_key_len) = into_raw_parts(secret_key);
    let (public_key_ptr, public_key_len) = into_raw_parts(public_key);
    let (wrapped_ptr, wrapped_len) = into_raw_parts(wrapped);

    let mut message = Vec::new();
    let mut message_len = 0;
    unsafe {
        // Probe call with a null output buffer: on success Themis stores the
        // required buffer size into `message_len` and returns BufferTooSmall.
        // Any other status is a genuine failure and is propagated.
        let status = themis_secure_message_unwrap(
            secret_key_ptr,
            secret_key_len,
            public_key_ptr,
            public_key_len,
            wrapped_ptr,
            wrapped_len,
            ptr::null_mut(),
            &mut message_len,
        );
        let error = Error::from_themis_status(status);
        if error.kind() != ErrorKind::BufferTooSmall {
            return Err(error);
        }
    }

    message.reserve(message_len);
    unsafe {
        // Second call writes the decoded message into the buffer reserved above.
        let status = themis_secure_message_unwrap(
            secret_key_ptr,
            secret_key_len,
            public_key_ptr,
            public_key_len,
            wrapped_ptr,
            wrapped_len,
            message.as_mut_ptr(),
            &mut message_len,
        );
        let error = Error::from_themis_status(status);
        if error.kind() != ErrorKind::Success {
            return Err(error);
        }
        // SAFETY: on Success Themis has initialized exactly `message_len` bytes
        // of the buffer, and `reserve` guaranteed at least that much capacity.
        debug_assert!(message_len <= message.capacity());
        message.set_len(message_len as usize);
    }

    Ok(message)
}
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// NOTE(review): generated data models mirroring the Azure Machine Learning
// REST API JSON schema. `rename` attributes map camelCase wire names to
// snake_case fields; `type` becomes `type_` because `type` is a keyword.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
pub mod operation {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Workspace {
    // Common ARM resource envelope (id/name/type), flattened into this object.
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceProperties {
    #[serde(rename = "workspaceId", default, skip_serializing_if = "Option::is_none")]
    pub workspace_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<String>,
    #[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")]
    pub application_insights: Option<String>,
    #[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")]
    pub container_registry: Option<String>,
    #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<String>,
    #[serde(rename = "discoveryUrl", default, skip_serializing_if = "Option::is_none")]
    pub discovery_url: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<workspace_properties::ProvisioningState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionProperty>,
    #[serde(rename = "hbiWorkspace", default, skip_serializing_if = "Option::is_none")]
    pub hbi_workspace: Option<bool>,
    #[serde(rename = "serviceProvisionedResourceGroup", default, skip_serializing_if = "Option::is_none")]
    pub service_provisioned_resource_group: Option<String>,
    #[serde(rename = "privateLinkCount", default, skip_serializing_if = "Option::is_none")]
    pub private_link_count: Option<i32>,
    #[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
    pub image_build_compute: Option<String>,
    #[serde(rename = "allowPublicAccessWhenBehindVnet", default, skip_serializing_if = "Option::is_none")]
    pub allow_public_access_when_behind_vnet: Option<bool>,
    #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
    pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
    #[serde(rename = "sharedPrivateLinkResources", default, skip_serializing_if = "Vec::is_empty")]
    pub shared_private_link_resources: Vec<SharedPrivateLinkResource>,
    #[serde(rename = "notebookInfo", default, skip_serializing_if = "Option::is_none")]
    pub notebook_info: Option<NotebookResourceInfo>,
    #[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
    pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
    #[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub primary_user_assigned_identity: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
}
pub mod workspace_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Unknown,
        Updating,
        Creating,
        Deleting,
        Succeeded,
        Failed,
        Canceled,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspacePropertiesUpdateParameters>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspacePropertiesUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
    pub image_build_compute: Option<String>,
    #[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
    pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
    #[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub primary_user_assigned_identity: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
    pub aml_workspace_location: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<usage::Unit>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<UsageName>,
}
pub mod usage {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListUsagesResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Usage>,
    // Continuation link for paged responses.
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSize {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    #[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")]
    pub v_cp_us: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gpus: Option<i32>,
    #[serde(rename = "osVhdSizeMB", default, skip_serializing_if = "Option::is_none")]
    pub os_vhd_size_mb: Option<i32>,
    #[serde(rename = "maxResourceVolumeMB", default, skip_serializing_if = "Option::is_none")]
    pub max_resource_volume_mb: Option<i32>,
    #[serde(rename = "memoryGB", default, skip_serializing_if = "Option::is_none")]
    pub memory_gb: Option<f64>,
    #[serde(rename = "lowPriorityCapable", default, skip_serializing_if = "Option::is_none")]
    pub low_priority_capable: Option<bool>,
    #[serde(rename = "premiumIO", default, skip_serializing_if = "Option::is_none")]
    pub premium_io: Option<bool>,
    #[serde(rename = "estimatedVMPrices", default, skip_serializing_if = "Option::is_none")]
    pub estimated_vm_prices: Option<EstimatedVmPrices>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrices {
    // Required fields: no `default`/`skip_serializing_if`, always on the wire.
    #[serde(rename = "billingCurrency")]
    pub billing_currency: estimated_vm_prices::BillingCurrency,
    #[serde(rename = "unitOfMeasure")]
    pub unit_of_measure: estimated_vm_prices::UnitOfMeasure,
    pub values: Vec<EstimatedVmPrice>,
}
pub mod estimated_vm_prices {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum BillingCurrency {
        #[serde(rename = "USD")]
        Usd,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UnitOfMeasure {
        OneHour,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrice {
    #[serde(rename = "retailPrice")]
    pub retail_price: f64,
    #[serde(rename = "osType")]
    pub os_type: estimated_vm_price::OsType,
    #[serde(rename = "vmTier")]
    pub vm_tier: estimated_vm_price::VmTier,
}
pub mod estimated_vm_price {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OsType {
        Linux,
        Windows,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum VmTier {
        Standard,
        LowPriority,
        Spot,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSizeListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualMachineSize>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Workspace>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaBaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<quota_base_properties::Unit>,
}
pub mod quota_base_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaUpdateParameters {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<QuotaBaseProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotasResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<UpdateWorkspaceQuotas>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotas {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<update_workspace_quotas::Unit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<update_workspace_quotas::Status>,
}
pub mod update_workspace_quotas {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Undefined,
        Success,
        Failure,
        InvalidQuotaBelowClusterMinimum,
        InvalidQuotaExceedsSubscriptionLimit,
        #[serde(rename = "InvalidVMFamilyName")]
        InvalidVmFamilyName,
        OperationNotSupportedForSku,
        OperationNotEnabledForRegion,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceQuota {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
    pub aml_workspace_location: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<ResourceName>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<resource_quota::Unit>,
}
pub mod resource_quota {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceQuotas {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ResourceQuota>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Identity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<identity::Type>,
    #[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
    pub user_assigned_identities: Option<UserAssignedIdentities>,
}
pub mod identity {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        SystemAssigned,
        // Wire value contains a comma, which cannot appear in a Rust identifier.
        #[serde(rename = "SystemAssigned,UserAssigned")]
        SystemAssignedUserAssigned,
        UserAssigned,
        None,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentities {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceId {
    pub id: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceKeysResult {
    #[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
    pub user_storage_key: Option<String>,
    #[serde(rename = "userStorageResourceId", default, skip_serializing_if = "Option::is_none")]
    pub user_storage_resource_id: Option<String>,
    #[serde(rename = "appInsightsInstrumentationKey", default, skip_serializing_if = "Option::is_none")]
    pub app_insights_instrumentation_key: Option<String>,
    #[serde(rename = "containerRegistryCredentials", default, skip_serializing_if = "Option::is_none")]
    pub container_registry_credentials: Option<RegistryListCredentialsResult>,
    #[serde(rename = "notebookAccessKeys", default, skip_serializing_if = "Option::is_none")]
    pub notebook_access_keys: Option<ListNotebookKeysResult>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotebookAccessTokenResult {
    #[serde(rename = "notebookResourceId", default, skip_serializing_if = "Option::is_none")]
    pub notebook_resource_id: Option<String>,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<String>,
    #[serde(rename = "publicDns", default, skip_serializing_if = "Option::is_none")]
    pub public_dns: Option<String>,
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<String>,
    #[serde(rename = "tokenType", default, skip_serializing_if = "Option::is_none")]
    pub token_type: Option<String>,
    #[serde(rename = "expiresIn", default, skip_serializing_if = "Option::is_none")]
    pub expires_in: Option<i32>,
    #[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
    pub refresh_token: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegistryListCredentialsResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub passwords: Vec<Password>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Password {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PaginatedComputeResourcesList { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ComputeResource>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeResource { #[serde(flatten)] pub resource: Resource, #[serde(flatten)] pub serde_json_value: serde_json::Value, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<Identity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Compute { #[serde(rename = "computeType")] pub compute_type: ComputeType, #[serde(rename = "computeLocation", default, skip_serializing_if = "Option::is_none")] pub compute_location: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<compute::ProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")] pub created_on: Option<String>, #[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")] pub modified_on: Option<String>, #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(rename = "provisioningErrors", default, skip_serializing_if = "Vec::is_empty")] pub provisioning_errors: Vec<ErrorResponse>, #[serde(rename = "isAttachedCompute", 
default, skip_serializing_if = "Option::is_none")] pub is_attached_compute: Option<bool>, #[serde(rename = "disableLocalAuth", default, skip_serializing_if = "Option::is_none")] pub disable_local_auth: Option<bool>, } pub mod compute { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Unknown, Updating, Creating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Aks { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlCompute { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstance { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachine { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct HdInsight { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataFactory { #[serde(flatten)] pub compute: Compute, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Databricks { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataLakeAnalytics { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SynapseSpark { #[serde(flatten)] pub compute: Compute, #[serde(flatten)] pub 
synapse_spark_pool_properties: SynapseSparkPoolProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServicePrincipalCredentials { #[serde(rename = "clientId")] pub client_id: String, #[serde(rename = "clientSecret")] pub client_secret: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SystemService { #[serde(rename = "systemServiceType", default, skip_serializing_if = "Option::is_none")] pub system_service_type: Option<String>, #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")] pub public_ip_address: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SslConfiguration { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<ssl_configuration::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub cert: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub cname: Option<String>, #[serde(rename = "leafDomainLabel", default, skip_serializing_if = "Option::is_none")] pub leaf_domain_label: Option<String>, #[serde(rename = "overwriteExistingDomain", default, skip_serializing_if = "Option::is_none")] pub overwrite_existing_domain: Option<bool>, } pub mod ssl_configuration { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Disabled, Enabled, Auto, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AksNetworkingConfiguration { #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")] pub subnet_id: Option<String>, #[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")] pub service_cidr: Option<String>, #[serde(rename = "dnsServiceIP", default, skip_serializing_if = "Option::is_none")] pub 
dns_service_ip: Option<String>, #[serde(rename = "dockerBridgeCidr", default, skip_serializing_if = "Option::is_none")] pub docker_bridge_cidr: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserAccountCredentials { #[serde(rename = "adminUserName")] pub admin_user_name: String, #[serde(rename = "adminUserSshPublicKey", default, skip_serializing_if = "Option::is_none")] pub admin_user_ssh_public_key: Option<String>, #[serde(rename = "adminUserPassword", default, skip_serializing_if = "Option::is_none")] pub admin_user_password: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScaleSettings { #[serde(rename = "maxNodeCount")] pub max_node_count: i32, #[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")] pub min_node_count: Option<i32>, #[serde(rename = "nodeIdleTimeBeforeScaleDown", default, skip_serializing_if = "Option::is_none")] pub node_idle_time_before_scale_down: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineImage { pub id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NodeStateCounts { #[serde(rename = "idleNodeCount", default, skip_serializing_if = "Option::is_none")] pub idle_node_count: Option<i32>, #[serde(rename = "runningNodeCount", default, skip_serializing_if = "Option::is_none")] pub running_node_count: Option<i32>, #[serde(rename = "preparingNodeCount", default, skip_serializing_if = "Option::is_none")] pub preparing_node_count: Option<i32>, #[serde(rename = "unusableNodeCount", default, skip_serializing_if = "Option::is_none")] pub unusable_node_count: Option<i32>, #[serde(rename = "leavingNodeCount", default, skip_serializing_if = "Option::is_none")] pub leaving_node_count: Option<i32>, #[serde(rename = "preemptedNodeCount", default, skip_serializing_if = "Option::is_none")] pub preempted_node_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, 
Serialize, Deserialize)] pub struct ClusterUpdateProperties { #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")] pub scale_settings: Option<ScaleSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClusterUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ClusterUpdateProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeNodesInformation { #[serde(rename = "computeType")] pub compute_type: ComputeType, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlComputeNodesInformation { #[serde(flatten)] pub compute_nodes_information: ComputeNodesInformation, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlComputeNodeInformation { #[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")] pub node_id: Option<String>, #[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")] pub private_ip_address: Option<String>, #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")] pub public_ip_address: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub port: Option<f64>, #[serde(rename = "nodeState", default, skip_serializing_if = "Option::is_none")] pub node_state: Option<aml_compute_node_information::NodeState>, #[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")] pub run_id: Option<String>, } pub mod aml_compute_node_information { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum NodeState { #[serde(rename = "idle")] Idle, #[serde(rename = "running")] Running, #[serde(rename = "preparing")] Preparing, #[serde(rename = "unusable")] Unusable, #[serde(rename 
= "leaving")] Leaving, #[serde(rename = "preempted")] Preempted, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSshCredentials { #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub password: Option<String>, #[serde(rename = "publicKeyData", default, skip_serializing_if = "Option::is_none")] pub public_key_data: Option<String>, #[serde(rename = "privateKeyData", default, skip_serializing_if = "Option::is_none")] pub private_key_data: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeSecrets { #[serde(rename = "computeType")] pub compute_type: ComputeType, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AksComputeSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualMachineSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatabricksComputeSecrets { #[serde(flatten)] pub compute_secrets: ComputeSecrets, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ComputeType { #[serde(rename = "AKS")] Aks, AmlCompute, ComputeInstance, DataFactory, VirtualMachine, #[serde(rename = "HDInsight")] HdInsight, Databricks, DataLakeAnalytics, SynapseSpark, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sku { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tier: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionListResult { 
#[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateEndpointConnection>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnection { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateEndpointConnectionProperties>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<Identity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpointConnectionProperties { #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")] pub private_endpoint: Option<PrivateEndpoint>, #[serde(rename = "privateLinkServiceConnectionState")] pub private_link_service_connection_state: PrivateLinkServiceConnectionState, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "subnetArmId", default, skip_serializing_if = "Option::is_none")] pub subnet_arm_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkServiceConnectionState { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<PrivateEndpointServiceConnectionStatus>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, 
#[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")] pub actions_required: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum PrivateEndpointServiceConnectionStatus { Pending, Approved, Rejected, Disconnected, Timeout, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum PrivateEndpointConnectionProvisioningState { Succeeded, Creating, Deleting, Failed, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResourceListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<PrivateLinkResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PrivateLinkResourceProperties>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<Identity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PrivateLinkResourceProperties { #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")] pub group_id: Option<String>, #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")] pub required_members: Vec<String>, #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")] pub required_zone_names: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SharedPrivateLinkResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: 
Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<SharedPrivateLinkResourceProperty>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SharedPrivateLinkResourceProperty { #[serde(rename = "privateLinkResourceId", default, skip_serializing_if = "Option::is_none")] pub private_link_resource_id: Option<String>, #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")] pub group_id: Option<String>, #[serde(rename = "requestMessage", default, skip_serializing_if = "Option::is_none")] pub request_message: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<PrivateEndpointServiceConnectionStatus>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EncryptionProperty { pub status: encryption_property::Status, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<IdentityForCmk>, #[serde(rename = "keyVaultProperties")] pub key_vault_properties: KeyVaultProperties, } pub mod encryption_property { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Enabled, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KeyVaultProperties { #[serde(rename = "keyVaultArmId")] pub key_vault_arm_id: String, #[serde(rename = "keyIdentifier")] pub key_identifier: String, #[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")] pub identity_client_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IdentityForCmk { #[serde(rename = "userAssignedIdentity", default, skip_serializing_if = "Option::is_none")] pub user_assigned_identity: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerResourceRequirements { #[serde(default, skip_serializing_if = "Option::is_none")] pub cpu: Option<f64>, #[serde(rename = "cpuLimit", default, 
skip_serializing_if = "Option::is_none")] pub cpu_limit: Option<f64>, #[serde(rename = "memoryInGB", default, skip_serializing_if = "Option::is_none")] pub memory_in_gb: Option<f64>, #[serde(rename = "memoryInGBLimit", default, skip_serializing_if = "Option::is_none")] pub memory_in_gb_limit: Option<f64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub gpu: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub fpga: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstanceSshSettings { #[serde(rename = "sshPublicAccess", default, skip_serializing_if = "Option::is_none")] pub ssh_public_access: Option<compute_instance_ssh_settings::SshPublicAccess>, #[serde(rename = "adminUserName", default, skip_serializing_if = "Option::is_none")] pub admin_user_name: Option<String>, #[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")] pub ssh_port: Option<i32>, #[serde(rename = "adminPublicKey", default, skip_serializing_if = "Option::is_none")] pub admin_public_key: Option<String>, } pub mod compute_instance_ssh_settings { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SshPublicAccess { Enabled, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ComputeInstanceState { Creating, CreateFailed, Deleting, Running, Restarting, JobRunning, SettingUp, SetupFailed, Starting, Stopped, Stopping, UserSettingUp, UserSetupFailed, Unknown, Unusable, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstanceLastOperation { #[serde(rename = "operationName", default, skip_serializing_if = "Option::is_none")] pub operation_name: Option<compute_instance_last_operation::OperationName>, #[serde(rename = "operationTime", default, skip_serializing_if = "Option::is_none")] pub operation_time: Option<String>, #[serde(rename = "operationStatus", default, skip_serializing_if = "Option::is_none")] pub 
operation_status: Option<compute_instance_last_operation::OperationStatus>, } pub mod compute_instance_last_operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OperationName { Create, Start, Stop, Restart, Reimage, Delete, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OperationStatus { InProgress, Succeeded, CreateFailed, StartFailed, StopFailed, RestartFailed, ReimageFailed, DeleteFailed, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstanceApplication { #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")] pub endpoint_uri: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstanceConnectivityEndpoints { #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")] pub public_ip_address: Option<String>, #[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")] pub private_ip_address: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeInstanceCreatedBy { #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")] pub user_name: Option<String>, #[serde(rename = "userOrgId", default, skip_serializing_if = "Option::is_none")] pub user_org_id: Option<String>, #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")] pub user_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PersonalComputeInstanceSettings { #[serde(rename = "assignedUser", default, skip_serializing_if = "Option::is_none")] pub assigned_user: Option<AssignedUser>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AssignedUser { #[serde(rename = "objectId")] pub object_id: String, #[serde(rename = "tenantId")] pub 
tenant_id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServiceManagedResourcesSettings { #[serde(rename = "cosmosDb", default, skip_serializing_if = "Option::is_none")] pub cosmos_db: Option<CosmosDbSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CosmosDbSettings { #[serde(rename = "collectionsThroughput", default, skip_serializing_if = "Option::is_none")] pub collections_throughput: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NotebookResourceInfo { #[serde(default, skip_serializing_if = "Option::is_none")] pub fqdn: Option<String>, #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(rename = "notebookPreparationError", default, skip_serializing_if = "Option::is_none")] pub notebook_preparation_error: Option<NotebookPreparationError>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NotebookPreparationError { #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")] pub error_message: Option<String>, #[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")] pub status_code: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ListNotebookKeysResult { #[serde(rename = "primaryAccessKey", default, skip_serializing_if = "Option::is_none")] pub primary_access_key: Option<String>, #[serde(rename = "secondaryAccessKey", default, skip_serializing_if = "Option::is_none")] pub secondary_access_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ListStorageAccountKeysResult { #[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")] pub user_storage_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PaginatedWorkspaceConnectionsList { #[serde(default, skip_serializing_if = 
"Vec::is_empty")] pub value: Vec<WorkspaceConnection>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspaceConnection { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkspaceConnectionProps>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkspaceConnectionProps { #[serde(default, skip_serializing_if = "Option::is_none")] pub category: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(rename = "authType", default, skip_serializing_if = "Option::is_none")] pub auth_type: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, #[serde(rename = "valueFormat", default, skip_serializing_if = "Option::is_none")] pub value_format: Option<workspace_connection_props::ValueFormat>, } pub mod workspace_connection_props { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ValueFormat { #[serde(rename = "JSON")] Json, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SetupScripts { #[serde(default, skip_serializing_if = "Option::is_none")] pub scripts: Option<ScriptsToExecute>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScriptsToExecute { #[serde(rename = "startupScript", default, skip_serializing_if = "Option::is_none")] pub startup_script: Option<ScriptReference>, #[serde(rename = "creationScript", default, skip_serializing_if = "Option::is_none")] pub creation_script: Option<ScriptReference>, } #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub struct ScriptReference { #[serde(rename = "scriptSource", default, skip_serializing_if = "Option::is_none")] pub script_source: Option<String>, #[serde(rename = "scriptData", default, skip_serializing_if = "Option::is_none")] pub script_data: Option<String>, #[serde(rename = "scriptArguments", default, skip_serializing_if = "Option::is_none")] pub script_arguments: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AutoScaleProperties { #[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")] pub min_node_count: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub enabled: Option<bool>, #[serde(rename = "maxNodeCount", default, skip_serializing_if = "Option::is_none")] pub max_node_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AutoPauseProperties { #[serde(rename = "delayInMinutes", default, skip_serializing_if = "Option::is_none")] pub delay_in_minutes: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub enabled: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SynapseSparkPoolProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<synapse_spark_pool_properties::Properties>, } pub mod synapse_spark_pool_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Properties { #[serde(rename = "autoScaleProperties", default, skip_serializing_if = "Option::is_none")] pub auto_scale_properties: Option<AutoScaleProperties>, #[serde(rename = "autoPauseProperties", default, skip_serializing_if = "Option::is_none")] pub auto_pause_properties: Option<AutoPauseProperties>, #[serde(rename = "sparkVersion", default, skip_serializing_if = "Option::is_none")] pub spark_version: 
Option<String>, #[serde(rename = "nodeCount", default, skip_serializing_if = "Option::is_none")] pub node_count: Option<i32>, #[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")] pub node_size: Option<String>, #[serde(rename = "nodeSizeFamily", default, skip_serializing_if = "Option::is_none")] pub node_size_family: Option<String>, #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")] pub subscription_id: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, #[serde(rename = "workspaceName", default, skip_serializing_if = "Option::is_none")] pub workspace_name: Option<String>, #[serde(rename = "poolName", default, skip_serializing_if = "Option::is_none")] pub pool_name: Option<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeStartStopSchedule { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(rename = "provisioningStatus", default, skip_serializing_if = "Option::is_none")] pub provisioning_status: Option<compute_start_stop_schedule::ProvisioningStatus>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<ScheduleStatus>, #[serde(rename = "triggerType", default, skip_serializing_if = "Option::is_none")] pub trigger_type: Option<TriggerType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<ComputePowerAction>, #[serde(default, skip_serializing_if = "Option::is_none")] pub recurrence: Option<Recurrence>, #[serde(default, skip_serializing_if = "Option::is_none")] pub cron: Option<Cron>, } pub mod compute_start_stop_schedule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningStatus { Completed, Provisioning, Failed, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScheduleStatus { Enabled, Disabled, } #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub enum ScheduleType { ComputeStartStop, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ComputePowerAction { Start, Stop, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum TriggerType { Recurrence, Cron, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RecurrenceFrequency { NotSpecified, Second, Minute, Hour, Day, Week, Month, Year, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecurrenceSchedule { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub minutes: Vec<i32>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub hours: Vec<i32>, #[serde(rename = "weekDays", default, skip_serializing_if = "Vec::is_empty")] pub week_days: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Recurrence { #[serde(default, skip_serializing_if = "Option::is_none")] pub frequency: Option<RecurrenceFrequency>, #[serde(default, skip_serializing_if = "Option::is_none")] pub interval: Option<i32>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "timeZone", default, skip_serializing_if = "Option::is_none")] pub time_zone: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub schedule: Option<RecurrenceSchedule>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Cron { #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "timeZone", default, skip_serializing_if = "Option::is_none")] pub time_zone: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expression: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeSchedules { #[serde(rename = "computeStartStop", default, skip_serializing_if = "Vec::is_empty")] pub compute_start_stop: 
Vec<ComputeStartStopSchedule>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccountKeyDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, #[serde(default, skip_serializing_if = "Option::is_none")] pub secrets: Option<AccountKeyDatastoreSecrets>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccountKeyDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AmlToken { #[serde(flatten)] pub identity_configuration: IdentityConfiguration, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AssetReferenceBase { #[serde(rename = "referenceType")] pub reference_type: ReferenceType, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AutoScaleSettings { #[serde(flatten)] pub online_scale_settings: OnlineScaleSettings, #[serde(rename = "pollingInterval", default, skip_serializing_if = "Option::is_none")] pub polling_interval: Option<String>, #[serde(rename = "targetUtilizationPercentage", default, skip_serializing_if = "Option::is_none")] pub target_utilization_percentage: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBlobContents { #[serde(flatten)] pub datastore_contents: DatastoreContents, #[serde(rename = "accountName")] pub account_name: String, #[serde(rename = "containerName")] pub container_name: String, pub credentials: DatastoreCredentials, pub endpoint: String, pub protocol: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureDataLakeGen1Contents { #[serde(flatten)] pub datastore_contents: DatastoreContents, pub credentials: DatastoreCredentials, #[serde(rename = "storeName")] pub store_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct 
AzureDataLakeGen2Contents { #[serde(flatten)] pub datastore_contents: DatastoreContents, #[serde(rename = "accountName")] pub account_name: String, #[serde(rename = "containerName")] pub container_name: String, pub credentials: DatastoreCredentials, pub endpoint: String, pub protocol: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureFileContents { #[serde(flatten)] pub datastore_contents: DatastoreContents, #[serde(rename = "accountName")] pub account_name: String, #[serde(rename = "containerName")] pub container_name: String, pub credentials: DatastoreCredentials, pub endpoint: String, pub protocol: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzurePostgreSqlContents { #[serde(flatten)] pub datastore_contents: DatastoreContents, pub credentials: DatastoreCredentials, #[serde(rename = "databaseName")] pub database_name: String, #[serde(rename = "enableSSL", default, skip_serializing_if = "Option::is_none")] pub enable_ssl: Option<bool>, pub endpoint: String, #[serde(rename = "portNumber")] pub port_number: i32, #[serde(rename = "serverName")] pub server_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureSqlDatabaseContents { #[serde(flatten)] pub datastore_contents: DatastoreContents, pub credentials: DatastoreCredentials, #[serde(rename = "databaseName")] pub database_name: String, pub endpoint: String, #[serde(rename = "portNumber")] pub port_number: i32, #[serde(rename = "serverName")] pub server_name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BanditPolicy { #[serde(flatten)] pub early_termination_policy: EarlyTerminationPolicy, #[serde(rename = "slackAmount", default, skip_serializing_if = "Option::is_none")] pub slack_amount: Option<f32>, #[serde(rename = "slackFactor", default, skip_serializing_if = "Option::is_none")] pub slack_factor: Option<f32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
struct BatchDeployment { #[serde(rename = "codeConfiguration", default, skip_serializing_if = "Option::is_none")] pub code_configuration: Option<CodeConfiguration>, #[serde(default, skip_serializing_if = "Option::is_none")] pub compute: Option<ComputeConfiguration>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "environmentId", default, skip_serializing_if = "Option::is_none")] pub environment_id: Option<String>, #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")] pub environment_variables: Option<serde_json::Value>, #[serde(rename = "errorThreshold", default, skip_serializing_if = "Option::is_none")] pub error_threshold: Option<i32>, #[serde(rename = "loggingLevel", default, skip_serializing_if = "Option::is_none")] pub logging_level: Option<BatchLoggingLevel>, #[serde(rename = "miniBatchSize", default, skip_serializing_if = "Option::is_none")] pub mini_batch_size: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub model: Option<AssetReferenceBase>, #[serde(rename = "outputConfiguration", default, skip_serializing_if = "Option::is_none")] pub output_configuration: Option<BatchOutputConfiguration>, #[serde(rename = "partitionKeys", default, skip_serializing_if = "Vec::is_empty")] pub partition_keys: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "retrySettings", default, skip_serializing_if = "Option::is_none")] pub retry_settings: Option<BatchRetrySettings>, }
/// ARM tracked-resource envelope whose required `properties` payload is a `BatchDeployment`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchDeploymentTrackedResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, pub properties: BatchDeployment, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `BatchDeploymentTrackedResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchDeploymentTrackedResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BatchDeploymentTrackedResource>, }
/// Batch endpoint configuration: auth mode/keys, scoring and swagger URIs, traffic map.
/// All fields are optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchEndpoint { #[serde(rename = "authMode", default, skip_serializing_if = "Option::is_none")] pub auth_mode: Option<EndpointAuthMode>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub keys: Option<EndpointAuthKeys>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "scoringUri", default, skip_serializing_if = "Option::is_none")] pub scoring_uri: Option<String>, #[serde(rename = "swaggerUri", default, skip_serializing_if = "Option::is_none")] pub swagger_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub traffic: Option<serde_json::Value>, }
/// ARM tracked-resource envelope whose required `properties` payload is a `BatchEndpoint`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchEndpointTrackedResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, pub properties: BatchEndpoint, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `BatchEndpointTrackedResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchEndpointTrackedResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BatchEndpointTrackedResource>, }
/// Log verbosity levels for batch inference (serialized as the variant names).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BatchLoggingLevel { Info, Warning, Debug, }
/// How batch scoring output is written: summary only, or rows appended to a file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BatchOutputAction { SummaryOnly, AppendRow, }
/// Output settings for a batch run: optional append-row file name and output action.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchOutputConfiguration { #[serde(rename = "appendRowFileName", default, skip_serializing_if = "Option::is_none")] pub append_row_file_name: Option<String>, #[serde(rename = "outputAction", default, skip_serializing_if = "Option::is_none")] pub output_action: Option<BatchOutputAction>, }
/// Retry settings for batch scoring; `timeout` is a string (presumably ISO-8601 duration — TODO confirm against the API spec).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BatchRetrySettings { #[serde(rename = "maxRetries", default, skip_serializing_if = "Option::is_none")] pub max_retries: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, }
/// Certificate-based (service principal) datastore credentials; client/tenant id and
/// thumbprint are required, the secret material lives in `secrets`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, #[serde(rename = "authorityUrl", default, skip_serializing_if = "Option::is_none")] pub authority_url: Option<String>, #[serde(rename = "clientId")] pub client_id: String, #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")] pub resource_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub secrets: Option<CertificateDatastoreSecrets>, #[serde(rename = "tenantId")] pub tenant_id: String, pub thumbprint: String, }
/// Secret payload for certificate credentials: the optional certificate itself.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, #[serde(default, skip_serializing_if = "Option::is_none")] pub certificate: Option<String>, }
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CocoExportSummary { #[serde(flatten)] pub
export_summary: ExportSummary, #[serde(rename = "containerName", default, skip_serializing_if = "Option::is_none")] pub container_name: Option<String>, #[serde(rename = "snapshotPath", default, skip_serializing_if = "Option::is_none")] pub snapshot_path: Option<String>, }
/// Scoring-code configuration: optional code asset id plus the required script name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeConfiguration { #[serde(rename = "codeId", default, skip_serializing_if = "Option::is_none")] pub code_id: Option<String>, #[serde(rename = "scoringScript")] pub scoring_script: String, }
/// Code asset container: description plus free-form `properties`/`tags` JSON objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeContainer { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying a `CodeContainer` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeContainerResource { #[serde(flatten)] pub resource: Resource, pub properties: CodeContainer, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `CodeContainerResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeContainerResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<CodeContainerResource>, }
/// A version of a code asset; only `path` is required, the rest is optional metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeVersion { #[serde(rename = "datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "isAnonymous", default, skip_serializing_if = "Option::is_none")] pub is_anonymous: Option<bool>, pub path: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying a `CodeVersion` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeVersionResource { #[serde(flatten)] pub resource: Resource, pub properties: CodeVersion, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `CodeVersionResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CodeVersionResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<CodeVersionResource>, }
/// Command job definition: common `JobBase` fields plus the required `command` and
/// `compute`, with optional code/environment/distribution/data-binding settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CommandJob { #[serde(flatten)] pub job_base: JobBase, #[serde(rename = "codeId", default, skip_serializing_if = "Option::is_none")] pub code_id: Option<String>, pub command: String, pub compute: ComputeConfiguration, #[serde(default, skip_serializing_if = "Option::is_none")] pub distribution: Option<DistributionConfiguration>, #[serde(rename = "environmentId", default, skip_serializing_if = "Option::is_none")] pub environment_id: Option<String>, #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")] pub environment_variables: Option<serde_json::Value>, #[serde(rename = "experimentName", default, skip_serializing_if = "Option::is_none")] pub experiment_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<IdentityConfiguration>, #[serde(rename = "inputDataBindings", default, skip_serializing_if = "Option::is_none")] pub input_data_bindings: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub output: Option<JobOutput>, #[serde(rename = "outputDataBindings", default, skip_serializing_if = "Option::is_none")] pub
output_data_bindings: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub parameters: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<JobStatus>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, }
/// Compute binding for a job/deployment: instance count/type, locality, and target.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ComputeConfiguration { #[serde(rename = "instanceCount", default, skip_serializing_if = "Option::is_none")] pub instance_count: Option<i32>, #[serde(rename = "instanceType", default, skip_serializing_if = "Option::is_none")] pub instance_type: Option<String>, #[serde(rename = "isLocal", default, skip_serializing_if = "Option::is_none")] pub is_local: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, }
/// Container whose logs can be requested (see `DeploymentLogsRequest`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ContainerType { StorageInitializer, InferenceServer, }
/// Discriminator for `DatastoreContents` variants (`contentsType` on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ContentsType { AzureBlob, AzureDataLakeGen1, AzureDataLakeGen2, AzureFile, AzureMySql, AzurePostgreSql, AzureSqlDatabase, GlusterFs, }
/// Discriminator for `DatastoreCredentials` variants (`credentialsType` on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum CredentialsType { AccountKey, Certificate, None, Sas, ServicePrincipal, SqlAdmin, }
/// CSV-format labeling export summary: common `ExportSummary` plus container/snapshot.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CsvExportSummary { #[serde(flatten)] pub export_summary: ExportSummary, #[serde(rename = "containerName", default, skip_serializing_if = "Option::is_none")] pub container_name: Option<String>, #[serde(rename = "snapshotPath", default, skip_serializing_if = "Option::is_none")] pub snapshot_path: Option<String>, }
/// How a data binding is materialized on compute: mounted, downloaded, or uploaded.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DataBindingMode { Mount, Download, Upload, }
/// Data asset container: description plus free-form `properties`/`tags` JSON objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataContainer { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying a `DataContainer` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataContainerResource { #[serde(flatten)] pub resource: Resource, pub properties: DataContainer, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `DataContainerResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataContainerResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<DataContainerResource>, }
/// Asset reference addressed by datastore id + path (extends `AssetReferenceBase`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataPathAssetReference { #[serde(flatten)] pub asset_reference_base: AssetReferenceBase, #[serde(rename = "datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option<String>, }
/// A version of a data asset; only `path` is required, the rest is optional metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataVersion { #[serde(rename = "datasetType", default, skip_serializing_if = "Option::is_none")] pub dataset_type: Option<DatasetType>, #[serde(rename = "datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "isAnonymous", default, skip_serializing_if = "Option::is_none")] pub is_anonymous: Option<bool>, pub path: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying a `DataVersion` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataVersionResource { #[serde(flatten)] pub resource: Resource, pub properties: DataVersion, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `DataVersionResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DataVersionResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<DataVersionResource>, }
/// Dataset-format labeling export summary: common `ExportSummary` plus the asset name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatasetExportSummary { #[serde(flatten)] pub export_summary: ExportSummary, #[serde(rename = "labeledAssetName", default, skip_serializing_if = "Option::is_none")] pub labeled_asset_name: Option<String>, }
/// Dataset flavor: a simple file dataset or a dataflow.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DatasetType { Simple, Dataflow, }
/// Base of the datastore-contents hierarchy; `contentsType` is the type discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastoreContents { #[serde(rename = "contentsType")] pub contents_type: ContentsType, }
/// Base of the datastore-credentials hierarchy; `credentialsType` is the discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastoreCredentials { #[serde(rename = "credentialsType")] pub credentials_type: CredentialsType, }
/// Datastore definition: required `contents` plus validation/default flags and metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastoreProperties { pub contents: DatastoreContents, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "hasBeenValidated", default, skip_serializing_if = "Option::is_none")] pub has_been_validated: Option<bool>, #[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")] pub is_default: Option<bool>, #[serde(rename =
"linkedInfo", default, skip_serializing_if = "Option::is_none")] pub linked_info: Option<LinkedInfo>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying `DatastoreProperties` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastorePropertiesResource { #[serde(flatten)] pub resource: Resource, pub properties: DatastoreProperties, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `DatastorePropertiesResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastorePropertiesResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<DatastorePropertiesResource>, }
/// Base of the datastore-secrets hierarchy; `secretsType` is the type discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DatastoreSecrets { #[serde(rename = "secretsType")] pub secrets_type: SecretsType, }
/// Response body for a deployment-log request: the raw log text, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentLogs { #[serde(default, skip_serializing_if = "Option::is_none")] pub content: Option<String>, }
/// Request body for fetching deployment logs: which container, and how many tail lines.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentLogsRequest { #[serde(rename = "containerType", default, skip_serializing_if = "Option::is_none")] pub container_type: Option<ContainerType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tail: Option<i32>, }
/// Lifecycle states reported for a deployment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DeploymentProvisioningState { Creating, Deleting, Scaling, Updating, Succeeded, Failed, Canceled, }
/// Base of the distributed-training config hierarchy; `distributionType` discriminates.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DistributionConfiguration { #[serde(rename = "distributionType")] pub distribution_type: DistributionType, }
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum
DistributionType { PyTorch, TensorFlow, Mpi, }
/// Docker environment built from a Dockerfile; `dockerfile` content is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DockerBuild { #[serde(flatten)] pub docker_specification: DockerSpecification, #[serde(default, skip_serializing_if = "Option::is_none")] pub context: Option<String>, pub dockerfile: String, }
/// Docker environment referencing a prebuilt image by URI.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DockerImage { #[serde(flatten)] pub docker_specification: DockerSpecification, #[serde(rename = "dockerImageUri")] pub docker_image_uri: String, }
/// Platform information for a Docker image (operating-system type).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DockerImagePlatform { #[serde(rename = "operatingSystemType", default, skip_serializing_if = "Option::is_none")] pub operating_system_type: Option<OperatingSystemType>, }
/// Base of the Docker spec hierarchy; `dockerSpecificationType` is the discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DockerSpecification { #[serde(rename = "dockerSpecificationType")] pub docker_specification_type: DockerSpecificationType, #[serde(default, skip_serializing_if = "Option::is_none")] pub platform: Option<DockerImagePlatform>, }
/// Discriminator for `DockerSpecification`: built from source vs. prebuilt image.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DockerSpecificationType { Build, Image, }
/// Common early-termination policy fields; `policyType` discriminates the concrete kind.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EarlyTerminationPolicy { #[serde(rename = "delayEvaluation", default, skip_serializing_if = "Option::is_none")] pub delay_evaluation: Option<i32>, #[serde(rename = "evaluationInterval", default, skip_serializing_if = "Option::is_none")] pub evaluation_interval: Option<i32>, #[serde(rename = "policyType")] pub policy_type: EarlyTerminationPolicyType, }
/// Discriminator for `EarlyTerminationPolicy` variants.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EarlyTerminationPolicyType { Bandit, MedianStopping, TruncationSelection, }
/// Key pair used for endpoint authentication (both keys optional in responses).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointAuthKeys { #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")] pub primary_key: Option<String>, #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")] pub secondary_key: Option<String>, }
/// Endpoint auth mode; note the explicit wire spellings `AMLToken` / `AADToken`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EndpointAuthMode { #[serde(rename = "AMLToken")] AmlToken, Key, #[serde(rename = "AADToken")] AadToken, }
/// Token-based endpoint credentials with expiry/refresh timestamps (epoch `i64`s).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointAuthToken { #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")] pub access_token: Option<String>, #[serde(rename = "expiryTimeUtc", default, skip_serializing_if = "Option::is_none")] pub expiry_time_utc: Option<i64>, #[serde(rename = "refreshAfterTimeUtc", default, skip_serializing_if = "Option::is_none")] pub refresh_after_time_utc: Option<i64>, #[serde(rename = "tokenType", default, skip_serializing_if = "Option::is_none")] pub token_type: Option<String>, }
/// Compute type backing an online endpoint; note wire spellings `K8S` / `AzureMLCompute`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EndpointComputeType { Managed, #[serde(rename = "K8S")] K8s, #[serde(rename = "AzureMLCompute")] AzureMlCompute, }
/// Lifecycle states reported for an endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EndpointProvisioningState { Creating, Deleting, Succeeded, Failed, Updating, Canceled, }
/// Environment asset container: description plus free-form `properties`/`tags`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentContainer { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying an `EnvironmentContainer` as its `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentContainerResource { #[serde(flatten)] pub resource: Resource, pub properties: EnvironmentContainer, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `EnvironmentContainerResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentContainerResourceArmPaginatedResult { #[serde(rename =
"nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<EnvironmentContainerResource>, }
/// Whether an environment is service-curated or user-created.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum EnvironmentSpecificationType { Curated, UserCreated, }
/// A version of an environment spec: conda file, Docker settings, inference routes,
/// and metadata — all optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentSpecificationVersion { #[serde(rename = "condaFile", default, skip_serializing_if = "Option::is_none")] pub conda_file: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub docker: Option<DockerSpecification>, #[serde(rename = "environmentSpecificationType", default, skip_serializing_if = "Option::is_none")] pub environment_specification_type: Option<EnvironmentSpecificationType>, #[serde(rename = "inferenceContainerProperties", default, skip_serializing_if = "Option::is_none")] pub inference_container_properties: Option<InferenceContainerProperties>, #[serde(rename = "isAnonymous", default, skip_serializing_if = "Option::is_none")] pub is_anonymous: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying an `EnvironmentSpecificationVersion` as `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentSpecificationVersionResource { #[serde(flatten)] pub resource: Resource, pub properties: EnvironmentSpecificationVersion, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `EnvironmentSpecificationVersionResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EnvironmentSpecificationVersionResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<EnvironmentSpecificationVersionResource>, }
/// Labeling-export output format; note the wire spelling `CSV`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ExportFormatType { Dataset, Coco, #[serde(rename = "CSV")] Csv, }
/// Common export-summary fields; `format` is required, timing/count fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExportSummary { #[serde(rename = "endTimeUtc", default, skip_serializing_if = "Option::is_none")] pub end_time_utc: Option<String>, #[serde(rename = "exportedRowCount", default, skip_serializing_if = "Option::is_none")] pub exported_row_count: Option<i64>, pub format: ExportFormatType, #[serde(rename = "labelingJobId", default, skip_serializing_if = "Option::is_none")] pub labeling_job_id: Option<String>, #[serde(rename = "startTimeUtc", default, skip_serializing_if = "Option::is_none")] pub start_time_utc: Option<String>, }
/// Opaque flavor payload: a free-form JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct FlavorData { #[serde(default, skip_serializing_if = "Option::is_none")] pub data: Option<serde_json::Value>, }
/// Datastore contents for a GlusterFS volume (server address + volume name required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GlusterFsContents { #[serde(flatten)] pub datastore_contents: DatastoreContents, #[serde(rename = "serverAddress")] pub server_address: String, #[serde(rename = "volumeName")] pub volume_name: String, }
/// Optimization direction for a sweep objective.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Goal { Minimize, Maximize, }
/// Asset reference addressed by ARM asset id (extends `AssetReferenceBase`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IdAssetReference { #[serde(flatten)] pub asset_reference_base: AssetReferenceBase, #[serde(rename = "assetId")] pub asset_id: String, }
/// Base of the identity-config hierarchy; `identityType` is the type discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IdentityConfiguration { #[serde(rename = "identityType")] pub identity_type: IdentityConfigurationType, }
/// Identity kind; note the wire spelling `AMLToken`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum IdentityConfigurationType { Managed, #[serde(rename = "AMLToken")] AmlToken, }
/// Annotation types for image labeling jobs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ImageAnnotationType { Classification,
BoundingBox, InstanceSegmentation, }
/// Liveness/readiness/scoring routes exposed by an inference container.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InferenceContainerProperties { #[serde(rename = "livenessRoute", default, skip_serializing_if = "Option::is_none")] pub liveness_route: Option<Route>, #[serde(rename = "readinessRoute", default, skip_serializing_if = "Option::is_none")] pub readiness_route: Option<Route>, #[serde(rename = "scoringRoute", default, skip_serializing_if = "Option::is_none")] pub scoring_route: Option<Route>, }
/// Binding of an input data asset onto compute: asset id, binding mode, and path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InputDataBinding { #[serde(rename = "dataId", default, skip_serializing_if = "Option::is_none")] pub data_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option<DataBindingMode>, #[serde(rename = "pathOnCompute", default, skip_serializing_if = "Option::is_none")] pub path_on_compute: Option<String>, }
/// Common job fields; `jobType` is the required discriminator for concrete job kinds.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobBase { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "interactionEndpoints", default, skip_serializing_if = "Option::is_none")] pub interaction_endpoints: Option<serde_json::Value>, #[serde(rename = "jobType")] pub job_type: JobType, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<JobProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// ARM resource envelope carrying a `JobBase` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobBaseResource { #[serde(flatten)] pub resource: Resource, pub properties: JobBase, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, }
/// One page of `JobBaseResource` values plus an optional `nextLink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobBaseResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<JobBaseResource>, }
/// A service endpoint exposed by a job (e.g. for interaction), all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option<String>, #[serde(rename = "jobEndpointType", default, skip_serializing_if = "Option::is_none")] pub job_endpoint_type: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub port: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, }
/// Where a job writes its output: datastore id + path, both optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobOutput { #[serde(rename = "datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option<String>, }
/// Provisioning states for a job resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum JobProvisioningState { Succeeded, Failed, Canceled, InProgress, }
/// Runtime states a job can report.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum JobStatus { NotStarted, Starting, Provisioning, Preparing, Queued, Running, Finalizing, CancelRequested, Completed, Failed, Canceled, NotResponding, Paused, Unknown, }
/// Discriminator for concrete job kinds (see `JobBase::job_type`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum JobType { Command, Sweep, Labeling, }
/// Kubernetes-hosted online deployment: common `OnlineDeployment` fields plus
/// optional container resource requirements.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct K8sOnlineDeployment { #[serde(flatten)] pub online_deployment: OnlineDeployment, #[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")] pub container_resource_requirements: Option<ContainerResourceRequirements>, }
/// Selects which endpoint auth key to regenerate/use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum KeyType { Primary, Secondary, }
/// A category of labels in a labeling job (multi-select flag, classes, display name).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelCategory
{ #[serde(rename = "allowMultiSelect", default, skip_serializing_if = "Option::is_none")] pub allow_multi_select: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub classes: Option<serde_json::Value>, #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, }
/// A single label class with an optional display name and nested subclasses.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelClass { #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub subclasses: Option<serde_json::Value>, }
/// Dataset settings for a labeling job: asset name/version and incremental refresh flag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingDatasetConfiguration { #[serde(rename = "assetName", default, skip_serializing_if = "Option::is_none")] pub asset_name: Option<String>, #[serde(rename = "datasetVersion", default, skip_serializing_if = "Option::is_none")] pub dataset_version: Option<String>, #[serde(rename = "incrementalDatasetRefreshEnabled", default, skip_serializing_if = "Option::is_none")] pub incremental_dataset_refresh_enabled: Option<bool>, }
/// Labeling job definition. Unlike other job kinds this does not flatten `JobBase`,
/// but it carries the same required `jobType` discriminator plus labeling-specific
/// configuration (dataset, instructions, categories, media, ML-assist, progress).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJob { #[serde(rename = "createdTimeUtc", default, skip_serializing_if = "Option::is_none")] pub created_time_utc: Option<String>, #[serde(rename = "datasetConfiguration", default, skip_serializing_if = "Option::is_none")] pub dataset_configuration: Option<LabelingDatasetConfiguration>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "interactionEndpoints", default, skip_serializing_if = "Option::is_none")] pub interaction_endpoints: Option<serde_json::Value>, #[serde(rename = "jobInstructions", default, skip_serializing_if = "Option::is_none")] pub job_instructions: Option<LabelingJobInstructions>, #[serde(rename = "jobType")] pub job_type: JobType, #[serde(rename = "labelCategories", default, skip_serializing_if = "Option::is_none")] pub label_categories: Option<serde_json::Value>, #[serde(rename = "labelingJobMediaProperties", default, skip_serializing_if = "Option::is_none")] pub labeling_job_media_properties: Option<LabelingJobMediaProperties>, #[serde(rename = "mlAssistConfiguration", default, skip_serializing_if = "Option::is_none")] pub ml_assist_configuration: Option<MlAssistConfiguration>, #[serde(rename = "progressMetrics", default, skip_serializing_if = "Option::is_none")] pub progress_metrics: Option<ProgressMetrics>, #[serde(rename = "projectId", default, skip_serializing_if = "Option::is_none")] pub project_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<JobProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<JobStatus>, #[serde(rename = "statusMessages", default, skip_serializing_if = "Vec::is_empty")] pub status_messages: Vec<StatusMessage>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, }
/// Image-media labeling properties: common media fields plus image annotation type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobImageProperties { #[serde(flatten)] pub labeling_job_media_properties: LabelingJobMediaProperties, #[serde(rename = "annotationType", default, skip_serializing_if = "Option::is_none")] pub annotation_type: Option<ImageAnnotationType>, }
/// Link to labeling instructions for annotators.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobInstructions { #[serde(default, skip_serializing_if = "Option::is_none")] pub uri: Option<String>, }
/// Base of the labeling-media hierarchy; `mediaType` is the type discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobMediaProperties { #[serde(rename = "mediaType")] pub media_type: MediaType, }
/// ARM resource envelope carrying a `LabelingJob` as its required `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobResource { #[serde(flatten)] pub
resource: Resource, pub properties: LabelingJob, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<LabelingJobResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LabelingJobTextProperties { #[serde(flatten)] pub labeling_job_media_properties: LabelingJobMediaProperties, #[serde(rename = "annotationType", default, skip_serializing_if = "Option::is_none")] pub annotation_type: Option<TextAnnotationType>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LinkedInfo { #[serde(rename = "linkedId", default, skip_serializing_if = "Option::is_none")] pub linked_id: Option<String>, #[serde(rename = "linkedResourceName", default, skip_serializing_if = "Option::is_none")] pub linked_resource_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub origin: Option<OriginType>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MlAssistConfiguration { #[serde(rename = "inferencingComputeBinding", default, skip_serializing_if = "Option::is_none")] pub inferencing_compute_binding: Option<ComputeConfiguration>, #[serde(rename = "mlAssistEnabled", default, skip_serializing_if = "Option::is_none")] pub ml_assist_enabled: Option<bool>, #[serde(rename = "trainingComputeBinding", default, skip_serializing_if = "Option::is_none")] pub training_compute_binding: Option<ComputeConfiguration>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedIdentity { #[serde(flatten)] pub identity_configuration: IdentityConfiguration, #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")] pub 
client_id: Option<String>, #[serde(rename = "objectId", default, skip_serializing_if = "Option::is_none")] pub object_id: Option<String>, #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedOnlineDeployment { #[serde(flatten)] pub online_deployment: OnlineDeployment, #[serde(rename = "instanceType", default, skip_serializing_if = "Option::is_none")] pub instance_type: Option<String>, #[serde(rename = "readinessProbe", default, skip_serializing_if = "Option::is_none")] pub readiness_probe: Option<ProbeSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManualScaleSettings { #[serde(flatten)] pub online_scale_settings: OnlineScaleSettings, #[serde(rename = "instanceCount", default, skip_serializing_if = "Option::is_none")] pub instance_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum MediaType { Image, Text, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MedianStoppingPolicy { #[serde(flatten)] pub early_termination_policy: EarlyTerminationPolicy, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelContainer { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelContainerResource { #[serde(flatten)] pub resource: Resource, pub properties: ModelContainer, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelContainerResourceArmPaginatedResult { #[serde(rename = 
"nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ModelContainerResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelVersion { #[serde(rename = "datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub flavors: Option<serde_json::Value>, #[serde(rename = "isAnonymous", default, skip_serializing_if = "Option::is_none")] pub is_anonymous: Option<bool>, pub path: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelVersionResource { #[serde(flatten)] pub resource: Resource, pub properties: ModelVersion, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ModelVersionResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ModelVersionResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Mpi { #[serde(flatten)] pub distribution_configuration: DistributionConfiguration, #[serde(rename = "processCountPerInstance", default, skip_serializing_if = "Option::is_none")] pub process_count_per_instance: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NoneDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub secrets: Option<NoneDatastoreSecrets>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NoneDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Objective { pub goal: Goal, #[serde(rename = "primaryMetric")] pub primary_metric: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineDeployment { #[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")] pub app_insights_enabled: Option<bool>, #[serde(rename = "codeConfiguration", default, skip_serializing_if = "Option::is_none")] pub code_configuration: Option<CodeConfiguration>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "endpointComputeType")] pub endpoint_compute_type: EndpointComputeType, #[serde(rename = "environmentId", default, skip_serializing_if = "Option::is_none")] pub environment_id: Option<String>, #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")] pub environment_variables: Option<serde_json::Value>, #[serde(rename = "livenessProbe", default, skip_serializing_if = "Option::is_none")] pub liveness_probe: Option<ProbeSettings>, #[serde(default, skip_serializing_if = "Option::is_none")] pub model: Option<AssetReferenceBase>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<DeploymentProvisioningState>, #[serde(rename = "requestSettings", default, skip_serializing_if = "Option::is_none")] pub request_settings: Option<OnlineRequestSettings>, #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")] pub scale_settings: Option<OnlineScaleSettings>, } #[derive(Clone, Debug, PartialEq, 
Serialize, Deserialize)] pub struct OnlineDeploymentTrackedResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, pub properties: OnlineDeployment, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineDeploymentTrackedResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<OnlineDeploymentTrackedResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineEndpoint { #[serde(rename = "authMode")] pub auth_mode: EndpointAuthMode, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub keys: Option<EndpointAuthKeys>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<serde_json::Value>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<EndpointProvisioningState>, #[serde(rename = "scoringUri", default, skip_serializing_if = "Option::is_none")] pub scoring_uri: Option<String>, #[serde(rename = "swaggerUri", default, skip_serializing_if = "Option::is_none")] pub swagger_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub traffic: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineEndpointTrackedResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, pub properties: OnlineEndpoint, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineEndpointTrackedResourceArmPaginatedResult { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<OnlineEndpointTrackedResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineRequestSettings { #[serde(rename = "maxConcurrentRequestsPerInstance", default, skip_serializing_if = "Option::is_none")] pub max_concurrent_requests_per_instance: Option<i32>, #[serde(rename = "maxQueueWait", default, skip_serializing_if = "Option::is_none")] pub max_queue_wait: Option<String>, #[serde(rename = "requestTimeout", default, skip_serializing_if = "Option::is_none")] pub request_timeout: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OnlineScaleSettings { #[serde(rename = "maxInstances", default, skip_serializing_if = "Option::is_none")] pub max_instances: Option<i32>, #[serde(rename = "minInstances", default, skip_serializing_if = "Option::is_none")] pub min_instances: Option<i32>, #[serde(rename = "scaleType")] pub scale_type: ScaleType, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OperatingSystemType { Linux, Windows, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OrderString { CreatedAtDesc, CreatedAtAsc, UpdatedAtDesc, UpdatedAtAsc, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OriginType { Synapse, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OutputDataBinding { #[serde(rename = 
"datastoreId", default, skip_serializing_if = "Option::is_none")] pub datastore_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option<DataBindingMode>, #[serde(rename = "pathOnCompute", default, skip_serializing_if = "Option::is_none")] pub path_on_compute: Option<String>, #[serde(rename = "pathOnDatastore", default, skip_serializing_if = "Option::is_none")] pub path_on_datastore: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OutputPathAssetReference { #[serde(flatten)] pub asset_reference_base: AssetReferenceBase, #[serde(rename = "jobId", default, skip_serializing_if = "Option::is_none")] pub job_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialAksOnlineDeployment { #[serde(flatten)] pub partial_online_deployment: PartialOnlineDeployment, #[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")] pub container_resource_requirements: Option<ContainerResourceRequirements>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialBatchDeployment { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialBatchDeploymentPartialTrackedResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PartialBatchDeployment>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub 
struct PartialBatchEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub traffic: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialBatchEndpointPartialTrackedResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PartialBatchEndpoint>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialManagedOnlineDeployment { #[serde(flatten)] pub partial_online_deployment: PartialOnlineDeployment, #[serde(rename = "readinessProbe", default, skip_serializing_if = "Option::is_none")] pub readiness_probe: Option<ProbeSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialOnlineDeployment { #[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")] pub app_insights_enabled: Option<bool>, #[serde(rename = "endpointComputeType")] pub endpoint_compute_type: EndpointComputeType, #[serde(rename = "livenessProbe", default, skip_serializing_if = "Option::is_none")] pub liveness_probe: Option<ProbeSettings>, #[serde(rename = "requestSettings", default, skip_serializing_if = "Option::is_none")] pub request_settings: Option<OnlineRequestSettings>, #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")] pub scale_settings: Option<OnlineScaleSettings>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialOnlineDeploymentPartialTrackedResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PartialOnlineDeployment>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialOnlineEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] pub traffic: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PartialOnlineEndpointPartialTrackedResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<ResourceIdentity>, #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<PartialOnlineEndpoint>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProbeSettings { #[serde(rename = "failureThreshold", default, skip_serializing_if = "Option::is_none")] pub failure_threshold: Option<i32>, #[serde(rename = "initialDelay", default, skip_serializing_if = "Option::is_none")] pub initial_delay: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub period: Option<String>, #[serde(rename = "successThreshold", default, skip_serializing_if = "Option::is_none")] pub success_threshold: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProgressMetrics { #[serde(rename = "completedDatapointCount", default, skip_serializing_if = "Option::is_none")] pub completed_datapoint_count: 
Option<i64>, #[serde(rename = "incrementalDatasetLastRefreshTime", default, skip_serializing_if = "Option::is_none")] pub incremental_dataset_last_refresh_time: Option<String>, #[serde(rename = "skippedDatapointCount", default, skip_serializing_if = "Option::is_none")] pub skipped_datapoint_count: Option<i64>, #[serde(rename = "totalDatapointCount", default, skip_serializing_if = "Option::is_none")] pub total_datapoint_count: Option<i64>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PyTorch { #[serde(flatten)] pub distribution_configuration: DistributionConfiguration, #[serde(rename = "processCount", default, skip_serializing_if = "Option::is_none")] pub process_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ReferenceType { Id, DataPath, OutputPath, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegenerateEndpointKeysRequest { #[serde(rename = "keyType")] pub key_type: KeyType, #[serde(rename = "keyValue", default, skip_serializing_if = "Option::is_none")] pub key_value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceIdentity { #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")] pub principal_id: Option<String>, #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")] pub tenant_id: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<ResourceIdentityAssignment>, #[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")] pub user_assigned_identities: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceIdentityAssignment { SystemAssigned, UserAssigned, #[serde(rename = "SystemAssigned,UserAssigned")] SystemAssignedUserAssigned, None, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Route { pub path: String, 
pub port: i32, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SamplingAlgorithm { Grid, Random, Bayesian, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SasDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, #[serde(default, skip_serializing_if = "Option::is_none")] pub secrets: Option<SasDatastoreSecrets>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SasDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")] pub sas_token: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScaleType { Auto, Manual, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SecretsType { AccountKey, Certificate, None, Sas, ServicePrincipal, SqlAdmin, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServicePrincipalDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, #[serde(rename = "authorityUrl", default, skip_serializing_if = "Option::is_none")] pub authority_url: Option<String>, #[serde(rename = "clientId")] pub client_id: String, #[serde(rename = "resourceUri", default, skip_serializing_if = "Option::is_none")] pub resource_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub secrets: Option<ServicePrincipalDatastoreSecrets>, #[serde(rename = "tenantId")] pub tenant_id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServicePrincipalDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")] pub client_secret: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlAdminDatastoreCredentials { #[serde(flatten)] pub datastore_credentials: DatastoreCredentials, 
#[serde(default, skip_serializing_if = "Option::is_none")] pub secrets: Option<SqlAdminDatastoreSecrets>, #[serde(rename = "userId")] pub user_id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SqlAdminDatastoreSecrets { #[serde(flatten)] pub datastore_secrets: DatastoreSecrets, #[serde(default, skip_serializing_if = "Option::is_none")] pub password: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct StatusMessage { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(rename = "createdTimeUtc", default, skip_serializing_if = "Option::is_none")] pub created_time_utc: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub level: Option<StatusMessageLevel>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum StatusMessageLevel { Error, Information, Warning, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SweepJob { #[serde(flatten)] pub job_base: JobBase, pub algorithm: SamplingAlgorithm, pub compute: ComputeConfiguration, #[serde(rename = "earlyTermination", default, skip_serializing_if = "Option::is_none")] pub early_termination: Option<EarlyTerminationPolicy>, #[serde(rename = "experimentName", default, skip_serializing_if = "Option::is_none")] pub experiment_name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub identity: Option<IdentityConfiguration>, #[serde(rename = "maxConcurrentTrials", default, skip_serializing_if = "Option::is_none")] pub max_concurrent_trials: Option<i32>, #[serde(rename = "maxTotalTrials", default, skip_serializing_if = "Option::is_none")] pub max_total_trials: Option<i32>, pub objective: Objective, #[serde(default, skip_serializing_if = "Option::is_none")] pub output: Option<JobOutput>, #[serde(default, skip_serializing_if = "Option::is_none")] pub 
priority: Option<i32>, #[serde(rename = "searchSpace")] pub search_space: serde_json::Value, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<JobStatus>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub trial: Option<TrialComponent>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TensorFlow { #[serde(flatten)] pub distribution_configuration: DistributionConfiguration, #[serde(rename = "parameterServerCount", default, skip_serializing_if = "Option::is_none")] pub parameter_server_count: Option<i32>, #[serde(rename = "workerCount", default, skip_serializing_if = "Option::is_none")] pub worker_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum TextAnnotationType { Classification, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrialComponent { #[serde(rename = "codeId", default, skip_serializing_if = "Option::is_none")] pub code_id: Option<String>, pub command: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub distribution: Option<DistributionConfiguration>, #[serde(rename = "environmentId", default, skip_serializing_if = "Option::is_none")] pub environment_id: Option<String>, #[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")] pub environment_variables: Option<serde_json::Value>, #[serde(rename = "inputDataBindings", default, skip_serializing_if = "Option::is_none")] pub input_data_bindings: Option<serde_json::Value>, #[serde(rename = "outputDataBindings", default, skip_serializing_if = "Option::is_none")] pub output_data_bindings: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timeout: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TruncationSelectionPolicy { #[serde(flatten)] pub early_termination_policy: 
// NOTE(review): the lines up to the first `}` are the tail of a definition
// that begins before this chunk of the file; reproduced untouched.
EarlyTerminationPolicy,
    #[serde(rename = "truncationPercentage", default, skip_serializing_if = "Option::is_none")]
    pub truncation_percentage: Option<i32>,
}

/// Serde model: client/principal ids of a user-assigned identity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentityMeta {
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
}

/// Serde model: a feature entry with id, display name and description.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlUserFeature {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}

/// Paged list of [`AmlUserFeature`]; `next_link` points at the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListAmlUserFeatureResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<AmlUserFeature>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Paged list of [`WorkspaceSku`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<WorkspaceSku>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Serde model: a single name/value capability pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuCapability {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}

/// Serde model: restriction on a SKU (`type` is a Rust keyword, hence `type_`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Restriction {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub values: Vec<String>,
    #[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
    pub reason_code: Option<restriction::ReasonCode>,
}

pub mod restriction {
    use super::*;
    /// Why a restriction applies.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReasonCode {
        NotSpecified,
        NotAvailableForRegion,
        NotAvailableForSubscription,
    }
}

/// Per-location availability information for a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuLocationInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub zones: Vec<String>,
    #[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
    pub zone_details: Vec<ResourceSkuZoneDetails>,
}

/// Per-zone capabilities of a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuZoneDetails {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub name: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub capabilities: Vec<SkuCapability>,
}

/// Serde model: a workspace SKU with its locations, tier and restrictions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceSku {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub locations: Vec<String>,
    #[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub location_info: Vec<ResourceSkuLocationInfo>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
    #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
    pub resource_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub capabilities: Vec<SkuCapability>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub restrictions: Vec<Restriction>,
}

/// Top-level error envelope: wraps a single [`ErrorDetail`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}

/// Recursive error detail: `details` nests further [`ErrorDetail`]s.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}

/// Extra error info: a type tag plus arbitrary JSON payload.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub info: Option<serde_json::Value>,
}

/// Creation/modification audit metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}

pub mod system_data {
    use super::*;
    /// Kind of principal that created the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    /// Kind of principal that last modified the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}

/// Common id/name/type triple shared by all resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}

/// A [`Resource`] (flattened into this struct's JSON) plus tags and a
/// required `location`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
}
#![feature(assert_matches)] mod archiver; mod piece_reconstruction; mod reconstructor;
pub mod tables; pub mod pages; use super::interrupts::InteruptStack; use self::tables::{EntryFlags, address_to_tables}; use self::pages::{alloc_page}; pub fn handle_page_fault(_vars: &mut InteruptStack) { let cr2: u64; unsafe { asm!("mov $0, cr2" : "=r"(cr2) ::: "intel"); } let pages = address_to_tables(cr2); // unsafe { // if !enabled { // panic!("PF at 0x{:x} accessing 0x{:x} {:?}", vars.rip, cr2, pages); // } // } let table4 = unsafe { &mut ::memory::tables::PROC_TABLE }; let table3 = { if !table4.0[pages.0].is_present() { let page = alloc_page(1); table4.0[pages.0].set(page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE); println!("Created Level 3 page: {:x}", page); } unsafe { table4.0[pages.0].as_table(0xFFFFFFFF_80000000) } }; let table2 = { if !table3.0[pages.1].is_present() { let page = alloc_page(1); table3.0[pages.1].set(page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE); println!("Created Level 2 page: {:x}", page); } unsafe { table3.0[pages.1].as_table(0xFFFFFFFF_80000000) } }; let table1 = { if !table2.0[pages.2].is_present() { let page = alloc_page(1); table2.0[pages.2].set(page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE); println!("Created Level 1 page: {:x}", page); } unsafe { table2.0[pages.2].as_table(0xFFFFFFFF_80000000) } }; if !table1.0[pages.3].is_present() { let page = alloc_page(1); table1.0[pages.3].set(page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE); // println!("Created used page: {:x} -> {:x} (RIP: {:x})", page, cr2, vars.rip); } // panic!("PF at 0x{:x} accessing 0x{:x} {:?}", vars.rip, cr2, pages); }
/// The base node struct that contains props, and a Vec of child nodes.
#[derive(Debug)]
pub struct RusxNode<T: Default> {
    /// The node's props. The only required traits to implement are `Debug` and `Default`
    pub props: T,
    /// A vector of child nodes. Can be empty.
    pub children: Vec<RusxNode<T>>,
}

impl<T: Default + std::fmt::Debug> RusxNode<T> {
    /// Creates a new node with the given props and no children.
    pub fn new(props: T) -> Self {
        Self {
            props,
            children: vec![],
        }
    }

    /// Run a function on a node, and on all child nodes. Runs on itself first,
    /// *then* on child nodes, meaning the function will run on the topmost node first.
    ///
    /// The closure receives `None` for the node this is called on, and
    /// `Some(index)` for every descendant, where `index` is the child's
    /// position within its parent's `children`.
    ///
    /// ```text
    /// #[derive(Default)]
    /// struct Foo {
    ///     bar: u8
    /// }
    ///
    /// let mut tree = rusx! {
    ///     <Foo bar=10> {
    ///         <bar=20>,
    ///         <bar=30>,
    ///     }
    /// };
    ///
    /// tree.bubble_down(|i: Option<usize>, s: &mut RusxNode<Foo>| {
    ///     s.props.bar += 1;
    ///     println!("{}", s.props.bar);
    /// });
    ///
    /// // 11
    /// // 21
    /// // 31
    /// ```
    pub fn bubble_down<F>(&mut self, func: F)
    where
        F: Fn(Option<usize>, &mut Self),
    {
        func(None, self);
        for (i, child) in self.children.iter_mut().enumerate() {
            child._bubble_down(i, &func);
        }
    }

    /// Recursive helper for [`Self::bubble_down`]: visits `self` (at child
    /// index `i`) before descending into its children.
    fn _bubble_down<F>(&mut self, i: usize, func: &F)
    where
        F: Fn(Option<usize>, &mut Self),
    {
        func(Some(i), self);
        for (i, child) in self.children.iter_mut().enumerate() {
            child._bubble_down(i, func);
        }
    }

    /// Run a function on a node, and on all child nodes. Runs on child nodes first,
    /// *then* on itself, meaning the function will run on the topmost node last.
    ///
    /// ```text
    /// #[derive(Default)]
    /// struct Foo {
    ///     bar: u8
    /// }
    ///
    /// let mut tree = rusx! {
    ///     <Foo bar=10> {
    ///         <bar=20>,
    ///         <bar=30>,
    ///     }
    /// };
    ///
    /// tree.bubble_up(|i: Option<usize>, s: &mut RusxNode<Foo>| {
    ///     s.props.bar += 1;
    ///     println!("{}", s.props.bar);
    /// });
    ///
    /// // 21
    /// // 31
    /// // 11
    /// ```
    pub fn bubble_up<F>(&mut self, func: F)
    where
        F: Fn(Option<usize>, &mut Self),
    {
        for (i, child) in self.children.iter_mut().enumerate() {
            child._bubble_up(i, &func);
        }
        func(None, self);
    }

    /// Recursive helper for [`Self::bubble_up`]: descends into children
    /// before visiting `self` (at child index `i`).
    fn _bubble_up<F>(&mut self, i: usize, func: &F)
    where
        F: Fn(Option<usize>, &mut Self),
    {
        for (i, child) in self.children.iter_mut().enumerate() {
            child._bubble_up(i, func);
        }
        func(Some(i), self);
    }
}
//! Integration tests: how sudo resolves the command to execute (PATH search,
//! qualified paths, realpath matching, and the value of arg0).

use sudo_test::{Command, Env, TextFile};

use crate::{helpers, Result, SUDOERS_ALL_ALL_NOPASSWD, USERNAME};

// Wraps insta's snapshot assertion so snapshots land in
// ../snapshots/path_search without the module prefix in the name.
macro_rules! assert_snapshot {
    ($($tt:tt)*) => {
        insta::with_settings!({
            prepend_module_to_snapshot => false,
            snapshot_path => "../snapshots/path_search",
        }, {
            insta::assert_snapshot!($($tt)*)
        });
    };
}

// PATH search runs with root's privileges, so a script only root can see
// (/root, mode 100) is still found for an unprivileged invoking user.
#[test]
fn can_find_command_not_visible_to_regular_user() -> Result<()> {
    let path = "/root/my-script";
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD)
        .user(USERNAME)
        .file(path, TextFile("#!/bin/sh").chmod("100"))
        .build()?;

    Command::new("sh")
        .args(["-c", "export PATH=/root; cd /; /usr/bin/sudo my-script"])
        .as_user(USERNAME)
        .output(&env)?
        .assert_success()?;

    Ok(())
}

// The default PATH sudo sets for the *command* must not be used to *search*
// for the command when the invoking shell has no PATH at all.
#[test]
fn when_path_is_unset_does_not_search_in_default_path_set_for_command_execution() -> Result<()> {
    let path = "/usr/bin/my-script";
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD)
        .file(path, TextFile("#!/bin/sh").chmod("777"))
        .build()?;

    let default_path = Command::new("sh")
        .args(["-c", "unset PATH; /usr/bin/sudo /usr/bin/printenv PATH"])
        .output(&env)?
        .stdout()?;

    // sanity check that `/usr/bin` is in sudo's default PATH
    let default_path = helpers::parse_path(&default_path);
    assert!(default_path.contains("/usr/bin"));

    let output = Command::new("sh")
        .args(["-c", "unset PATH; /usr/bin/sudo my-script"])
        .output(&env)?;

    assert!(!output.status().success());
    assert_eq!(Some(1), output.status().code());

    let stderr = output.stderr();
    if sudo_test::is_original_sudo() {
        assert_snapshot!(stderr);
    } else {
        assert_contains!(stderr, "'my-script': command not found");
    }

    Ok(())
}

// Absolute and relative (./) command names bypass the PATH search entirely.
#[test]
fn ignores_path_for_qualified_commands() -> Result<()> {
    let path = "/root/my-script";
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD)
        .file(path, TextFile("#!/bin/sh").chmod("100"))
        .build()?;

    for param in ["/root/my-script", "./my-script"] {
        Command::new("sh")
            .args(["-c", &format!("cd /root; sudo {param}")])
            .as_user("root")
            .output(&env)?
            .assert_success()?;
    }

    Ok(())
}

// A sudoers rule written with /bin/true must match an invocation of
// /usr/bin/true (same file through the /bin -> /usr/bin symlink).
#[test]
fn paths_are_matched_using_realpath_in_sudoers() -> Result<()> {
    let env = Env(["ALL ALL = /bin/true"]).build()?;

    // this test assumes /bin is a symbolic link for /usr/bin, which is the
    // case on Debian bookworm; if it fails for original sudo, either change the
    // dockerfile or explicitly create a symbolic link
    Command::new("sudo")
        .arg("/usr/bin/true")
        .output(&env)?
        .assert_success()?;

    Ok(())
}

// Mirror of the previous test: rule uses /usr/bin, invocation uses /bin.
#[test]
fn paths_are_matched_using_realpath_in_arguments() -> Result<()> {
    let env = Env(["ALL ALL = /usr/bin/true"]).build()?;

    // this test assumes /bin is a symbolic link for /usr/bin, which is the
    // case on Debian bookworm; if it fails for original sudo, either change the
    // dockerfile or explicitly create a symbolic link
    Command::new("sudo")
        .arg("/bin/true")
        .output(&env)?
        .assert_success()?;

    Ok(())
}

// For a native binary given as an absolute path, argv[0] is the path as typed.
#[test]
fn arg0_native_is_passed_from_commandline() -> Result<()> {
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD).build()?;

    let output = Command::new("sh")
        .args([
            "-c",
            "ln -s /bin/ls /bin/foo; sudo /bin/foo --invalid-flag; true",
        ])
        .output(&env)?;

    let stderr = output.stderr();
    assert_starts_with!(stderr, "/bin/foo: unrecognized option");

    Ok(())
}

// For a native binary found via PATH search, argv[0] is the bare name.
#[test]
fn arg0_native_is_resolved_from_commandline() -> Result<()> {
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD).build()?;

    let output = Command::new("sh")
        .args([
            "-c",
            "ln -s /bin/ls /bin/foo; sudo foo --invalid-flag; true",
        ])
        .output(&env)?;

    let stderr = output.stderr();
    assert_starts_with!(stderr, "foo: unrecognized option");

    Ok(())
}

// Same arg0 checks but for a shell script ($0 inside the script).
#[test]
#[ignore = "gh735"]
fn arg0_script_is_passed_from_commandline() -> Result<()> {
    let path = "/bin/my-script";
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD)
        .file(path, TextFile("#!/bin/sh\necho $0").chmod("777"))
        .build()?;

    let output = Command::new("sh")
        .args(["-c", &format!("ln -s {path} /bin/foo; sudo /bin/foo")])
        .output(&env)?;

    let stdout = output.stdout()?;
    assert_eq!(stdout, "/bin/foo");

    Ok(())
}

// PATH-resolved script: $0 is the fully resolved /usr/bin path.
#[test]
fn arg0_script_is_resolved_from_commandline() -> Result<()> {
    let path = "/bin/my-script";
    let env = Env(SUDOERS_ALL_ALL_NOPASSWD)
        .file(path, TextFile("#!/bin/sh\necho $0").chmod("777"))
        .build()?;

    let output = Command::new("sh")
        .args(["-c", &format!("ln -s {path} /bin/foo; sudo foo")])
        .output(&env)?;

    let stdout = output.stdout()?;
    assert_eq!(stdout, "/usr/bin/foo");

    Ok(())
}
use core::ops::Mul; /// Complex and hypercomplex transformation basic trait. pub trait Transform<U> { /// Apply the transformation. fn apply(&self, x: U) -> U; } /// Transformation that has an identity element. pub trait Identity { /// Get an identity element. fn identity() -> Self; } /// Transformation which instances could be chained into another one (i.e. forms a magma). pub trait Chain<U>: Transform<U> { fn chain(self, other: Self) -> Self; } /// Differentiable transformation. pub trait Deriv<U>: Transform<U> { /// Find the derivative of `self` at the specified point `p`. fn deriv(&self, p: U) -> U; } /// Directionally differentiable transformation. pub trait DerivDir<U>: Transform<U> { /// Find the directinal derivative of `self` at the specified point `p` via the specified direction `d`. fn deriv_dir(&self, p: U, d: U) -> U; } impl<U: Mul<U, Output=U>, M> DerivDir<U> for M where M: Deriv<U> { fn deriv_dir(&self, p: U, d: U) -> U { self.deriv(p) * d } }
use glob::{GlobError, PatternError};
use std::ffi::OsString;
use std::fmt::{self, Display};
use std::io;
use std::path::PathBuf;

/// Every failure mode of the harness, from invoking cargo through comparing
/// compiler output against expectations.
#[derive(Debug)]
pub enum Error {
    Cargo(io::Error),
    CargoFail,
    GetManifest(PathBuf, Box<Error>),
    Glob(GlobError),
    Io(io::Error),
    Metadata(serde_json::Error),
    Mismatch,
    NoWorkspaceManifest,
    Open(PathBuf, io::Error),
    Pattern(PatternError),
    ProjectDir,
    ReadStderr(io::Error),
    RunFailed,
    ShouldNotHaveCompiled,
    Toml(basic_toml::Error),
    UpdateVar(OsString),
    WriteStderr(io::Error),
}

/// Crate-wide result alias over [`Error`].
pub type Result<T> = std::result::Result<T, Error>;

impl Display for Error {
    // Human-readable message for each variant; wrapper variants like Glob/Io
    // defer entirely to the inner error's Display.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            Cargo(e) => write!(f, "failed to execute cargo: {}", e),
            CargoFail => write!(f, "cargo reported an error"),
            GetManifest(path, e) => write!(f, "failed to read manifest {}: {}", path.display(), e),
            Glob(e) => write!(f, "{}", e),
            Io(e) => write!(f, "{}", e),
            Metadata(e) => write!(f, "failed to read cargo metadata: {}", e),
            Mismatch => write!(f, "compiler error does not match expected error"),
            NoWorkspaceManifest => write!(f, "Cargo.toml uses edition.workspace=true, but no edition found in workspace's manifest"),
            Open(path, e) => write!(f, "{}: {}", path.display(), e),
            Pattern(e) => write!(f, "{}", e),
            ProjectDir => write!(f, "failed to determine name of project dir"),
            ReadStderr(e) => write!(f, "failed to read stderr file: {}", e),
            RunFailed => write!(f, "execution of the test case was unsuccessful"),
            ShouldNotHaveCompiled => {
                write!(f, "expected test case to fail to compile, but it succeeded")
            }
            Toml(e) => write!(f, "{}", e),
            UpdateVar(var) => write!(
                f,
                "unrecognized value of TRYBUILD: {:?}",
                var.to_string_lossy(),
            ),
            WriteStderr(e) => write!(f, "failed to write stderr file: {}", e),
        }
    }
}

impl Error {
    /// Whether this error's details were already reported before it
    /// propagated, so the caller should not print it again.
    // NOTE(review): inferred from the variant set; confirm against call sites.
    pub fn already_printed(&self) -> bool {
        use self::Error::*;

        matches!(
            self,
            CargoFail | Mismatch | RunFailed | ShouldNotHaveCompiled
        )
    }
}

// `From` impls so `?` can lift library errors into `Error`.
impl From<GlobError> for Error {
    fn from(err: GlobError) -> Self {
        Error::Glob(err)
    }
}

impl From<PatternError> for Error {
    fn from(err: PatternError) -> Self {
        Error::Pattern(err)
    }
}

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Self {
        Error::Io(err)
    }
}

impl From<basic_toml::Error> for Error {
    fn from(err: basic_toml::Error) -> Self {
        Error::Toml(err)
    }
}
//use tokio_io::{try_nb, AsyncRead, AsyncWrite}; // //use super::tokio_tls; //use super::tokio_tls::client; //use super::tokio_tls::entry; //use super::tokio_tls::server; //use core::fmt::Pointer; //use futures::{Async, Future, Poll}; //use rustls::{ClientConfig, ClientSession, ServerConfig, ServerSession, Stream}; //use std::fmt; //use std::io::{Error, ErrorKind}; //use std::result; //use std::sync::Arc; //use std::{io, mem}; //use webpki::DNSNameRef; // //pub enum Identity { // Server(Arc<ServerConfig>), // Client(Arc<ClientConfig>), //} // //pub enum MidHandshakeTlsStream<S> { // Server(server::MidHandshake<S>), // Client(client::MidHandshake<S>), //} // //impl<S: AsyncRead + AsyncWrite> Future for MidHandshakeTlsStream<S> { // type Item = TlsStream<S>; // type Error = io::Error; // // fn poll(&mut self) -> Poll<Self::Item, Self::Error> { // match self { // MidHandshakeTlsStream::Server(s) => match s.poll() { // Ok(t) => match t { // Async::Ready(r) => Ok(Async::Ready(TlsStream::Server(r))), // Async::NotReady => Ok(Async::NotReady), // }, // // Err(e) => Err(e), // }, // MidHandshakeTlsStream::Client(s) => match s.poll() { // Ok(t) => match t { // Async::Ready(r) => Ok(Async::Ready(TlsStream::Client(r))), // Async::NotReady => Ok(Async::NotReady), // }, // Err(e) => Err(e), // }, // } // } //} // //pub struct TlsConnector { // connector: entry::TlsConnector, //} // //pub struct TlsConnectorBuilder {} // //impl TlsConnectorBuilder { // pub fn new(identity: Identity) -> Result<TlsConnector, Error> { // match identity { // Identity::Client(s) => Ok(TlsConnector { // connector: entry::TlsConnector::from(s), // }), // _ => Err(io::Error::new(ErrorKind::Other, "oh no!")), // } // } //} // //impl TlsConnector { // pub fn connect<S>(&self, domain: DNSNameRef, stream: S) -> MidHandshakeTlsStream<S> // where // S: AsyncRead + AsyncWrite, // { // let mut session = ClientSession::new(&self.connector.inner, domain); // 
MidHandshakeTlsStream::Client(client::MidHandshake::Handshaking(client::TlsStream { // session, // io: stream, // state: entry::TlsState::Stream, // })) // } //} // //pub struct TlsAcceptor { // acceptor: entry::TlsAcceptor, //} // //impl TlsAcceptor { // pub fn accept<S>(&self, stream: S) -> MidHandshakeTlsStream<S> // where // S: AsyncRead + AsyncWrite, // { // let mut session = ServerSession::new(&self.acceptor.inner); // // MidHandshakeTlsStream::Server(server::MidHandshake::Handshaking(server::TlsStream { // session, // io: stream, // state: entry::TlsState::Stream, // })) // } //} // //pub struct TlsAcceptorBuilder {} // //impl TlsAcceptorBuilder { // pub fn new(identity: Identity) -> Result<TlsAcceptor, Error> { // match identity { // Identity::Server(s) => Ok(TlsAcceptor { // acceptor: (entry::TlsAcceptor::from(s)), // }), // _ => Err(io::Error::new(ErrorKind::Other, "oh no!")), // } // } //} // //pub enum TlsStream<S> { // Server(server::TlsStream<S>), // Client(client::TlsStream<S>), //} // //pub enum TlsSession { // Server(ServerSession), // Client(ClientSession), //} // //impl<S> fmt::Debug for TlsStream<S> //where // S: fmt::Debug, //{ // fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { // match *self { // TlsStream::Server(ref s) => s.fmt(fmt), // TlsStream::Client(ref s) => s.fmt(fmt), // } // } //} // //impl<S: AsyncRead + AsyncWrite> TlsStream<S> { // /// Shuts down the TLS session. 
// pub fn shutdown(&mut self) -> Poll<(), io::Error> { // match *self { // TlsStream::Server(ref mut s) => s.shutdown(), // TlsStream::Client(ref mut s) => s.shutdown(), // } // } //} // //impl<S: AsyncRead + AsyncWrite> io::Read for TlsStream<S> { // fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { // match *self { // TlsStream::Server(ref mut s) => s.read(buf), // TlsStream::Client(ref mut s) => s.read(buf), // } // } //} // //impl<S: AsyncRead + AsyncWrite> io::Write for TlsStream<S> { // fn write(&mut self, buf: &[u8]) -> io::Result<usize> { // match *self { // TlsStream::Server(ref mut s) => s.write(buf), // TlsStream::Client(ref mut s) => s.write(buf), // } // } // // fn flush(&mut self) -> io::Result<()> { // match *self { // TlsStream::Server(ref mut s) => s.flush(), // TlsStream::Client(ref mut s) => s.flush(), // } // } //}
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use serde::Deserialize; use serde::Serialize; use serde_json::json; use std::collections::HashMap; use std::io; use std::sync::mpsc::channel; use std::sync::mpsc::Sender; use std::sync::Arc; use std::sync::Mutex; use std::sync::Weak; /// The Read trait allows for reading utf-8 packets from a source. pub trait Read { fn read_packet(&mut self) -> Result<String, io::Error>; } /// The Write trait allows for writing utf-8 packets to a destination. pub trait Write { fn write_packet(&self, packet: String) -> Result<(), io::Error>; } pub enum Message { Request(Request), Notification(Notification), } pub struct Request { pub method: String, pub params: serde_json::Value, pub response_handle: ResponseHandle, } pub struct Notification { pub method: String, pub params: serde_json::Value, } pub struct ResponseHandle { id: Id, writer: Arc<Mutex<dyn Write + Send>>, } impl ResponseHandle { pub fn respond(self, response: Result<serde_json::Value, serde_json::Value>) { // TODO: Improve error handling if responding fails. self.writer .lock() .unwrap() .write_packet(match response { Ok(result) => { json!({ "jsonrpc": "2.0", "id": self.id, "result": result}).to_string() } Err(error) => json!({ "jsonrpc": "2.0", "id": self.id, "error": error}).to_string(), }) .unwrap(); } } /// The LspSender allows to send messages (requests and notification) to the client. 
pub struct LspSender { next_id: Arc<Mutex<Counter>>, writer: Arc<Mutex<dyn Write + Send>>, running_requests: Weak<Mutex<MyMap>>, } impl LspSender { pub fn send_notification(&self, method: &str, params: serde_json::Value) { // TODO: how to properly handle errors here? self.writer .lock() .unwrap() .write_packet( json!( { "jsonrpc": "2.0", "method": method, "params": params} ) .to_string(), ) .unwrap(); } pub fn send_request( &self, method: &str, params: serde_json::Value, ) -> Result<serde_json::Value, serde_json::Value> { let running_requests = match self.running_requests.upgrade() { Some(x) => x, None => panic!("failed to upgrade running_requests"), }; let id: Id = Id::Number(self.next_id.lock().unwrap().next()); let (sender, receiver) = channel(); running_requests.lock().unwrap().insert(id.clone(), sender); // TODO: how to properly handle errors here? self.writer .lock() .unwrap() .write_packet( json!( { "jsonrpc": "2.0", "id": id, "method": method, "params": params} ) .to_string(), ) .unwrap(); return receiver.recv().unwrap(); } } type ResultOrError = Result<serde_json::Value, serde_json::Value>; type MyMap = HashMap<Id, Sender<ResultOrError>>; #[derive(Debug, PartialEq, Clone, Hash, Eq, Deserialize, Serialize)] #[serde(untagged)] enum Id { Number(i64), String(String), } /// Basic implementation of LSP Server. /// /// Server is responsible for abstracting the communication between the client and the server. The /// server: /// * serializes and deserializes packets into proper LSP Messages, /// * hides the concept of request ID by providing APIs to send new requests, reply to messages /// and receive responses, /// * handles the `exit` notification, to stop the iterator from receiving any more messages. /// /// It exits after receiving `exit` notification and forwards all other requests and responses to /// handler passed in run method. /// /// TODO: Server also returns when error is encountered, but errors are not properly reported yet. 
pub struct Server<R: Read, W: Write> {
    reader: R,
    writer: Arc<Mutex<W>>,
    // Map of requests that are currently waiting for the response from client.
    running_requests: Arc<Mutex<MyMap>>,
}

impl<R, W> Iterator for Server<R, W>
where
    R: Read,
    W: Write + Send + 'static,
{
    type Item = Message;

    /// Read packets until one becomes a `Message` for the caller.
    ///
    /// Returns `None` on read/parse error, on the `exit` notification, or on
    /// a malformed packet. Responses to our own outgoing requests are routed
    /// to the waiting `send_request` call and the loop continues.
    fn next(&mut self) -> Option<Message> {
        loop {
            let packet = match self.reader.read_packet() {
                Ok(packet) => packet,
                // TODO: Save the error
                Err(_) => return None,
            };
            let json: serde_json::Value = match serde_json::from_str(&packet) {
                Ok(value) => value,
                // TODO: We should probably reply with error?
                Err(_) => return None,
            };
            match &json {
                serde_json::Value::Object(map) => {
                    // A "method" key means request or notification.
                    if let Some(serde_json::Value::String(method)) = map.get("method") {
                        if method == "exit" {
                            return None;
                        }
                        // With an "id" it is a request; build a response handle.
                        if let Some(id_val) = map.get("id") {
                            let id: Id = serde_json::from_value(id_val.clone()).unwrap();
                            return Some(Message::Request(Request {
                                method: method.to_string(),
                                params: json["params"].clone(),
                                response_handle: ResponseHandle {
                                    id: id,
                                    writer: self.writer.clone(),
                                },
                            }));
                        }
                        return Some(Message::Notification(Notification {
                            method: method.to_string(),
                            params: json["params"].clone(),
                        }));
                    }
                    // A "result" with an "id" is the client answering one of
                    // our requests: forward it to the waiting sender.
                    // NOTE(review): the index panics if the id is unknown and
                    // the entry is never removed — consider `.remove(&id)`.
                    if let Some(result) = map.get("result") {
                        if let Some(id_val) = map.get("id") {
                            let id: Id = serde_json::from_value(id_val.clone()).unwrap();
                            self.running_requests.lock().unwrap()[&id]
                                .send(Ok(result.clone()))
                                .unwrap();
                            continue;
                        }
                    }
                    // TODO: I think we should just respond with error here.
                    return None;
                }
                // TODO: I think we should just respond with error here.
                _ => return None,
            }
        }
    }
}

impl<R, W> Server<R, W>
where
    R: Read,
    W: Write + Send + 'static,
{
    /// Create a server over the given packet reader and writer.
    pub fn new(reader: R, writer: W) -> Server<R, W> {
        return Server {
            reader: reader,
            writer: Arc::new(Mutex::new(writer)),
            running_requests: Arc::new(Mutex::new(HashMap::new())),
        };
    }

    /// Create an [`LspSender`] sharing this server's writer and request map;
    /// it can be moved to other threads to talk to the client.
    pub fn sender(&self) -> LspSender {
        return LspSender {
            next_id: Arc::new(Mutex::new(Counter::new())),
            writer: self.writer.clone(),
            running_requests: Arc::downgrade(&self.running_requests),
        };
    }
}

// Simple monotonically increasing id generator (first value: 1).
struct Counter {
    id: i64,
}

impl Counter {
    fn new() -> Counter {
        Counter { id: 0 }
    }

    fn next(&mut self) -> i64 {
        self.id += 1;
        return self.id;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::mpsc::Receiver;

    // In-memory Read impl fed by an mpsc channel; EOF when the sender drops.
    struct FakeReader {
        receiver: Receiver<String>,
    }

    impl FakeReader {
        fn new() -> (Sender<String>, FakeReader) {
            let (sender, receiver) = channel();
            return (sender, FakeReader { receiver: receiver });
        }
    }

    impl Read for FakeReader {
        fn read_packet(&mut self) -> Result<String, io::Error> {
            self.receiver
                .recv()
                .map_err(|_| io::Error::new(io::ErrorKind::UnexpectedEof, "EOF encountered"))
        }
    }

    // In-memory Write impl that pushes packets onto an mpsc channel.
    struct FakeWriter {
        sender: Sender<String>,
    }

    impl FakeWriter {
        fn new() -> (Receiver<String>, FakeWriter) {
            let (sender, receiver) = channel();
            return (receiver, FakeWriter { sender: sender });
        }
    }

    impl Write for FakeWriter {
        fn write_packet(&self, packet: String) -> Result<(), io::Error> {
            self.sender.send(packet).unwrap();
            Ok(())
        }
    }

    // Test double for the LSP client: sends into the server's reader and
    // receives from the server's writer.
    struct Client {
        sender: Sender<String>,
        receiver: Receiver<String>,
    }

    impl Client {
        fn recv(&self) -> Result<serde_json::Value, ()> {
            Ok(self.receiver.recv().unwrap().parse().unwrap())
        }

        fn send(&self, req: serde_json::Value) -> Result<(), ()> {
            self.sender.send(req.to_string()).unwrap();
            Ok(())
        }
    }

    fn exit_notification() -> serde_json::Value {
        return json!({
            "jsonrpc": "2.0",
            "method": "exit",
        });
    }

    fn create_client_and_server() -> (Client, Server<FakeReader, FakeWriter>) {
        let (writer_ch, writer) = FakeWriter::new();
        let (reader_ch, reader) = FakeReader::new();
        let client = Client {
            sender: reader_ch,
            receiver: writer_ch,
        };
        let server = Server::new(reader, writer);
        return (client, server);
    }

    #[test]
    fn server_exits_after_exit_notification() {
        let (client, server) = create_client_and_server();
        client.send(exit_notification()).unwrap();
        // The iterator should terminate without yielding any message.
        assert_eq!(server.count(), 0);
    }

    #[test]
    fn server_exits_when_reader_returns_eof() {
        let (client, server) = create_client_and_server();
        // Dropping the client closes the channel, producing EOF on read.
        std::mem::drop(client);
        assert_eq!(server.count(), 0);
    }

    #[test]
    fn server_receives_notifications() {
        let notification = json!({
            "jsonrpc": "2.0",
            "method": "someMethod",
            "params": {
                "key": "value",
            }
        });
        let (client, mut server) = create_client_and_server();
        client.send(notification.clone()).unwrap();
        client.send(exit_notification()).unwrap();

        let message = server.next().unwrap();
        match message {
            Message::Notification(n) => {
                assert_eq!(n.method, "someMethod");
                assert_eq!(n.params, json!({"key": "value"}));
            }
            _ => panic!("invalid message received, want notification"),
        }
    }

    #[test]
    fn server_receives_requests() {
        let request = json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "someMethod",
            "params": {
                "key": "value",
            }
        });
        let (client, mut server) = create_client_and_server();
        client.send(request.clone()).unwrap();
        client.send(exit_notification()).unwrap();

        let message = server.next().unwrap();
        match message {
            Message::Request(r) => {
                assert_eq!(r.method, "someMethod");
                assert_eq!(r.params, json!({"key": "value"}));
                r.response_handle.respond(Ok(json!({"my": "response"})));
            }
            _ => panic!("invalid message received, want request"),
        }
        assert_eq!(
            client.recv().unwrap(),
            json!({
                "jsonrpc": "2.0",
                "id": 1,
                "result": {
                    "my": "response",
                }
            })
        )
    }

    #[test]
    fn server_can_send_notifications() {
        let (client, server) = create_client_and_server();
        server
            .sender()
            .send_notification("someMethod", json!({"key": "value"}));
        assert_eq!(
            client.recv().unwrap(),
            json!({ "jsonrpc": "2.0", "method": "someMethod", "params": { "key": "value", }} )
        );
    }

    #[test]
    fn server_can_send_requests() {
        let (client, server) = create_client_and_server();
        let sender = server.sender();
        // send_request blocks, so it runs on its own thread; a second thread
        // drives the server loop so the response gets routed.
        let t = std::thread::spawn(move || {
            let res = sender
                .send_request("someMethod", json!({"key": "value"}))
                .unwrap();
            assert_eq!(res, json!({"key1": "value1"}));
        });
        let t2 = std::thread::spawn(move || {
            // Just consume all items.
            server.count();
        });
        assert_eq!(
            client.recv().unwrap(),
            json!({ "jsonrpc": "2.0", "id": 1, "method": "someMethod", "params": { "key": "value", }})
        );
        client
            .send(json!({
                "jsonrpc": "2.0",
                "id": 1,
                "result": {
                    "key1": "value1",
                }
            }))
            .unwrap();
        client
            .send(json!({
                "jsonrpc": "2.0",
                "method": "exit",
            }))
            .unwrap();
        t.join().unwrap();
        t2.join().unwrap();
    }
}
use std::env; use std::fs; use std::io::{self, BufRead, Write}; mod scanner; mod token; use crate::scanner::*; use crate::token::*; fn run(source: &str) { //println!("{} lines of source", source.lines().count()); let tokens: Vec<Result<Token, ScannerError>> = Scanner::new(source.chars()).collect(); println!("{} tokens from source", tokens.len()); for tok in tokens.iter() { if let Ok(tok) = tok { println!(" {}", tok); } else { println!(" {:?}", tok); } } } fn run_file(path: &str) { let source = fs::read_to_string(path).expect("Really shouldn't let this stay"); run(&source); } fn run_prompt() { let stdin = io::stdin(); let mut buffer = String::new(); let mut handle = stdin.lock(); loop { print!("> "); io::stdout().flush().unwrap(); let chars_read = handle.read_line(&mut buffer).unwrap(); if chars_read == 0 { break; } let line = buffer.trim(); run(line); println!("->{}", line); //io::stdout().flush().unwrap(); buffer.clear(); } } fn main() { let args: Vec<String> = env::args().collect(); println!("Hello, Welcome to Lox in Rust!"); match args.len() { 1 => { println!("Entering interactive mode(CTRL+D to exit):"); run_prompt(); println!("...Exiting"); } 2 => { println!("processing file: {}", args[1]); run_file(&args[1]) } _ => println!("USAGE: {} [script-file]", args[0]), } }
#![cfg(test)] use problem1::{sum, dedup, filter}; use problem2::mat_mult; use problem3::sieve; use problem4::{hanoi, Peg}; // // Problem 1 // // Part 1 #[test] fn test_sum_small() { let array = [1,2,3,4,5]; assert_eq!(sum(&array), 15); } // Part 2 #[test] fn test_dedup_small() { let vs = vec![1,2,2,3,4,1]; assert_eq!(dedup(&vs), vec![1,2,3,4]); } // Part 3 fn even_predicate(x: i32) -> bool { (x % 2) == 0 } #[test] fn test_filter_small() { let vs = vec![1,2,3,4,5]; assert_eq!(filter(&vs, &even_predicate), vec![2,4]); } // // Problem 2 // #[test] fn test_mat_mult_identity() { let mut mat1 = vec![vec![0.;3]; 3]; for i in 0..mat1.len() { mat1[i][i] = 1.; } let mat2 = vec![vec![5.;3]; 3]; let result = mat_mult(&mat1, &mat2); for i in 0..result.len() { for j in 0..result[i].len() { assert_eq!(result[i][j], mat2[i][j]); } } } #[test] fn test_mat_mult_2x2() { let mat1 = vec![vec![2., 3.], vec![2., 5.]]; let mat2 = vec![vec![3., 5.], vec![6., 2.]]; let matres = vec![vec![24.,16.], vec![36.,20.]]; let result = mat_mult(&mat1, &mat2); println!("result is {} x {}", result.len(), result[0].len()); for i in 0..result.len() { for j in 0..result[i].len() { assert_eq!(result[i][j], matres[i][j]); } } } // // Problem 3 // #[test] fn test_sieve_basic() { assert_eq!(vec![2,3,5,7,11], sieve(12)); } // // Problem 4 // #[test] fn test_hanoi_1_disks() { let result = hanoi(1, Peg::A, Peg::B, Peg::C); assert_eq!(vec![(Peg::A, Peg::C)], result); assert_eq!(1, result.len()); } #[test] fn test_hanoi_2_disks() { let result = hanoi(2, Peg::A, Peg::B, Peg::C); assert_eq!(vec![(Peg::A, Peg::B), (Peg::A, Peg::C), (Peg::B, Peg::C)], result); }
//! This module handles the deserialization of the humble bundle monthly trove metadata feed.
//! It provides operations that deal with the contents of the feed itself.
use crate::cache::Cache;
use chrono::{NaiveDateTime, Utc};
use failure::Error;
use log::{debug, info, warn};
use select::{document::Document, predicate::Attr};
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::str;

/// Countdown timer state embedded in the feed (JSON keys carry a
/// `|datetime` suffix, hence the explicit renames).
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TimerOptions {
    #[serde(rename = "currentTime|datetime")]
    pub current_time: String,
    #[serde(rename = "nextAdditionTime|datetime")]
    pub next_addition_time: String,
}

/// Download locations for a single artifact.
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "snake_case")]
pub struct Url {
    pub web: String,
    pub bittorrent: Option<String>,
}

/// One downloadable build of a product. Commented-out fields exist in the
/// feed but are unused here.
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "snake_case")]
pub struct Download {
    //pub uploaded_at: Option<String>,
    pub machine_name: String,
    pub name: String,
    pub url: Url,
    pub file_size: u64,
    //pub small: Option<u8>,
    pub md5: String,
    //pub sha1: Option<String>,
    pub size: Option<String>,
    //pub timestamp: Option<u64>,
}

/// Media shown in the product carousel.
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct CarouselContent {
    pub youtube_link: Option<Vec<String>>,
    pub thumbnail: Vec<String>,
    pub screenshot: Vec<String>,
}

#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct Publisher {
    pub publisher_name: String,
    pub publisher_uri: Option<String>,
}

#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct Developer {
    pub developer_name: String,
    pub developer_url: Option<String>,
}

/// A single trove product entry.
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct Product {
    pub all_access: bool,
    pub background_image: Option<String>, // can be null
    pub background_color: Option<String>, // can be null
    pub carousel_content: CarouselContent,
    pub date_added: u32,
    pub description_text: String,
    pub developers: Option<Vec<Developer>>,
    pub downloads: HashMap<String, Download>,
    pub human_name: String,
    pub humble_original: Option<bool>, // can be null
    pub image: String,
    pub logo: Option<String>,
    #[serde(rename = "machine_name")]
    pub machine_name: String,
    pub marketing_blurb: Value, //Map {text, style} or String,
    pub popularity: u16,
    pub publishers: Value, // can be null Vec<Publisher>,
    pub trove_showcase_css: Option<String>, // can be null
    pub youtube_link: Option<String>, // can be null
}

/// Membership test for a product list.
trait ProductVec {
    fn contains(&self, machine_name: &str) -> bool;
}

impl ProductVec for Vec<Product> {
    // NOTE(review): the trait declares this parameter as `machine_name`,
    // but the implementation matches on `human_name` — confirm which key
    // callers (e.g. `diff`) actually intend to compare on.
    fn contains(&self, human_name: &str) -> bool {
        for product in self.iter() {
            if product.human_name == human_name {
                return true;
            }
        }
        return false;
    }
}

/// Top-level trove feed document.
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Feed {
    pub all_access: Vec<String>,
    pub download_platform_order: Vec<String>,
    pub newly_added: Vec<Product>,
    // pub display_item_data: Value,
    pub countdown_timer_options: TimerOptions,
    pub standard_products: Vec<Product>,
    //pub chunks: u8,
    //pub games_per_chunk: u8,
}

impl Feed {
    // Sort products so the most recently added come first.
    fn newest_to_oldest(&mut self) {
        self.standard_products.sort_by_key(|p| p.date_added);
        self.standard_products.reverse();
    }

    // Case-insensitive sort by display name.
    fn alphabetically(&mut self) {
        self.standard_products
            .sort_by_key(|p| p.human_name.to_lowercase());
    }

    // Cover-image URLs of every standard product.
    fn images(&self) -> Vec<&str> {
        self.standard_products
            .iter()
            .map(|product| product.image.as_str())
            .collect()
    }
}

/// Cache-backed access to the remote trove feed endpoints.
trait TroveCache {
    fn chunk_url(&self, i: usize) -> String;
    fn trove_url(&self) -> &'static str;
    fn feed_doc(&self) -> Result<Value, Error>;
    fn chunks(&self, root: &Value) -> usize;
    fn get_trove_feed(&self) -> Result<Value, Error>;
    fn invalidate(&self) -> Result<(), Error>;
}

impl TroveCache for Cache {
    // URL of the i-th paginated product chunk.
    fn chunk_url(&self, i: usize) -> String {
        format!(
            "https://www.humblebundle.com/api/v1/trove/chunk?property=start&direction=desc&index={}",
            i
        )
    }

    // Landing page that embeds the feed JSON.
    fn trove_url(&self) -> &'static str
    {
        "https://www.humblebundle.com/subscription/trove"
    }

    /// Downloads the trove landing page and parses the JSON document
    /// embedded in the `webpack-monthly-trove-data` element.
    fn feed_doc(&self) -> Result<Value, Error> {
        let text = self.retrieve(self.trove_url())?;
        let doc = Document::from(str::from_utf8(&text)?);
        // NOTE(review): `unwrap` panics if the page layout changes and the
        // element disappears — consider surfacing an Error instead.
        let data = doc
            .find(Attr("id", "webpack-monthly-trove-data"))
            .next()
            .unwrap()
            .text();
        let root: Value = serde_json::from_str(data.as_str())?;
        Ok(root)
    }

    /// Number of paginated product chunks advertised by the feed root.
    fn chunks(&self, root: &Value) -> usize {
        debug!("Extracting number of chunks");
        let chunks: usize = match &root["chunks"] {
            Value::Number(number) => {
                number.as_u64().expect("Unable to convert chunks to u64") as usize
            }
            _ => panic!("Unable to get chunks value!"),
        };
        chunks
    }

    /// Assembles the full feed: fetches every chunk and splices the combined
    /// product list into the root document under `standardProducts`.
    fn get_trove_feed(&self) -> Result<Value, Error> {
        let mut root = self.feed_doc()?;
        let chunks = self.chunks(&root);

        debug!("Getting product list");
        let mut products = Vec::new();
        // match root
        //     .get_mut("standardProducts")
        //     .expect("Unable to get product list")
        // {
        //     Value::Array(array) => array,
        //     _ => panic!("Unexpected value in standard_products field"),
        // };
        for i in 0..chunks {
            let bytes = self.retrieve(self.chunk_url(i).as_str())?;
            let chunk: Vec<Value> = serde_json::from_str(str::from_utf8(&bytes)?)?;
            products.extend(chunk);
        }
        root.as_object_mut()
            .expect("Unable to get root")
            .insert("standardProducts".to_string(), Value::Array(products));
        Ok(root)
    }

    fn invalidate(&self) -> Result<(), Error> {
        // This is a bit weird. We retrieve the cached value only to determine
        // how many chunk urls we need to invalidate. This is needed because we
        // do not save the extracted chunk value in our exports.
        let root = self.feed_doc()?;
        let chunks = self.chunks(&root);
        // NOTE(review): these calls appear to resolve to Cache's inherent
        // `invalidate(url)` method (inherent methods take precedence over
        // this same-named trait method) — confirm against Cache's API.
        self.invalidate(self.trove_url())?;
        for i in 0..chunks {
            self.invalidate(self.chunk_url(i).as_str())?;
        }
        Ok(())
    }
}

/// The deserialized feed together with its pretty-printed JSON source and
/// the cache used to fetch it.
pub struct TroveFeed {
    cache: Cache,
    json: String,
    feed: Feed,
}

impl TroveFeed {
    /// Fetches (re-fetching once if the cached copy has expired) the feed,
    /// deduplicates the product list by machine name, merges in newly added
    /// products, sorts alphabetically, and writes `trove_feed.json` plus a
    /// dated backup into `dir`.
    pub fn new(cache: Cache, dir: &PathBuf) -> Result<TroveFeed, Error> {
        let root = cache.get_trove_feed()?;
        let json = serde_json::to_string_pretty(&root)?;
        let mut trove_feed = TroveFeed {
            cache,
            json,
            feed: serde_json::from_value(root)?,
        };
        if trove_feed.expired() {
            eprintln!("Refreshing expired cache.");
            TroveCache::invalidate(&trove_feed.cache)?;
            return TroveFeed::new(trove_feed.cache, dir);
        }
        let mut products: Vec<String> = Vec::new();
        // Dedup the list
        trove_feed.feed.standard_products.retain(|p| {
            if !products.contains(&p.machine_name) {
                products.push(p.machine_name.clone());
                return true;
            }
            return false;
        });
        // Merge in newly added products that are not already present.
        let newly_added = &trove_feed.feed.newly_added;
        let standard_products = &mut trove_feed.feed.standard_products;
        newly_added.iter().for_each(|p| {
            if !products.contains(&p.machine_name) {
                products.push(p.machine_name.clone());
                standard_products.push(p.clone());
            }
        });
        trove_feed.feed.alphabetically();
        trove_feed.save(&dir.join("trove_feed.json"))?;
        trove_feed.backup(dir)?;
        Ok(trove_feed)
    }

    /// True once the feed's advertised next-addition time has passed.
    pub fn expired(&self) -> bool {
        let expiration = NaiveDateTime::parse_from_str(
            &self.feed.countdown_timer_options.next_addition_time,
            "%Y-%m-%dT%H:%M:%S%.f",
        )
        .expect("Error parsing nextAdditionTime");
        debug!("Expiration: {}", expiration);
        if Utc::now().timestamp() > expiration.timestamp() {
            return true;
        }
        return false;
    }

    /// Warms the cache with every product image, then all screenshots and
    /// thumbnails; failures are logged, not fatal.
    pub fn cache_images(&self) {
        self.feed.images().iter().for_each(|image| {
            if let Err(err) = self.cache.retrieve(image) {
                warn!("{}", err);
            }
        });
        self.cache_screenshots();
        self.cache_thumbnails();
    }

    /// Warms the cache with every carousel thumbnail.
    pub fn cache_thumbnails(&self) {
        (&self.feed.standard_products)
            .iter()
            .flat_map(|p| &p.carousel_content.thumbnail)
            .for_each(|url| {
                if let Err(err) = self.cache.retrieve(url.as_str()) {
                    warn!("{}",
                        err);
                }
            });
    }

    /// Warms the cache with every carousel screenshot.
    pub fn cache_screenshots(&self) {
        (&self.feed.standard_products)
            .iter()
            .flat_map(|p| &p.carousel_content.screenshot)
            .for_each(|url| {
                if let Err(err) = self.cache.retrieve(url.as_str()) {
                    warn!("{}", err);
                }
            });
    }

    /// Loads a previously saved feed from `path`, applying the same
    /// dedup/merge/sort steps as `new`.
    pub fn load(cache: Cache, path: &PathBuf) -> Result<TroveFeed, Error> {
        let mut json = String::new();
        let mut file = File::open(path)?;
        file.read_to_string(&mut json)?;
        let mut feed: Feed = serde_json::from_str(&json)?;
        let mut products: Vec<String> = Vec::new();
        // Dedup by machine name, keeping first occurrences.
        feed.standard_products.retain(|p| {
            if !products.contains(&p.machine_name) {
                products.push(p.machine_name.clone());
                return true;
            }
            return false;
        });
        let newly_added = &feed.newly_added;
        let standard_products = &mut feed.standard_products;
        newly_added.iter().for_each(|p| {
            if !products.contains(&p.machine_name) {
                products.push(p.machine_name.clone());
                standard_products.push(p.clone());
            }
        });
        feed.alphabetically();
        Ok(TroveFeed { cache, json, feed })
    }

    /// Writes the raw JSON snapshot to `path`.
    pub fn save(&self, path: &PathBuf) -> Result<(), Error> {
        let mut file = File::create(path)?;
        // NOTE(review): `write` may write only part of the buffer;
        // `write_all` would be safer — confirm before changing.
        file.write(self.json.as_bytes())?;
        Ok(())
    }

    /// Saves a dated copy of the feed JSON into `dir`.
    pub fn backup(&self, dir: &PathBuf) -> Result<(), Error> {
        let filename = Utc::now().format("trove_feed-%Y-%m-%d.json").to_string();
        info!("Creating backup: {}.", &filename);
        self.save(&dir.join(filename))?;
        Ok(())
    }

    /// Prints the titles added and removed relative to `older`.
    /// Comparison goes through `ProductVec::contains`, i.e. by human name.
    pub fn diff(&self, older: TroveFeed) {
        let mut new_names = Vec::new();
        for product in &self.feed.standard_products {
            if !older.feed.standard_products.contains(&product.human_name) {
                new_names.push(product.human_name.clone());
            }
        }
        let mut old_names = Vec::new();
        for product in older.feed.standard_products {
            if !&self.feed.standard_products.contains(&product.human_name) {
                old_names.push(product.human_name.clone());
            }
        }
        println!("Added titles:");
        println!("-------------");
        new_names.iter().for_each(|name| println!("{}", name));
        println!("");
        println!("Deleted titles:");
        println!("---------------");
        old_names.iter().for_each(|name| println!("{}", name));
    }

    /// Borrow of the (deduplicated, sorted) product list.
    pub fn products(&self) -> &Vec<Product> {
        &self.feed.standard_products
    }

    pub fn sort_newest_to_oldest(&mut self) {
        self.feed.newest_to_oldest();
    }

    pub fn sort_alphabetically(&mut self) {
        self.feed.alphabetically();
    }
}
use generic_array::GenericArray as Array; use typenum::*; use super::*; #[inline] pub fn translate<A: Copy, N>(a: Matrix<A, N>) -> Matrix<A, Add1<N>, Add1<N>> where A: Zero + One, N: Add<B1> + ArrayLength<A>, Add1<N>: ArrayLength<A> + ArrayLength<GenericArray<A, Add1<N>>> { unsafe { let mut c = mem::MaybeUninit::<Array<Array<A, Add1<N>>, Add1<N>>>::uninit(); for i in 0..N::to_usize() + 1 { for j in 0..N::to_usize() + 1 { ptr::write(&mut c.getMut()[i][j], if i == j { A::one } else if N::to_usize() == i { a[j] } else { A::zero }); } } Matrix(c.assume_init()) } } /// Homomorphism from GL(N, A) to PGL(N+1, A) #[inline] pub fn transform_linear<A: Copy, N>(a: Matrix<A, N, N>) -> Matrix<A, Add1<N>, Add1<N>> where A: Zero + One, N: Add<B1> + ArrayLength<A> + ArrayLength<GenericArray<A, N>>, Add1<N>: ArrayLength<A> + ArrayLength<GenericArray<A, Add1<N>>> { unsafe { let Matrix(a) = a; let mut c = mem::MaybeUninit::<Array<Array<A, Add1<N>>, Add1<N>>>::uninit(); for i in 0..N::to_usize() + 1 { for j in 0..N::to_usize() + 1 { ptr::write(&mut c.getMut()[i][j], if i < N::to_usize() && j < N::to_usize() { a[i][j] } else if i == j { A::one } else { A::zero }); } } Matrix(c.assume_init()) } } #[cfg(test)] mod tests { use generic_array::ArrayLength; use typenum::*; use ::*; use super::*; fn test_transform_linear_homomorphic<A: Copy, N>(a: Matrix<A, N, N>, b: Matrix<A, N, N>) -> bool where A: Zero + Mul + One, A::Output: Copy + PartialEq + Zero + AddAssign + One, N: Add<B1> + ArrayLength<A> + ArrayLength<GenericArray<A, N>> + ArrayLength<A::Output> + ArrayLength<GenericArray<A::Output, N>>, Add1<N>: ArrayLength<A> + ArrayLength<GenericArray<A, Add1<N>>> + ArrayLength<A::Output> + ArrayLength<GenericArray<A::Output, Add1<N>>>, Matrix<A, N, N>: Copy { transform_linear(a*b) == transform_linear(a)*transform_linear(b) } #[quickcheck] fn transform_linear_homomorphic_4by4_isize(a: Matrix<isize, U4, U4>, b: Matrix<isize, U4, U4>) -> bool { test_transform_linear_homomorphic(a, b) } }
/// A vector paired with the index of a currently "selected" element.
pub struct SelectList<T> {
    payload: Vec<T>,
    selected_idx: usize,
}

impl<T> SelectList<T> {
    /// Creates a list with an initial selection index (the index is not
    /// validated; `selected` simply returns `None` when out of bounds).
    pub fn new(payload: Vec<T>, selected_idx: usize) -> Self {
        Self {
            payload,
            selected_idx,
        }
    }

    /// Current selection index.
    pub fn selected_idx(&self) -> usize {
        self.selected_idx
    }

    /// Sets the selection index without bounds checking.
    pub fn set_selected_idx(&mut self, idx: usize) {
        self.selected_idx = idx;
    }

    /// Borrow of the selected element, if the index is in bounds.
    pub fn selected(&self) -> Option<&T> {
        self.payload.get(self.selected_idx)
    }

    /// Mutable borrow of the selected element, if the index is in bounds.
    pub fn selected_mut(&mut self) -> Option<&mut T> {
        self.payload.get_mut(self.selected_idx)
    }

    /// Removes and returns the element at `idx`, or `None` if out of range.
    /// If the selection pointed at the last element, it is pulled back one
    /// step so it stays in bounds after the list shrinks.
    pub fn remove(&mut self, idx: usize) -> Option<T> {
        if idx < self.payload.len() {
            if self.selected_idx + 1 == self.payload.len() && self.selected_idx > 0 {
                self.selected_idx -= 1;
            }
            Some(self.payload.remove(idx))
        } else {
            None
        }
    }

    /// Appends `v`; the first element ever pushed becomes the selection.
    pub fn push(&mut self, v: T) {
        // Idiom fix: `len() < 1` -> `is_empty()` (clippy::len_zero).
        if self.payload.is_empty() {
            self.selected_idx = 0;
        }
        self.payload.push(v);
    }
}

impl<T> std::ops::Deref for SelectList<T> {
    type Target = Vec<T>;

    fn deref(&self) -> &Self::Target {
        &self.payload
    }
}

impl<T> std::ops::DerefMut for SelectList<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.payload
    }
}

impl<T> Clone for SelectList<T>
where
    T: Clone,
{
    fn clone(&self) -> Self {
        Self {
            // Idiom fix: `Vec::clone` instead of a manual element-wise
            // `iter().map(T::clone).collect()`.
            payload: self.payload.clone(),
            selected_idx: self.selected_idx,
        }
    }
}
//! `iui`, the `i`mproved `u`ser `i`nterface crate, is a **simple** (about 4 kLOC of Rust), **small** (about 800kb, including `libui`), **easy to distribute** (one shared library) GUI library, providing a **Rusty** user interface library that binds to **native APIs** via the [libui](https://github.com/andlabs/libui) and the `ui-sys` bindings crate.
//!
//! `iui` wraps native retained mode GUI libraries, like Win32API on Windows, Cocoa on Mac OS X, and GTK+ on Linux and elsewhere. Thus all `iui` apps have a native look and feel and start from a highly performant base which is well integrated with the native ecosystem on each platform. Because it implements only the least common subset of these platform APIs, your apps will work on all platforms and won't have significant behavioral inconsistencies, with no additional effort on your part.
//!
//! To use the library, add the following to your `Cargo.toml`:
//!
//! ```toml
//! iui = "0.3"
//! ```
//!
//! To build a GUI app with `iui`, you must:
//! 1. create a [`UI`](https://docs.rs/iui/*/iui/struct.UI.html#method.init) handle, initializing the UI library and guarding against memory unsafety
//! 1. make a [window](https://docs.rs/iui/*/iui/controls/struct.Window.html), or a few, with title and platform-native decorations, into which your app will be drawn
//! 1. add all your [controls](https://docs.rs/iui/*/iui/controls/index.html), like buttons and text inputs, laid out with both axial and grid layout options
//! 1. implement some [callbacks](https://docs.rs/iui/*/iui/controls/struct.Button.html#method.on_clicked) for user input, taking full advantage of Rust's concurrency protections
//! 1. call [`UI::main`](https://docs.rs/iui/*/iui/struct.UI.html#method.main), or take control over the event processing with an [`EventLoop`](https://docs.rs/iui/*/iui/struct.EventLoop.html), and voilà! A GUI!
//!
//! For code examples, see the [examples](https://github.com/rust-native-ui/libui-rs/blob/trunk/iui/examples/)
//! directory.

#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate failure;
extern crate libc;
extern crate ui_sys;

// Internal helpers.
mod callback_helpers;
mod compile_tests;
// Public API surface.
pub mod controls;
pub mod draw;
mod error;
mod ffi_tools;
pub mod menus;
pub mod str_tools;
mod ui;

pub use error::UIError;
pub use ui::{EventLoop, UI};

/// Common imports are packaged into this module. It's meant to be glob-imported: `use iui::prelude::*`.
pub mod prelude {
    pub use controls::LayoutStrategy;
    pub use controls::{NumericEntry, TextEntry};
    pub use controls::{Window, WindowType};
    pub use ui::UI;
}
/// In-place bubble sort with early exit when a pass makes no swaps.
///
/// Fixed: an empty slice previously panicked on the `size - 1`
/// usize underflow.
pub fn bubble_sort<T: PartialOrd>(list: &mut [T]) {
    let size = list.len();
    if size < 2 {
        return; // nothing to sort; also guards `size - 1` underflow
    }
    for i in 0..(size - 1) {
        let mut swapped = false;
        for j in 0..(size - 1 - i) {
            if list[j] > list[j + 1] {
                list.swap(j, j + 1);
                swapped = true;
            }
        }
        if !swapped {
            break;
        }
    }
}

/// In-place selection sort.
///
/// Fixed: an empty slice previously panicked on the `size - 1`
/// usize underflow.
pub fn selection_sort<T: PartialOrd>(list: &mut [T]) {
    let size = list.len();
    if size < 2 {
        return; // nothing to sort; also guards `size - 1` underflow
    }
    for i in 0..(size - 1) {
        let mut min_index = i;
        for j in (i + 1)..(size) {
            if list[j] < list[min_index] {
                min_index = j;
            }
        }
        list.swap(min_index, i);
    }
}

/// In-place insertion sort.
///
/// `j` is an `i32` because the shifting loop must be able to run past the
/// front of the slice down to -1.
pub fn insertion_sort<T: PartialOrd + Copy>(list: &mut [T]) {
    for i in 1..(list.len()) {
        let key = list[i];
        let mut j = (i - 1) as i32;
        while j >= 0 && list[j as usize] > key {
            list[(j + 1) as usize] = list[j as usize];
            j -= 1;
        }
        list[(j + 1) as usize] = key;
    }
}

/// Merges two sorted slices into `result`, preserving stability
/// (ties take the element from `first_half`).
fn merge<T: PartialOrd + Copy>(first_half: &[T], second_half: &[T], result: &mut Vec<T>) {
    let s1 = first_half.len();
    let s2 = second_half.len();
    let mut i1 = 0_usize;
    let mut i2 = 0_usize;
    loop {
        if i1 >= s1 {
            // First half exhausted: drain the rest of the second.
            while i2 < s2 {
                result.push(second_half[i2]);
                i2 += 1;
            }
            break;
        } else if i2 >= s2 {
            // Second half exhausted: drain the rest of the first.
            while i1 < s1 {
                result.push(first_half[i1]);
                i1 += 1;
            }
            break;
        }
        if first_half[i1] <= second_half[i2] {
            result.push(first_half[i1]);
            i1 += 1;
        } else {
            result.push(second_half[i2]);
            i2 += 1;
        }
    }
}

/// Recursive half of merge sort. Precondition: `list` is non-empty
/// (guaranteed by the `merge_sort` guard and the halving logic).
fn recursive_merge_sort<T: PartialOrd + Copy>(list: &[T], result: &mut Vec<T>) {
    let size = list.len();
    if size == 1 {
        result.push(list[0]);
        return;
    }
    let middle = size / 2;
    let mut first_half = Vec::with_capacity(middle);
    recursive_merge_sort(&list[..middle], &mut first_half);
    let mut second_half = Vec::with_capacity(size - middle);
    recursive_merge_sort(&list[middle..], &mut second_half);
    let mut acc: Vec<T> = Vec::with_capacity(size);
    merge(&first_half, &second_half, &mut acc);
    for i in acc {
        result.push(i);
    }
}

/// Stable merge sort; sorts `list` in place via an auxiliary buffer.
///
/// Fixed: an empty slice previously recursed forever (`size / 2 == 0`
/// kept re-splitting an empty slice).
pub fn merge_sort<T: PartialOrd + Copy>(list: &mut [T]) {
    if list.len() < 2 {
        return; // empty/single input: already sorted; guards the recursion
    }
    let mut result: Vec<T> = Vec::with_capacity(list.len());
    recursive_merge_sort(list, &mut result);
    list.copy_from_slice(&result);
}

/// Lomuto partition around `list[right_most]`; returns the pivot's final
/// index. Indices are `i32` so `left_most - 1` and `right_most - 1` can go
/// negative in the recursion.
fn partition<T: PartialOrd + Copy>(list: &mut [T], left_most: i32, right_most: i32) -> i32 {
    let pivot = list[right_most as usize];
    let mut i = left_most - 1;
    let rm_usize = right_most as usize;
    let lm_usize = left_most as usize;
    for j in lm_usize..(rm_usize + 1) {
        if list[j] < pivot {
            i += 1;
            list.swap(i as usize, j);
        }
    }
    list.swap((i + 1) as usize, rm_usize);
    i + 1
}

/// Recursive quicksort over the inclusive range [left_most, right_most].
fn recursive_quick_sort<T: PartialOrd + Copy>(list: &mut [T], left_most: i32, right_most: i32) {
    if left_most < right_most {
        let p_index = partition(list, left_most, right_most);
        recursive_quick_sort(list, left_most, p_index - 1);
        recursive_quick_sort(list, p_index + 1, right_most);
    }
}

/// In-place quicksort. Safe on empty input: `len() - 1` becomes -1 in i32
/// and the recursion guard rejects it immediately.
pub fn quick_sort<T: PartialOrd + Copy>(list: &mut [T]) {
    recursive_quick_sort(list, 0, list.len() as i32 - 1);
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bubble_sort() {
        let mut v = [3, 2, 5, 6, 1, 4];
        bubble_sort(&mut v);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

    #[test]
    fn test_selection_sort() {
        let mut v = [4, 6, 3, 1, 5, 2];
        selection_sort(&mut v);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

    #[test]
    fn test_insertion_sort() {
        let mut v = [1, 5, 3, 6, 2, 4];
        insertion_sort(&mut v);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

    #[test]
    fn test_merge_sort() {
        let mut v = [2, 3, 4, 1, 6, 5];
        merge_sort(&mut v);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

    #[test]
    fn test_quick_sort() {
        let mut v = [6, 1, 2, 5, 4, 3];
        quick_sort(&mut v);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

    // Regression test: empty and single-element inputs must not panic
    // or hang.
    #[test]
    fn test_empty_and_single() {
        let mut empty: [i32; 0] = [];
        bubble_sort(&mut empty);
        selection_sort(&mut empty);
        insertion_sort(&mut empty);
        merge_sort(&mut empty);
        quick_sort(&mut empty);
        let mut one = [1];
        bubble_sort(&mut one);
        merge_sort(&mut one);
        assert_eq!(one, [1]);
    }
}
use std::slice; use ::ffi; use traits::FromRaw; pub struct MaterialProperty<'a> { raw: &'a ffi::AiMaterialProperty, } impl<'a> FromRaw<'a, MaterialProperty<'a>> for MaterialProperty<'a> { type Raw = *const ffi::AiMaterialProperty; #[inline(always)] fn from_raw(raw: &'a Self::Raw) -> MaterialProperty<'a> { MaterialProperty { raw: unsafe { raw.as_ref().expect("MaterialProperty pointer provided by Asssimp was NULL") } } } } impl<'a> MaterialProperty<'a> { //TODO } pub struct Material<'a> { raw: &'a ffi::AiMaterial, } impl<'a> FromRaw<'a, Material<'a>> for Material<'a> { type Raw = *const ffi::AiMaterial; #[inline(always)] fn from_raw(raw: &'a Self::Raw) -> Material<'a> { Material { raw: unsafe { raw.as_ref().expect("Material pointer provided by Assimp was NULL") } } } } impl<'a> Material<'a> { pub fn properties(&self) -> Option<Box<Iterator<Item=MaterialProperty<'a>>>> { if self.raw.num_properties == 0 || self.raw.properties.is_null() || self.raw.num_allocated == 0 { None } else { Some(Box::new(unsafe { slice::from_raw_parts(self.raw.properties, self.raw.num_properties as usize) .iter() .map(MaterialProperty::from_raw) })) } } }
use std::borrow::{Borrow, BorrowMut};
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};

/// Original position of element in source code
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Default, Hash)]
pub struct Pos {
    /// One-based line number
    pub line: usize,
    /// One-based column number
    pub column: usize,
}

impl fmt::Debug for Pos {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Pos({}:{})", self.line, self.column)
    }
}

impl fmt::Display for Pos {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}", self.line, self.column)
    }
}

/// Start and end positions of a source region.
#[derive(Copy, Clone, Debug, Default)]
pub struct Span {
    pub start: Pos,
    pub end: Pos,
}

/// Represents the location of a AST node
#[derive(Clone, Debug, Copy, Default)]
#[allow(missing_docs)]
pub struct Spanned<T: ?Sized> {
    pub span: Span,
    pub node: T,
}

// Display, equality, ordering, and hashing all delegate to the wrapped
// node — the span is deliberately ignored so source positions never
// affect comparisons (this keeps Eq/Hash/Borrow<str> mutually consistent).
impl<T: fmt::Display> fmt::Display for Spanned<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.node.fmt(f)
    }
}

impl<T: Clone> Spanned<T> {
    #[inline]
    #[allow(missing_docs)]
    pub fn clone_inner(&self) -> T {
        self.node.clone()
    }
}

impl<T: PartialEq> PartialEq for Spanned<T> {
    fn eq(&self, other: &Self) -> bool {
        self.node.eq(&other.node)
    }
}

impl<T: PartialOrd> PartialOrd for Spanned<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.node.partial_cmp(&other.node)
    }
}

impl<T: Ord> Ord for Spanned<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.node.cmp(&other.node)
    }
}

// `T: Ord` implies `T: Eq`, which Spanned's PartialEq delegates to.
impl<T: Ord> Eq for Spanned<T> {}

impl<T: ?Sized> Deref for Spanned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.node
    }
}

impl<T: ?Sized> DerefMut for Spanned<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.node
    }
}

impl<T: Hash> Hash for Spanned<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.node.hash(state)
    }
}

impl Borrow<str> for Spanned<String> {
    fn borrow(&self) -> &str {
        self.node.as_str()
    }
}

impl BorrowMut<str> for Spanned<String> {
    fn borrow_mut(&mut self) -> &mut str {
        self.node.as_mut_str()
    }
}

impl<T> Spanned<T> {
    /// Builds a `Spanned` from a pest span's 1-based line/column range.
    pub(crate) fn new(node: T, pair_span: pest::Span<'_>) -> Spanned<T> {
        let ((start_line, start_column), (end_line, end_column)) = (
            pair_span.start_pos().line_col(),
            pair_span.end_pos().line_col(),
        );
        Spanned {
            node,
            span: Span {
                start: Pos {
                    line: start_line,
                    column: start_column,
                },
                end: Pos {
                    line: end_line,
                    column: end_column,
                },
            },
        }
    }

    /// Unwraps the node, discarding the span.
    #[inline]
    pub fn into_inner(self) -> T {
        self.node
    }

    /// Get start position
    #[inline]
    pub fn position(&self) -> Pos {
        self.span.start
    }

    /// Maps the whole `Spanned` through `f`, re-attaching the original span
    /// to the result.
    #[inline]
    pub(crate) fn pack<F: FnOnce(Self) -> R, R>(self, f: F) -> Spanned<R> {
        Spanned {
            span: self.span,
            node: f(self),
        }
    }
}
use hacspec_dev::prelude::*;
use hacspec_ed25519::*;
use hacspec_edwards25519::*;
use hacspec_lib::*;
use quickcheck::QuickCheck;

// Test vectors from https://datatracker.ietf.org/doc/rfc8032
create_test_vectors!(
    IetfTestVector,
    secret_key: String,
    public_key: String,
    message: String,
    signature: String
);

// Test vectors from https://eprint.iacr.org/2020/1244.pdf
// https://github.com/novifinancial/ed25519-speccheck
create_test_vectors!(
    CasesTestVector,
    message: String,
    pub_key: String,
    signature: String
);

// Public-key derivation matches the RFC 8032 vectors.
#[test]
fn test_secret_to_public() {
    let v: Vec<IetfTestVector> = IetfTestVector::from_file("tests/ietf_test_vectors.json");
    for t in v {
        let sk = SecretKey::from_hex(&t.secret_key);
        let pk = PublicKey::from_hex(&t.public_key);
        let pk_r = secret_to_public(sk);
        assert_bytes_eq!(pk, pk_r);
    }
}

// Signing matches the RFC 8032 vectors.
#[test]
fn test_sign() {
    let v: Vec<IetfTestVector> = IetfTestVector::from_file("tests/ietf_test_vectors.json");
    for t in v {
        let sk = SecretKey::from_hex(&t.secret_key);
        let msg = ByteSeq::from_hex(&t.message);
        let sig = Signature::from_hex(&t.signature);
        let sig_r = sign(sk, &msg);
        assert_bytes_eq!(sig, sig_r);
    }
}

// All four verification flavors accept the RFC 8032 vectors.
#[test]
fn test_verify() {
    let v: Vec<IetfTestVector> = IetfTestVector::from_file("tests/ietf_test_vectors.json");
    for t in v {
        let pk = PublicKey::from_hex(&t.public_key);
        let msg = ByteSeq::from_hex(&t.message);
        let sig = Signature::from_hex(&t.signature);
        assert!(zcash_verify(pk, sig, &msg).is_ok());
        assert!(ietf_cofactored_verify(pk, sig, &msg).is_ok());
        assert!(ietf_cofactorless_verify(pk, sig, &msg).is_ok());
        assert!(alg2_verify(pk, sig, &msg).is_ok());
    }
}

// Round-trip property (quickcheck): a fresh signature verifies under
// every flavor.
#[test]
fn test_sign_verify() {
    fn test_q(sk: (u128, u128), msg: String) -> bool {
        let (sk1, sk2) = sk;
        // Assemble a 32-byte secret key from the two random u128 halves.
        let sk = [sk2.to_le_bytes(), sk1.to_le_bytes()].concat();
        let sk = SecretKey::from_public_slice(&sk);
        let pk = secret_to_public(sk);
        let msg = &ByteSeq::from_public_slice(msg.as_bytes());
        let sig = sign(sk, &msg);
        zcash_verify(pk, sig, &msg).is_ok()
            && ietf_cofactored_verify(pk, sig, &msg).is_ok()
            && ietf_cofactorless_verify(pk, sig, &msg).is_ok()
            && alg2_verify(pk, sig, &msg).is_ok()
    }
    QuickCheck::new()
        .tests(30)
        .quickcheck(test_q as fn((u128, u128), String) -> bool);
}

// Batch verification accepts 32 valid (pk, msg, sig) entries.
#[test]
fn test_batch() {
    let entropy = rand::random_byte_vec(512);
    let entropy = ByteSeq::from_public_slice(&entropy);
    let mut entries = Seq::<BatchEntry>::new(32);
    for i in 0..32usize {
        let sk = rand::random_byte_vec(32);
        let sk = SecretKey::from_public_slice(&sk);
        let pk = secret_to_public(sk);
        let msg = ByteSeq::from_public_slice(b"BatchVerifyTest");
        let sig = sign(sk, &msg);
        entries[i] = BatchEntry(pk, msg, sig);
    }
    assert!(zcash_batch_verify(&entries, &entropy).is_ok());
    assert!(ietf_cofactored_batch_verify(&entries, &entropy).is_ok());
    assert!(ietf_cofactorless_batch_verify(&entries, &entropy).is_ok());
    assert!(alg3_batch_verify(&entries, &entropy).is_ok());
}

// One bad signature makes every batch flavor reject, while per-entry
// verification still accepts every good entry and rejects only the bad one.
#[test]
fn test_batch_bad() {
    let entropy = rand::random_byte_vec(512);
    let entropy = ByteSeq::from_public_slice(&entropy);
    let mut entries = Seq::<BatchEntry>::new(32);
    let bad_index = 10;
    for i in 0..32usize {
        let sk = rand::random_byte_vec(32);
        let sk = SecretKey::from_public_slice(&sk);
        let pk = secret_to_public(sk);
        let msg = ByteSeq::from_public_slice(b"BatchVerifyTest");
        // The bad entry signs a different message than the one stored.
        let sig = if i != bad_index {
            sign(sk, &msg)
        } else {
            sign(sk, &ByteSeq::from_public_slice(b"badmsg"))
        };
        entries[i] = BatchEntry(pk, msg, sig);
    }
    assert!(zcash_batch_verify(&entries, &entropy).is_err());
    assert!(ietf_cofactored_batch_verify(&entries, &entropy).is_err());
    assert!(ietf_cofactorless_batch_verify(&entries, &entropy).is_err());
    assert!(alg3_batch_verify(&entries, &entropy).is_err());
    for i in 0..32usize {
        let BatchEntry(pk, msg, signature) = entries[i].clone();
        if i != bad_index {
            assert!(zcash_verify(pk, signature, &msg).is_ok());
            assert!(ietf_cofactored_verify(pk, signature, &msg).is_ok());
            assert!(ietf_cofactorless_verify(pk, signature, &msg).is_ok());
            assert!(alg2_verify(pk, signature, &msg).is_ok());
        } else {
            assert!(zcash_verify(pk, signature, &msg).is_err());
            assert!(ietf_cofactored_verify(pk, signature, &msg).is_err());
            assert!(ietf_cofactorless_verify(pk, signature, &msg).is_err());
            assert!(alg2_verify(pk, signature, &msg).is_err());
        }
    }
}

// Testing test vectors from https://eprint.iacr.org/2020/1244.pdf
// https://github.com/novifinancial/ed25519-speccheck
#[test]
fn test_vectors_zcash() {
    let v: Vec<CasesTestVector> = CasesTestVector::from_file("tests/cases.json");
    // Indices of the speccheck cases this flavor is expected to accept.
    let expected_success = [0, 1, 2, 3, 4, 5, 9, 10, 11];
    println!("Zcash spec");
    for i in 0..v.len() {
        let msg = ByteSeq::from_hex(&v[i].message);
        let pub_key = CompressedEdPoint::from_hex(&v[i].pub_key);
        let signature = Signature::from_hex(&v[i].signature);
        let result = zcash_verify(pub_key, signature, &msg);
        println!("Test {}: {:?}", i, result);
        if expected_success.contains(&i) {
            assert!(result.is_ok());
        } else {
            assert!(result.is_err());
        }
    }
}

#[test]
fn test_vectors_ieft_cofactored() {
    let v: Vec<CasesTestVector> = CasesTestVector::from_file("tests/cases.json");
    let expected_success = [0, 1, 2, 3, 4, 5];
    println!("ietf cofactored spec");
    for i in 0..v.len() {
        let msg = ByteSeq::from_hex(&v[i].message);
        let pub_key = CompressedEdPoint::from_hex(&v[i].pub_key);
        let signature = Signature::from_hex(&v[i].signature);
        let result = ietf_cofactored_verify(pub_key, signature, &msg);
        println!("Test {}: {:?}", i, result);
        if expected_success.contains(&i) {
            assert!(result.is_ok());
        } else {
            assert!(result.is_err());
        }
    }
}

#[test]
fn test_vectors_ieft_cofactorless() {
    let v: Vec<CasesTestVector> = CasesTestVector::from_file("tests/cases.json");
    let expected_success = [0, 1, 2, 3];
    println!("ietf cofactorless spec");
    for i in 0..v.len() {
        let msg = ByteSeq::from_hex(&v[i].message);
        let pub_key = CompressedEdPoint::from_hex(&v[i].pub_key);
        let signature = Signature::from_hex(&v[i].signature);
        let result = ietf_cofactorless_verify(pub_key, signature, &msg);
        println!("Test {}: {:?}", i, result);
        if expected_success.contains(&i) {
            assert!(result.is_ok());
        } else {
            assert!(result.is_err());
        }
    }
}

#[test]
fn test_vectors_alg2() {
    let v: Vec<CasesTestVector> = CasesTestVector::from_file("tests/cases.json");
    let expected_success = [2, 3, 4, 5];
    // NOTE(review): this label says "cofactored" but the test exercises
    // alg2 — looks like a copy-paste; the string is runtime output, so it
    // is left unchanged here.
    println!("ietf cofactored spec");
    for i in 0..v.len() {
        let msg = ByteSeq::from_hex(&v[i].message);
        let pub_key = CompressedEdPoint::from_hex(&v[i].pub_key);
        let signature = Signature::from_hex(&v[i].signature);
        let result = alg2_verify(pub_key, signature, &msg);
        println!("Test {}: {:?}", i, result);
        if expected_success.contains(&i) {
            assert!(result.is_ok());
        } else {
            assert!(result.is_err());
        }
    }
}
use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; #[async_metronome::test] async fn test_ordering() { let ai1 = Arc::new(AtomicUsize::new(0)); let ai2 = ai1.clone(); let ai3 = ai1.clone(); let ordering = Ordering::SeqCst; let task1 = async move { ai1.compare_and_swap(0, 1, ordering); async_metronome::await_tick!(3); assert_eq!(ai1.load(ordering), 3); }; let task2 = async move { async_metronome::await_tick!(1); assert_eq!(ai2.compare_and_swap(1, 2, ordering), 1); async_metronome::await_tick!(3); assert_eq!(ai2.load(ordering), 3); }; let task3 = async move { async_metronome::await_tick!(2); assert_eq!(ai3.compare_and_swap(2, 3, ordering), 2); }; let s1 = async_metronome::spawn(task1); let s2 = async_metronome::spawn(task2); let s3 = async_metronome::spawn(task3); s1.await; s2.await; s3.await; }
use std::collections::BTreeMap;

/// Whether a transaction only reads or may also write.
#[derive(Eq, PartialEq, Copy, Clone)]
pub enum TransactionType {
    Read,
    Write,
}

/// Bookkeeping for one in-flight transaction: its kind, a page-offset map,
/// the WAL frame count, and the database file size at transaction start.
pub(super) struct TransactionState {
    pub(super) ty: TransactionType,
    pub(super) offset_map: BTreeMap<u32, u64>,
    pub(super) frame_count: u32,
    pub(super) db_file_size: u64,
}

impl TransactionState {
    /// Starts tracking a transaction with an initially empty offset map.
    pub(super) fn new(ty: TransactionType, frame_count: u32, db_file_size: u64) -> TransactionState {
        Self {
            offset_map: BTreeMap::new(),
            db_file_size,
            frame_count,
            ty,
        }
    }
}
use bellman::gadgets::multipack;
use bellman::groth16;
use bitvec::{order::Lsb0, view::AsBits};
use blake2s_simd::Params as Blake2sParams;
use bls12_381::Bls12;
use ff::{Field, PrimeField};
use group::{Curve, GroupEncoding};
use rand::rngs::OsRng;
use std::io;
use std::time::Instant;

use super::coin::merkle_hash;
use crate::circuit::spend_contract::SpendContract;
use crate::error::{Error, Result};
use crate::serial::{Decodable, Encodable};

/// Values revealed publicly alongside a spend proof. `make_outputs` turns
/// these into the 7 Groth16 public inputs, so the field set and their
/// encoding must stay in sync with `SpendContract`'s public-input layout.
pub struct SpendRevealedValues {
    /// Pedersen commitment to the spent value:
    /// `value * G_value + randomness_value * G_rand`.
    pub value_commit: jubjub::SubgroupPoint,
    /// BLAKE2s-256 over `secret || serial` with the nullifier
    /// personalization; uniquely marks the coin as spent.
    pub nullifier: [u8; 32],

    // This should not be here, we just have it for debugging
    //coin: [u8; 32],
    /// Merkle root obtained by hashing the coin commitment up `merkle_path`.
    pub merkle_root: bls12_381::Scalar,
    /// Public half of the signature keypair (`G_spend * signature_secret`).
    pub signature_public: jubjub::SubgroupPoint,
}

impl SpendRevealedValues {
    /// Recomputes, outside the circuit, every value the proof reveals.
    ///
    /// `merkle_path` lists sibling hashes from leaf to root together with a
    /// flag: `true` means the sibling sits on the *left* (our node is the
    /// right child) — see the hash-order swap in the loop below.
    fn compute(
        value: u64,
        randomness_value: &jubjub::Fr,
        serial: &jubjub::Fr,
        randomness_coin: &jubjub::Fr,
        secret: &jubjub::Fr,
        merkle_path: &[(bls12_381::Scalar, bool)],
        signature_secret: &jubjub::Fr,
    ) -> Self {
        // Homomorphic Pedersen value commitment (same generators Sapling uses).
        let value_commit = (zcash_primitives::constants::VALUE_COMMITMENT_VALUE_GENERATOR
            * jubjub::Fr::from(value))
            + (zcash_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR
                * randomness_value);

        // nullifier = BLAKE2s-256_{PRF_NF}(secret || serial)
        let mut nullifier = [0; 32];
        nullifier.copy_from_slice(
            Blake2sParams::new()
                .hash_length(32)
                .personal(zcash_primitives::constants::PRF_NF_PERSONALIZATION)
                .to_state()
                .update(&secret.to_bytes())
                .update(&serial.to_bytes())
                .finalize()
                .as_bytes(),
        );

        let public = zcash_primitives::constants::SPENDING_KEY_GENERATOR * secret;
        let signature_public =
            zcash_primitives::constants::SPENDING_KEY_GENERATOR * signature_secret;

        // Coin commitment = BLAKE2s_{CRH_IVK}(pubkey || value_le || serial || r);
        // only used as the Merkle leaf below, never revealed directly.
        let mut coin = [0; 32];
        coin.copy_from_slice(
            Blake2sParams::new()
                .hash_length(32)
                .personal(zcash_primitives::constants::CRH_IVK_PERSONALIZATION)
                .to_state()
                .update(&public.to_bytes())
                .update(&value.to_le_bytes())
                .update(&serial.to_bytes())
                .update(&randomness_coin.to_bytes())
                .finalize()
                .as_bytes(),
        );

        // Leaf = u-coordinate of the Pedersen hash of the coin bits.
        let merkle_root =
            jubjub::ExtendedPoint::from(zcash_primitives::pedersen_hash::pedersen_hash(
                zcash_primitives::pedersen_hash::Personalization::NoteCommitment,
                multipack::bytes_to_bits_le(&coin),
            ));
        let affine = merkle_root.to_affine();
        // Shadowed on purpose: from here on `merkle_root` is the running hash.
        let mut merkle_root = affine.get_u();

        for (i, (right, is_right)) in merkle_path.iter().enumerate() {
            if *is_right {
                merkle_root = merkle_hash(i, &right.to_repr(), &merkle_root.to_repr());
            } else {
                merkle_root = merkle_hash(i, &merkle_root.to_repr(), &right.to_repr());
            }
        }

        SpendRevealedValues {
            value_commit,
            nullifier,
            merkle_root,
            signature_public,
        }
    }

    /// Lays out the revealed values as the verifier's public-input vector:
    /// [0..2) value-commit (u, v), [2..4) nullifier (2 packed field elements),
    /// [4] merkle root, [5..7) signature pubkey (u, v).
    fn make_outputs(&self) -> [bls12_381::Scalar; 7] {
        let mut public_input = [bls12_381::Scalar::zero(); 7];

        // CV
        {
            let result = jubjub::ExtendedPoint::from(self.value_commit);
            let affine = result.to_affine();
            //let (u, v) = (affine.get_u(), affine.get_v());
            let u = affine.get_u();
            let v = affine.get_v();
            public_input[0] = u;
            public_input[1] = v;
        }

        // NF
        {
            // Pack the hash as inputs for proof verification.
            let hash = multipack::bytes_to_bits_le(&self.nullifier);
            let hash = multipack::compute_multipacking(&hash);

            // There are 2 chunks for a blake hash
            assert_eq!(hash.len(), 2);

            public_input[2] = hash[0];
            public_input[3] = hash[1];
        }

        // Not revealed. We leave this code here for debug
        // Coin
        /*{
            // Pack the hash as inputs for proof verification.
            let hash = multipack::bytes_to_bits_le(&self.coin);
            let hash = multipack::compute_multipacking(&hash);

            // There are 2 chunks for a blake hash
            assert_eq!(hash.len(), 2);

            public_input[4] = hash[0];
            public_input[5] = hash[1];
        }*/

        public_input[4] = self.merkle_root;

        {
            let result = jubjub::ExtendedPoint::from(self.signature_public);
            let affine = result.to_affine();
            //let (u, v) = (affine.get_u(), affine.get_v());
            let u = affine.get_u();
            let v = affine.get_v();
            public_input[5] = u;
            public_input[6] = v;
        }

        public_input
    }
}

// Wire encoding: fields in declaration order. Decode below must mirror this
// order exactly.
impl Encodable for SpendRevealedValues {
    fn encode<S: io::Write>(&self, mut s: S) -> Result<usize> {
        let mut len = 0;
        len += self.value_commit.encode(&mut s)?;
        len += self.nullifier.encode(&mut s)?;
        len += self.merkle_root.encode(&mut s)?;
        len += self.signature_public.encode(s)?;
        Ok(len)
    }
}

impl Decodable for SpendRevealedValues {
    fn decode<D: io::Read>(mut d: D) -> Result<Self> {
        Ok(Self {
            value_commit: Decodable::decode(&mut d)?,
            nullifier: Decodable::decode(&mut d)?,
            merkle_root: Decodable::decode(&mut d)?,
            signature_public: Decodable::decode(d)?,
        })
    }
}

/// Runs the (slow) Groth16 trusted setup for the spend circuit with every
/// witness slot empty, producing proving/verifying parameters.
pub fn setup_spend_prover() -> groth16::Parameters<Bls12> {
    println!("Making random params...");
    let start = Instant::now();
    let params = {
        let c = SpendContract {
            value: None,
            randomness_value: None,
            serial: None,
            randomness_coin: None,
            secret: None,
            branch_0: None,
            is_right_0: None,
            branch_1: None,
            is_right_1: None,
            branch_2: None,
            is_right_2: None,
            branch_3: None,
            is_right_3: None,
            signature_secret: None,
        };
        groth16::generate_random_parameters::<Bls12, _, _>(c, &mut OsRng).unwrap()
    };
    println!("Setup: [{:?}]", start.elapsed());
    params
}

/// Produces a spend proof plus the matching revealed values.
///
/// The circuit is hard-wired to a depth-4 Merkle path (branch_0..branch_3),
/// hence the length assertions below.
///
/// # Panics
/// Panics if `merkle_path` is not exactly 4 entries long or if proof
/// generation fails.
pub fn create_spend_proof(
    params: &groth16::Parameters<Bls12>,
    value: u64,
    randomness_value: jubjub::Fr,
    serial: jubjub::Fr,
    randomness_coin: jubjub::Fr,
    secret: jubjub::Fr,
    merkle_path: Vec<(bls12_381::Scalar, bool)>,
    signature_secret: jubjub::Fr,
) -> (groth16::Proof<Bls12>, SpendRevealedValues) {
    assert_eq!(merkle_path.len(), 4);
    assert_eq!(
        merkle_path.len(),
        super::coin::SAPLING_COMMITMENT_TREE_DEPTH
    );

    let c = SpendContract {
        value: Some(value),
        randomness_value: Some(randomness_value),
        serial: Some(serial),
        randomness_coin: Some(randomness_coin),
        secret: Some(secret),

        branch_0: Some(merkle_path[0].0),
        is_right_0: Some(merkle_path[0].1),
        branch_1: Some(merkle_path[1].0),
        is_right_1: Some(merkle_path[1].1),
        branch_2: Some(merkle_path[2].0),
        is_right_2: Some(merkle_path[2].1),
        branch_3: Some(merkle_path[3].0),
        is_right_3: Some(merkle_path[3].1),

        signature_secret: Some(signature_secret),
    };

    let start = Instant::now();
    let proof = groth16::create_random_proof(c, params, &mut OsRng).unwrap();
    println!("Prove: [{:?}]", start.elapsed());

    // Recompute the public values outside the circuit so the verifier can
    // reconstruct the exact public-input vector.
    let revealed = SpendRevealedValues::compute(
        value,
        &randomness_value,
        &serial,
        &randomness_coin,
        &secret,
        &merkle_path,
        &signature_secret,
    );

    (proof, revealed)
}

/// Verifies `proof` against the public inputs derived from `revealed`.
/// Returns `false` on any verification failure (errors are not surfaced).
pub fn verify_spend_proof(
    pvk: &groth16::PreparedVerifyingKey<Bls12>,
    proof: &groth16::Proof<Bls12>,
    revealed: &SpendRevealedValues,
) -> bool {
    let public_input = revealed.make_outputs();
    let start = Instant::now();
    let result = groth16::verify_proof(pvk, proof, &public_input).is_ok();
    println!("Verify: [{:?}]", start.elapsed());
    result
}
//! This module holds some macros that should be usable everywhere within the
//! kernel.

/// Creates a `&'static str` from a c string.
///
/// Converts the string at the given address from a c string to a rust
/// `&'static str`.
/// Optionally if the length is known, the process can be sped up, by passing
/// it.
#[macro_export]
macro_rules! from_c_str {
    ($address:expr, $length:expr) => {{
        use core::slice;
        use core::str;
        // Sanity check: the byte just past the claimed length must be the
        // NUL terminator.
        unsafe {
            let null_value: u8 = *($address + $length).as_ptr();
            assert_eq!(null_value, 0);
        }
        if $length > 0 {
            let bytes: &[u8] =
                unsafe { slice::from_raw_parts($address.as_ptr(), $length as usize) };
            str::from_utf8(bytes)
        } else {
            Ok("")
        }
    }};
    ($address:expr) => {{
        // Length unknown: scan forward for the NUL terminator, then delegate
        // to the two-argument arm. NOTE: `$address` is evaluated more than
        // once here; avoid side-effecting expressions.
        let mut address: VirtualAddress = $address;
        unsafe {
            while *(address.as_ptr::<u8>()) != 0 {
                address += 1;
            }
        }
        from_c_str!($address, (address - $address))
    }};
}

/// Creates a `&'static str` from a pointer to a raw string and it's length.
#[macro_export]
macro_rules! from_raw_str {
    ($address:expr, $length:expr) => {{
        use core::slice;
        use core::str;
        if $length > 0 {
            let ptr: *const u8 = $address.as_ptr();
            let bytes: &[u8] = unsafe { slice::from_raw_parts(ptr, $length as usize) };
            str::from_utf8(bytes)
        } else {
            Ok("")
        }
    }};
}

/// Converts to a virtual address.
///
/// Converts a given physical address within the kernel part of memory to its
/// corresponding
/// virtual address.
#[macro_export]
#[cfg(target_arch = "x86_64")]
macro_rules! to_virtual {
    ($address:expr) => {{
        // Higher-half direct-map offset of the kernel.
        const KERNEL_OFFSET: usize = 0xffff800000000000;
        $address as usize + KERNEL_OFFSET
    }};
}

/// Returns true for a valid virtual address.
#[macro_export]
macro_rules! valid_address {
    ($address:expr) => {{
        // BUGFIX: this previously tested `cfg!(arch = "x86_64")` — `arch` is
        // not a valid cfg predicate, so the condition was always false and
        // every address was reported as valid. The correct predicate is
        // `target_arch`.
        if cfg!(target_arch = "x86_64") {
            use arch::x86_64::memory::{VIRTUAL_HIGH_MIN_ADDRESS, VIRTUAL_LOW_MAX_ADDRESS};
            // Canonical addresses live in the low or the high half.
            (VIRTUAL_LOW_MAX_ADDRESS >= $address || $address >= VIRTUAL_HIGH_MIN_ADDRESS)
        } else {
            true
        }
    }};
}

/// Used to define statics that are local to each cpu core.
macro_rules! cpu_local {
    ($(#[$attr: meta])* static ref $name: ident : $type: ty = $val: expr;) => {
        __cpu_local_internal!($(#[$attr])*, CPULocal, $name, $type, $val);
    };
    ($(#[$attr: meta])* pub static ref $name: ident : $type: ty = $val: expr;) => {
        __cpu_local_internal!($(#[$attr])*, pub, CPULocal, $name, $type, $val);
    };
    ($(#[$attr: meta])* static mut ref $name: ident : $type: ty = $val: expr;) => {
        __cpu_local_internal!($(#[$attr])*, CPULocalMut, $name, $type, $val);
    };
    ($(#[$attr: meta])* pub static mut ref $name: ident : $type: ty = $val: expr;) => {
        __cpu_local_internal!($(#[$attr])*, pub, CPULocalMut, $name, $type, $val);
    };
}

/// Implementation detail of `cpu_local!`: builds one `$val(i)` per cpu and
/// wraps the vector in the given `CPULocal`/`CPULocalMut` wrapper type.
macro_rules! __cpu_local_internal {
    ($(#[$attr: meta])*, pub, $wrapper_type: ident, $name: ident, $type: ty, $val: expr) => {
        lazy_static! {
            $(#[$attr])*
            pub static ref $name: ::multitasking::$wrapper_type<$type> = {
                use alloc::Vec;
                use multitasking::get_cpu_num;

                let cpu_num = get_cpu_num();

                let mut vec = Vec::with_capacity(cpu_num);

                for i in 0..cpu_num {
                    vec.push($val(i));
                }

                unsafe { ::multitasking::$wrapper_type::new(vec) }
            };
        }
    };
    ($(#[$attr: meta])*, $wrapper_type: ident, $name: ident, $type: ty, $val: expr) => {
        lazy_static! {
            $(#[$attr])*
            static ref $name: ::multitasking::$wrapper_type<$type> = {
                use alloc::Vec;
                use multitasking::get_cpu_num;

                let cpu_num = get_cpu_num();

                let mut vec = Vec::with_capacity(cpu_num);

                for i in 0..cpu_num {
                    vec.push($val(i));
                }

                unsafe { ::multitasking::$wrapper_type::new(vec) }
            };
        }
    };
}
///! Server control command ///! use std::fmt; use std::net::SocketAddr; use crate::session::SessionId; pub enum ServerCommand<T> { /// terminate Terminate, /// connected stream and client address Connect(T, SocketAddr), Disconnect(SessionId), } impl<T> fmt::Debug for ServerCommand<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use ServerCommand::*; match self { Terminate => write!(f, "Terminate"), Connect(_, addr) => write!(f, "Connect(_, {})", addr), Disconnect(id) => write!(f, "Disconnect({})", id), } } }
/** * Implements Default for an enum. * Requires the enum to have a variant named "Default" */ #[macro_export] macro_rules! enum_default { ($name: ident) => { impl Default for $name { fn default() -> Self { $name::Default } } }; } /** * Implements a fmt trait for an enum. * The output is the enum as a number */ #[macro_export] macro_rules! enum_fmt_impl { ($name: ident, $trait: ident) => { impl $trait for $name { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { $trait::fmt(&(self.clone() as u8), f) } } }; } /** * Implements the Display trait for an enum. * The output is the enum as a number */ #[macro_export] macro_rules! enum_display { ($name: ident) => { impl Display for $name { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.clone() as u8) } } }; } /** Implements several traits for a macro */ #[macro_export] macro_rules! enum_impls { ($name: ident) => { enum_default!($name); enum_display!($name); enum_fmt_impl!($name, Binary); enum_fmt_impl!($name, Octal); enum_fmt_impl!($name, LowerHex); enum_fmt_impl!($name, UpperHex); }; } /** Implements several enums */ #[macro_export] macro_rules! impl_enums { ($($enum: ident),*) => { $(enum_impls!($enum);)* }; } /** adds a set of functions to the trait */ #[macro_export] macro_rules! chalk_trait_fns { ($($name: ident),*) => { $(fn $name(&mut self) -> &mut Self;)* }; } /** Sets up an alias for a function */ #[macro_export] macro_rules! fn_alias { ($alias: ident, $fn: ident) => { fn $alias(&mut self) -> &mut Self {self.$fn()} }; }
pub mod error; use error::Error; pub mod ioops; use ioops::{create_and_write_archive, Manifest}; pub mod runtime; use runtime::Operation; use std::fs::File; pub fn run(operation: Operation) -> Result<(), Error> { match operation { Operation::Backup(profile_path, archive_file_name) => { let profile_path = std::path::Path::new(profile_path); let mut profile = Manifest::new(profile_path)?; let iterator = profile.files()?; let output_handle = File::create(archive_file_name)?; create_and_write_archive(iterator, &output_handle, profile_path)?; Ok(()) } } }
use bitcoin::util::base58;
use byteorder::{LittleEndian, ReadBytesExt};
use hex;
use std::io::prelude::*;
use std::io::Cursor;
use std::io::SeekFrom;

use enums::TransactionType;
use identities::{address, public_key};
use transactions::transaction::{Asset, Transaction};
use utils;

/// Deserializes a hex-encoded ARK transaction string into a `Transaction`.
///
/// Two cursors run in parallel through this module: `bytes` walks the
/// decoded byte buffer, while `asset_offset` tracks the position in the
/// *hex string* (2 hex chars per byte) — keep that unit difference in mind
/// when reading the offset arithmetic below.
///
/// # Panics
/// Panics (via `unwrap`) on malformed input.
pub fn deserialize(serialized: &str) -> Transaction {
    let decoded = hex::decode(serialized).unwrap();
    let mut bytes = Cursor::new(decoded.as_slice());

    let mut transaction = Transaction::default();
    let mut asset_offset = deserialize_header(&mut bytes, &mut transaction);
    deserialize_type(&mut bytes, &mut transaction, &serialized, &mut asset_offset);
    parse_signatures(&mut transaction, &serialized, asset_offset);

    // v1 transactions need legacy fix-ups (recipient derivation, "+"-prefixed
    // multisig keys, id computation, ...).
    if transaction.version == 1 {
        handle_version_one(&mut transaction);
    }

    transaction
}

/// Reads the fixed 50-byte header (+ optional vendor field) and returns the
/// asset offset in hex characters.
///
/// Header layout: header(1) version(1) network(1) type(1) timestamp(4)
/// sender_public_key(33) fee(8) vendor_field_length(1) = 50 bytes, hence
/// the `50 * 2` hex-char offset returned below.
fn deserialize_header(bytes: &mut Cursor<&[u8]>, transaction: &mut Transaction) -> usize {
    transaction.header = bytes.read_u8().unwrap();
    transaction.version = bytes.read_u8().unwrap();
    transaction.network = bytes.read_u8().unwrap();
    transaction.type_id = bytes.read_u8().unwrap().into();
    transaction.timestamp = bytes.read_u32::<LittleEndian>().unwrap();

    let mut sender_public_key_buf = [0; 33];
    bytes.read_exact(&mut sender_public_key_buf).unwrap();
    transaction.sender_public_key = hex::encode(sender_public_key_buf.to_vec());

    transaction.fee = bytes.read_u64::<LittleEndian>().unwrap();

    let vendor_field_length = bytes.read_u8().unwrap() as usize;
    if vendor_field_length > 0 {
        let mut vendor_field_buf: Vec<u8> = vec![0; vendor_field_length];
        bytes.read_exact(&mut vendor_field_buf).unwrap();

        transaction.vendor_field_hex = hex::encode(&vendor_field_buf);
    }

    (50 * 2 + vendor_field_length * 2) as usize
}

/// Dispatches to the per-type asset deserializer. Types without an asset
/// payload (Ipfs, TimelockTransfer, MultiPayment, DelegateResignation) are
/// deliberately no-ops.
fn deserialize_type(
    bytes: &mut Cursor<&[u8]>,
    mut transaction: &mut Transaction,
    serialized: &str,
    mut asset_offset: &mut usize,
) {
    match transaction.type_id {
        TransactionType::Transfer => {
            deserialize_transfer(bytes, &mut transaction, &mut asset_offset)
        }
        TransactionType::SecondSignatureRegistration => {
            deserialize_second_signature_registration(
                bytes,
                &mut transaction,
                serialized,
                &mut asset_offset,
            )
        }
        TransactionType::DelegateRegistration => deserialize_delegate_registration(
            bytes,
            &mut transaction,
            serialized,
            &mut asset_offset,
        ),
        TransactionType::Vote => {
            deserialize_vote(bytes, &mut transaction, serialized, &mut asset_offset)
        }
        TransactionType::MultiSignatureRegistration => {
            deserialize_multi_signature_registration(bytes, &mut transaction, &mut asset_offset)
        }
        TransactionType::Ipfs => (),
        TransactionType::TimelockTransfer => (),
        TransactionType::MultiPayment => (),
        TransactionType::DelegateResignation => (),
    }
}

/// Transfer payload: amount(8) expiration(4) recipient(21) = 33 bytes.
fn deserialize_transfer(
    bytes: &mut Cursor<&[u8]>,
    transaction: &mut Transaction,
    asset_offset: &mut usize,
) {
    // `asset_offset` counts hex chars; divide by 2 for the byte position.
    bytes
        .seek(SeekFrom::Start(*asset_offset as u64 / 2))
        .unwrap();
    transaction.amount = bytes.read_u64::<LittleEndian>().unwrap();
    transaction.expiration = bytes.read_u32::<LittleEndian>().unwrap();

    let mut recipient_id_buf = [0; 21];
    bytes.read_exact(&mut recipient_id_buf).unwrap();
    transaction.recipient_id = base58::check_encode_slice(&recipient_id_buf);

    *asset_offset += (21 + 12) * 2;
}

/// Second-signature payload: a 33-byte (66 hex chars) public key, read
/// straight from the hex string.
fn deserialize_second_signature_registration(
    _bytes: &mut Cursor<&[u8]>,
    transaction: &mut Transaction,
    serialized: &str,
    asset_offset: &mut usize,
) {
    transaction.asset = Asset::Signature {
        public_key: serialized.chars().skip(*asset_offset).take(66).collect(),
    };

    *asset_offset += 66;
}

/// Delegate payload: length byte followed by the hex-encoded username.
fn deserialize_delegate_registration(
    bytes: &mut Cursor<&[u8]>,
    transaction: &mut Transaction,
    serialized: &str,
    asset_offset: &mut usize,
) {
    let username_length = bytes.read_u8().unwrap() as usize;

    // Skip 2 hex chars for the length byte itself.
    let username: String = serialized
        .chars()
        .skip(*asset_offset + 2)
        .take(username_length * 2)
        .collect();

    transaction.asset = Asset::Delegate {
        username: utils::str_from_hex(&username).unwrap(),
    };

    *asset_offset += (username_length + 1) * 2;
}

/// Vote payload: count byte, then `count` entries of 34 bytes each —
/// 1 type byte ("00"/"01") followed by a 33-byte public key. The on-wire
/// type byte is rewritten as a "+"/"-" prefix on the key.
fn deserialize_vote(
    bytes: &mut Cursor<&[u8]>,
    transaction: &mut Transaction,
    serialized: &str,
    asset_offset: &mut usize,
) {
    let vote_length = bytes.read_u8().unwrap() as usize;
    // Advance past the count byte (2 hex chars).
    *asset_offset += 2;

    let mut votes = Vec::with_capacity(vote_length);
    for i in 0..vote_length {
        let index_start = *asset_offset + (i * 2 * 34);
        // 34 bytes per entry minus the type byte = 66 hex chars of key.
        let index_end = 2 * 34 - 2;
        // Second nibble of the type byte: "1" = vote, "0" = unvote.
        let vote_type: String = serialized.chars().skip(index_start + 1).take(1).collect();
        let mut vote: String = serialized
            .chars()
            .skip(index_start + 2)
            .take(index_end)
            .collect();

        assert!(vote_type == "1" || vote_type == "0");
        if vote_type == "1" {
            vote.insert_str(0, "+");
        } else {
            vote.insert_str(0, "-");
        }

        votes.push(vote);
    }
    transaction.asset = Asset::Votes(votes);

    *asset_offset += vote_length * 34 * 2;
}

/// Multisig payload: min(1) count(1) lifetime(1), then `count` 33-byte keys.
fn deserialize_multi_signature_registration(
    bytes: &mut Cursor<&[u8]>,
    transaction: &mut Transaction,
    asset_offset: &mut usize,
) {
    let min = bytes.read_u8().unwrap();
    let number_of_signatures = bytes.read_u8().unwrap() as usize;
    let lifetime = bytes.read_u8().unwrap();

    let mut keysgroup = Vec::with_capacity(number_of_signatures);
    for _ in 0..number_of_signatures {
        let mut public_key_buf = [0; 33];
        bytes.read_exact(&mut public_key_buf).unwrap();

        keysgroup.push(hex::encode(public_key_buf.to_vec()))
    }

    transaction.asset = Asset::MultiSignatureRegistration {
        keysgroup,
        min,
        lifetime,
    };

    *asset_offset += 6 + number_of_signatures * 66;
}

/// Reads the trailing (first, optional second, optional "ff"-prefixed
/// multi-) signatures from the hex string after the asset payload.
///
/// Each DER signature's total length is derived from its second byte
/// (length field) + 2 header bytes; all arithmetic is in hex characters.
fn parse_signatures(transaction: &mut Transaction, serialized: &str, asset_offset: usize) {
    let signature: String = serialized.chars().skip(asset_offset).collect();
    let mut multi_signature_offset = 0;

    if !signature.is_empty() {
        // DER length byte sits at hex chars [2..4); +2 for the header bytes.
        let signature_length_str: String = signature.chars().skip(2).take(2).collect();
        let signature_length = (u8::from_str_radix(&signature_length_str, 16).unwrap() + 2) as usize;
        transaction.signature = serialized
            .chars()
            .skip(asset_offset)
            .take(signature_length * 2)
            .collect();
        multi_signature_offset += signature_length * 2;

        let second_signature: String = serialized
            .chars()
            .skip(asset_offset + signature_length * 2)
            .collect();
        // "ff" marks the start of the multisignature block, not a second
        // signature.
        if !second_signature.is_empty() && !second_signature.starts_with("ff") {
            let second_signature_length_str: String =
                second_signature.chars().skip(2).take(2).collect();
            let second_signature_length =
                (u8::from_str_radix(&second_signature_length_str, 16).unwrap() + 2) as usize;
            transaction.second_signature = second_signature
                .chars()
                .take(second_signature_length * 2)
                .collect();

            multi_signature_offset += second_signature_length * 2;
        }

        let mut signatures: String = serialized
            .chars()
            .skip(asset_offset + multi_signature_offset)
            .collect();

        if signatures.is_empty() || !signatures.starts_with("ff") {
            return;
        }

        // Drop the "ff" marker, then consume length-prefixed signatures
        // until the string is exhausted.
        signatures = signatures.chars().skip(2).collect();

        loop {
            if signatures.is_empty() {
                break;
            }

            let multi_signature_length_str: String =
                signatures.chars().skip(2).take(2).collect();
            let multi_signature_length =
                (u8::from_str_radix(&multi_signature_length_str, 16).unwrap() + 2) as usize;

            if multi_signature_length > 0 {
                let multi_signature: String = signatures
                    .chars()
                    .take(multi_signature_length * 2)
                    .collect();
                transaction.signatures.push(multi_signature);

                signatures = signatures
                    .chars()
                    .skip(multi_signature_length * 2)
                    .collect();
            } else {
                break;
            }
        }
    }
}

/// Legacy fix-ups for version-1 transactions: mirror the second signature
/// into `sign_signature`, derive recipient addresses from the sender key,
/// "+"-prefix multisig keys, decode the vendor field, and compute the id.
fn handle_version_one(transaction: &mut Transaction) {
    if !transaction.second_signature.is_empty() {
        transaction.sign_signature = transaction.second_signature.to_owned();
    }

    match transaction.type_id {
        TransactionType::Vote => {
            let public_key = public_key::from_hex(&transaction.sender_public_key).unwrap();
            transaction.recipient_id =
                address::from_public_key(&public_key, Some(transaction.network));
        }
        TransactionType::MultiSignatureRegistration => {
            if let Asset::MultiSignatureRegistration {
                ref mut keysgroup, ..
            } = transaction.asset
            {
                let keysgroup = keysgroup.as_mut_slice();
                for key in keysgroup {
                    // v1 convention: keys carry an explicit "+" prefix.
                    *key = String::from("+") + key;
                }
            }
        }
        _ => (),
    }

    if !transaction.vendor_field_hex.is_empty() {
        transaction.vendor_field = utils::str_from_hex(&transaction.vendor_field_hex).unwrap();
    }

    if transaction.id.is_empty() {
        transaction.id = transaction.get_id();
    }

    match transaction.type_id {
        TransactionType::SecondSignatureRegistration => {
            let public_key = public_key::from_hex(&transaction.sender_public_key).unwrap();
            transaction.recipient_id =
                address::from_public_key(&public_key, Some(transaction.network));
        }
        TransactionType::MultiSignatureRegistration => {
            let public_key = public_key::from_hex(&transaction.sender_public_key).unwrap();
            transaction.recipient_id =
                address::from_public_key(&public_key, Some(transaction.network));
        }
        _ => (),
    }
}
use serde::de::{self, Deserialize, Deserializer, Visitor}; use serde_derive::Deserialize; use std::fmt; #[derive(Deserialize)] #[serde(deny_unknown_fields)] pub struct InheritEdition { pub workspace: True, } pub struct True; impl<'de> Deserialize<'de> for True { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_bool(True) } } impl<'de> Visitor<'de> for True { type Value = True; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("bool") } fn visit_bool<E>(self, b: bool) -> Result<Self::Value, E> where E: de::Error, { if b { Ok(True) } else { Err(de::Error::custom( "workspace=false is unsupported for package.edition", )) } } }
use regex::Regex; #[derive(Debug, PartialEq, Eq)] pub struct Passport { birth_year: Option<String>, issue_year: Option<String>, expiration_year: Option<String>, height: Option<String>, hair_color: Option<String>, eye_color: Option<String>, passport_id: Option<String>, country_id: Option<String>, } impl Passport { pub fn new() -> Self { Passport { birth_year: None, issue_year: None, expiration_year: None, height: None, hair_color: None, eye_color: None, passport_id: None, country_id: None, } } } #[aoc_generator(day4)] pub fn input_generator(input: &str) -> Vec<Passport> { input .split("\n\n") .map(|p| { let mut passport = Passport::new(); for e in p.split_whitespace() { match &e[0..3] { "byr" => passport.birth_year = Some(e[4..].to_string()), "iyr" => passport.issue_year = Some(e[4..].to_string()), "eyr" => passport.expiration_year = Some(e[4..].to_string()), "hgt" => passport.height = Some(e[4..].to_string()), "hcl" => passport.hair_color = Some(e[4..].to_string()), "ecl" => passport.eye_color = Some(e[4..].to_string()), "pid" => passport.passport_id = Some(e[4..].to_string()), "cid" => passport.country_id = Some(e[4..].to_string()), _ => panic!("Invalid input!"), } } passport }) .collect() } #[aoc(day4, part1)] pub fn part1(input: &Vec<Passport>) -> usize { let mut valid_count = 0; for p in input.iter() { if !p.birth_year.is_some() || !p.issue_year.is_some() || !p.expiration_year.is_some() || !p.height.is_some() || !p.hair_color.is_some() || !p.eye_color.is_some() || !p.passport_id.is_some() { continue; } valid_count += 1; } valid_count } #[aoc(day4, part2)] pub fn part2(input: &Vec<Passport>) -> usize { let mut valid_count = 0; for p in input.iter() { match &p.birth_year { Some(b) => { let b_int = b.parse::<usize>().unwrap(); if b_int < 1920 || b_int > 2002 { continue; } } None => continue, } match &p.issue_year { Some(b) => { let b_int = b.parse::<usize>().unwrap(); if b_int < 2010 || b_int > 2020 { continue; } } None => continue, } match &p.expiration_year { 
Some(b) => { let b_int = b.parse::<usize>().unwrap(); if b_int < 2020 || b_int > 2030 { continue; } } None => continue, } match &p.height { Some(h) => match &h[h.len() - 2..] { "cm" => { let h_int = h[..h.len() - 2].parse::<usize>().unwrap(); if h_int < 150 || h_int > 193 { continue; } } "in" => { let h_int = h[..h.len() - 2].parse::<usize>().unwrap(); if h_int < 59 || h_int > 76 { continue; } } _ => continue, }, None => continue, } match &p.hair_color { Some(hc) => { let re = Regex::new(r"^#([0-9a-fA-F]{3}){1,2}$").unwrap(); if !re.is_match(hc) { continue; } } None => continue, } match &p.eye_color { Some(ec) => match &ec[..] { "amb" | "blu" | "brn" | "gry" | "grn" | "hzl" | "oth" => (), _ => continue, }, None => continue, } match &p.passport_id { Some(pid) => { let re = Regex::new(r"^[0-9]{9}$").unwrap(); if !re.is_match(pid) { continue; } } None => continue, } valid_count += 1; } valid_count } #[cfg(test)] mod tests { use super::*; #[test] fn test_input() { let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n\ byr:1937 iyr:2017 cid:147 hgt:183cm\n\ \n\ iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n\ hcl:#cfa07d byr:1929"; let mut expected: Vec<Passport> = Vec::new(); expected.push(Passport { birth_year: Some("1937".to_string()), issue_year: Some("2017".to_string()), expiration_year: Some("2020".to_string()), height: Some("183cm".to_string()), hair_color: Some("#fffffd".to_string()), eye_color: Some("gry".to_string()), passport_id: Some("860033327".to_string()), country_id: Some("147".to_string()), }); expected.push(Passport { birth_year: Some("1929".to_string()), issue_year: Some("2013".to_string()), expiration_year: Some("2023".to_string()), height: None, hair_color: Some("#cfa07d".to_string()), eye_color: Some("amb".to_string()), passport_id: Some("028048884".to_string()), country_id: Some("350".to_string()), }); assert_eq!(expected, input_generator(input)); } #[test] fn test_part1() { let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n\ byr:1937 
iyr:2017 cid:147 hgt:183cm\n\ \n\ iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n\ hcl:#cfa07d byr:1929\n\ \n\ hcl:#ae17e1 iyr:2013\n\ eyr:2024\n\ ecl:brn pid:760753108 byr:1931\n\ hgt:179cm\n\ \n\ hcl:#cfa07d eyr:2025 pid:166559648\n\ iyr:2011 ecl:brn hgt:59in"; assert_eq!(part1(&input_generator(input)), 2); } #[test] fn test_part2() { let input = "eyr:1972 cid:100\n\ hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n\ \n\ iyr:2019\n\ hcl:#602927 eyr:1967 hgt:170cm\n\ ecl:grn pid:012533040 byr:1946\n\ \n\ hcl:dab227 iyr:2012\n\ ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n\ \n\ hgt:59cm ecl:zzz\n\ eyr:2038 hcl:74454a iyr:2023\n\ pid:3556412378 byr:2007\n\ pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n\ hcl:#623a2f\n\ \n\ eyr:2029 ecl:blu cid:129 byr:1989\n\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n\ \n\ hcl:#888785\n\ hgt:164cm byr:2001 iyr:2015 cid:88\n\ pid:545766238 ecl:hzl\n\ eyr:2022\n\ \n\ iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719"; assert_eq!(part2(&input_generator(input)), 4); } }
// TODO use https://docs.rs/embedded-graphics/0.7.1/embedded_graphics/draw_target/trait.DrawTargetExt.html
// to clip while scrolling

use crate::hal::prelude::{OutputPin, _embedded_hal_blocking_delay_DelayUs as DelayUs};
use display_interface::WriteOnlyDataCommand;
use pinetime_common::embedded_graphics::{
    draw_target::DrawTarget, pixelcolor::Rgb565, prelude::*, primitives::Rectangle,
};
use pinetime_common::{display, AnimatedDisplay, RefreshDirection};
use st7789::{Error, Orientation, ST7789};

/// Lines the hardware scroll offset advances per animation step.
pub const SCROLL_DELTA: u16 = 16;

/// ST7789 display wrapper that adds a vertical scroll animation on top of
/// the plain driver; all `DrawTarget` calls are forwarded unchanged.
pub struct AnimatedSt7789<DI, RST>
where
    DI: WriteOnlyDataCommand,
    RST: OutputPin,
{
    /// Direction of the animation currently running, if any.
    in_progress_animation: Option<RefreshDirection>,
    /// Current hardware scroll offset, in display lines.
    scroll_offset: u16,
    display: ST7789<DI, RST>,
}

impl<DI, RST, PinE> AnimatedSt7789<DI, RST>
where
    DI: WriteOnlyDataCommand,
    RST: OutputPin<Error = PinE>,
{
    /// Wraps a freshly constructed (not yet initialized) ST7789 driver.
    pub fn new(di: DI, rst: RST, size_x: u16, size_y: u16) -> Self {
        AnimatedSt7789 {
            in_progress_animation: None,
            scroll_offset: 0,
            display: ST7789::new(di, rst, size_x, size_y),
        }
    }

    /// Initializes the panel and fixes the orientation to portrait.
    pub fn init(&mut self, delay_source: &mut impl DelayUs<u32>) -> Result<(), Error<PinE>> {
        self.display.init(delay_source)?;
        self.display.set_orientation(Orientation::Portrait)?;
        Ok(())
    }
}

// Plain pass-through: drawing is unaffected by the scroll animation.
impl<DI, OUT, PinE> DrawTarget for AnimatedSt7789<DI, OUT>
where
    DI: WriteOnlyDataCommand,
    OUT: OutputPin<Error = PinE>,
{
    type Error = Error<PinE>;
    type Color = Rgb565;

    fn draw_iter<T>(&mut self, item: T) -> Result<(), Self::Error>
    where
        T: IntoIterator<Item = Pixel<Rgb565>>,
    {
        self.display.draw_iter(item)
    }

    fn fill_contiguous<I>(&mut self, area: &Rectangle, colors: I) -> Result<(), Self::Error>
    where
        I: IntoIterator<Item = Self::Color>,
    {
        self.display.fill_contiguous(area, colors)
    }

    fn fill_solid(&mut self, area: &Rectangle, color: Self::Color) -> Result<(), Self::Error> {
        self.display.fill_solid(area, color)
    }

    fn clear(&mut self, color: Rgb565) -> Result<(), Self::Error>
    where
        Self: Sized,
    {
        self.display.clear(color)
    }
}

impl<DI, OUT, PinE> OriginDimensions for AnimatedSt7789<DI, OUT>
where
    DI: WriteOnlyDataCommand,
    OUT: OutputPin<Error = PinE>,
{
    fn size(&self) -> Size {
        self.display.size()
    }
}

impl<DI, OUT, PinE> AnimatedDisplay for AnimatedSt7789<DI, OUT>
where
    DI: WriteOnlyDataCommand,
    OUT: OutputPin<Error = PinE>,
{
    type Error = Error<PinE>;

    /// Starts a scroll animation unless one is already running.
    /// `Up` scrolls from offset 0 upward; `Down` starts at the bottom
    /// (`VERT_LINES`) and scrolls back. Uses the std `From<T> for Option<T>`
    /// impl via `.into()` to store `Some(refresh_dir)`.
    fn set_refresh_direction(&mut self, refresh_dir: RefreshDirection) {
        if self.in_progress_animation.is_none() {
            self.in_progress_animation = refresh_dir.into();
            self.scroll_offset = match refresh_dir {
                RefreshDirection::Up => 0,
                RefreshDirection::Down => display::VERT_LINES,
            };
        }
    }

    // TODO - instead of Clipped+force-redraws, do fill_solid in here to fill background as
    // scrolling progresses
    // rect size is (width, SCROLL_DELTA), offset moves with scroll_offset

    /// Advances the running animation by one `SCROLL_DELTA` step; both
    /// directions terminate when the (clamped) offset equals `VERT_LINES`.
    fn update_animations(&mut self) -> Result<(), Error<PinE>> {
        let is_done = match self.in_progress_animation {
            Some(RefreshDirection::Up) => {
                self.display.set_scroll_offset(self.scroll_offset)?;
                self.scroll_offset += SCROLL_DELTA;
                self.scroll_offset = self.scroll_offset.clamp(0, display::VERT_LINES);
                self.scroll_offset == display::VERT_LINES
            }
            Some(RefreshDirection::Down) => {
                self.display.set_scroll_offset(self.scroll_offset)?;
                // Deliberate trick: once the offset would go below zero,
                // `wrapping_sub` wraps to a huge value which the clamp pins
                // to VERT_LINES, satisfying the done-check below. A
                // `saturating_sub` would stall at 0 and never terminate —
                // do not "simplify" this without reworking the done
                // condition.
                self.scroll_offset = self.scroll_offset.wrapping_sub(SCROLL_DELTA);
                self.scroll_offset = self.scroll_offset.clamp(0, display::VERT_LINES);
                self.scroll_offset == display::VERT_LINES
            }
            _ => true,
        };
        if is_done {
            self.in_progress_animation = None;
        }
        Ok(())
    }
}
/**
 Single item to verification:
 - SP Trie with RootHash
 - BLS MS
 - set of key-value to verify
*/
#[derive(Serialize, Deserialize, Debug)]
pub struct ParsedSP {
    /// encoded SP Trie transferred from Node to Client
    pub proof_nodes: String,
    /// RootHash of the Trie, start point for verification. Should be same with appropriate field in BLS MS data
    pub root_hash: String,
    /// entities to verify against the current SP Trie
    pub kvs_to_verify: KeyValuesInSP,
    /// BLS MS data for verification
    pub multi_signature: serde_json::Value,
}

/**
 Variants of representation for items to verify against SP Trie
 Right now 2 options are specified:
 - simple array of key-value pairs
 - whole subtrie
*/
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type")]
pub enum KeyValuesInSP {
    Simple(KeyValueSimpleData),
    SubTrie(KeyValuesSubTrieData),
}

/**
 Simple variant of `KeyValuesInSP`.

 All required data is already present in the parent SP Trie (built from
 `proof_nodes`); `kvs` can be verified directly in the parent trie.

 Encoding of `key` in `kvs` is defined by the verification type.
*/
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct KeyValueSimpleData {
    /// `None` value asserts absence of the key in the trie.
    pub kvs: Vec<(String /* key */, Option<String /* val */>)>,
    // Defaults to `Simple` when omitted (see `Default` impl below).
    #[serde(default)]
    pub verification_type: KeyValueSimpleDataVerificationType,
}

/**
 Options of common state proof check process
*/
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type")]
pub enum KeyValueSimpleDataVerificationType {
    /* key should be base64-encoded string */
    Simple,
    /* key should be plain string */
    NumericalSuffixAscendingNoGaps(NumericalSuffixAscendingNoGapsData),
    /* nodes are from a simple merkle tree */
    MerkleTree(u64),
}

// Serde falls back to `Simple` for `KeyValueSimpleData::verification_type`
// via `#[serde(default)]`.
impl Default for KeyValueSimpleDataVerificationType {
    fn default() -> Self {
        KeyValueSimpleDataVerificationType::Simple
    }
}

/// Bounds and prefix for `NumericalSuffixAscendingNoGaps` verification:
/// keys share `prefix` and carry numeric suffixes running from `from`
/// (inclusive, open-ended if `None`) up to `next` with no gaps.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct NumericalSuffixAscendingNoGapsData {
    pub from: Option<u64>,
    pub next: Option<u64>,
    pub prefix: String,
}

/**
 Subtrie variant of `KeyValuesInSP`.

 In this case Client (libindy) should construct subtrie and append it into
 trie based on `proof_nodes`. After this preparation each kv pair can be
 checked.
*/
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct KeyValuesSubTrieData {
    /// base64-encoded common prefix of each pair in `kvs`. Should be used to correct merging initial trie and subtrie
    pub sub_trie_prefix: Option<String>,
    pub kvs: Vec<(
        String, /* b64-encoded key_suffix */
        Option<String /* val */>,
    )>,
}
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashSet;

/// AoC day 12: load the instruction file and report the ship's
/// Manhattan distance from its starting position.
pub fn exercise() {
    let data = load_data();
    compute_manhattan_distance(data);
}

/// Runs the navigation instructions and prints the resulting distance.
fn compute_manhattan_distance(data: Vec<String>) {
    println!(
        "The ship's Manhattan distance is {}.",
        manhattan_distance(&data)
    );
}

/// Executes the instruction list and returns |east-west| + |north-south|.
///
/// Each instruction is `<letter><amount>`:
/// * `N`/`S`/`E`/`W` — translate without turning,
/// * `R`/`L` — rotate right/left by a multiple of 90 degrees,
/// * `F` — move in the direction currently faced.
fn manhattan_distance(data: &[String]) -> i32 {
    // Compass order matters: turning right is +1 index step, left is -1.
    const DIRECTIONS: [char; 4] = ['N', 'E', 'S', 'W'];
    let mut facing: usize = 1; // start facing East
    let mut east_west = 0;
    let mut north_south = 0;

    for instruction in data {
        let action = instruction.chars().next().expect("empty instruction");
        let amount: i32 = instruction[1..]
            .parse()
            .expect("instruction must be a letter followed by a number");

        match action {
            // rem_euclid normalizes ANY multiple of 90 degrees. The previous
            // code subtracted/added the array length only once, so turns
            // beyond a full revolution (e.g. R450+90 or L810) produced an
            // out-of-range index and panicked.
            'R' => facing = (facing as i32 + amount / 90).rem_euclid(4) as usize,
            'L' => facing = (facing as i32 - amount / 90).rem_euclid(4) as usize,
            'F' => translate(DIRECTIONS[facing], amount, &mut east_west, &mut north_south),
            _ => translate(action, amount, &mut east_west, &mut north_south),
        }
    }

    east_west.abs() + north_south.abs()
}

/// Applies a single compass move to the position accumulators.
fn translate(direction: char, amount: i32, east_west: &mut i32, north_south: &mut i32) {
    match direction {
        'E' => *east_west += amount,
        'W' => *east_west -= amount,
        'N' => *north_south += amount,
        'S' => *north_south -= amount,
        _ => println!("Unknown direction {}", direction),
    }
}

/// Reads the day-12 input file, one instruction per line.
/// Panics if the file is missing or unreadable.
fn load_data() -> Vec<String> {
    let input = File::open("./data/day12.data").unwrap();
    let reader = BufReader::new(input);

    reader.lines().map(|l| l.unwrap()).collect()
}
// Submodules implementing the individual `/medal` subcommands.
mod common;
mod list;
mod medal;
mod missing;
mod recent;
pub mod stats;

use std::sync::Arc;

use rosu_v2::prelude::Username;
use twilight_model::application::{
    command::CommandOptionChoice,
    interaction::{
        application_command::{CommandDataOption, CommandOptionValue},
        ApplicationCommand,
    },
};

use crate::{
    commands::{
        osu::{option_discord, option_name},
        parse_discord, DoubleResultCow, MyCommand, MyCommandOption,
    },
    custom_client::MEDAL_GROUPS,
    database::OsuData,
    util::{
        constants::common_literals::{DISCORD, INDEX, NAME, REVERSE},
        CowUtils, InteractionExt, MessageExt,
    },
    BotResult, Context, Error,
};

pub use self::{
    common::*, list::*, medal::handle_autocomplete as handle_medal_autocomplete, medal::*,
    missing::*, recent::*, stats::*,
};

use super::require_link;

/// The parsed subcommand of `/medal`, one variant per handler.
enum MedalCommandKind {
    Common(CommonArgs),
    List(ListArgs),
    Medal(String),
    Missing(Option<Username>),
    Recent(RecentArgs),
    Stats(Option<Username>),
}

/// Resolves the target osu! username from the subcommand options.
///
/// Priority visible in the code below: an explicit `name` option wins, then a
/// `discord` option resolved through `parse_discord`, and if neither is given
/// the invoking user's own linked account is looked up in the database.
/// The outer `Result` is a hard error; the inner `Err(content)` is a
/// user-facing failure message.
async fn parse_username(
    ctx: &Context,
    command: &ApplicationCommand,
    options: Vec<CommandDataOption>,
) -> DoubleResultCow<Option<Username>> {
    let mut osu = None;

    for option in options {
        match option.value {
            CommandOptionValue::String(value) => match option.name.as_str() {
                NAME => osu = Some(value.into()),
                _ => return Err(Error::InvalidCommandOptions),
            },
            CommandOptionValue::User(value) => match option.name.as_str() {
                DISCORD => match parse_discord(ctx, value).await? {
                    Ok(osu_) => osu = Some(osu_),
                    Err(content) => return Ok(Err(content)),
                },
                _ => return Err(Error::InvalidCommandOptions),
            },
            _ => return Err(Error::InvalidCommandOptions),
        }
    }

    // No explicit option: fall back to the invoker's linked osu! account.
    let osu = match osu {
        Some(osu) => Some(osu),
        None => ctx.psql().get_user_osu(command.user_id()?).await?,
    };

    Ok(Ok(osu.map(OsuData::into_username)))
}

impl MedalCommandKind {
    /// Parses the `info` subcommand: expects exactly one `name` string option
    /// and maps it to `Self::Medal`.
    fn slash_info(mut options: Vec<CommandDataOption>) -> BotResult<Self> {
        options
            .pop()
            .and_then(|option| (option.name == NAME).then(|| option.value))
            .and_then(|value| match value {
                CommandOptionValue::String(value) => Some(value),
                _ => None,
            })
            .map(Self::Medal)
            .ok_or(Error::InvalidCommandOptions)
    }

    /// Pops the single subcommand option off the interaction and dispatches
    /// to the matching argument parser.
    async fn slash(ctx: &Context, command: &mut ApplicationCommand) -> DoubleResultCow<Self> {
        let option = command
            .data
            .options
            .pop()
            .ok_or(Error::InvalidCommandOptions)?;

        match option.value {
            CommandOptionValue::SubCommand(options) => match option.name.as_str() {
                "common" => match CommonArgs::slash(ctx, command, options).await? {
                    Ok(args) => Ok(Ok(Self::Common(args))),
                    Err(content) => Ok(Err(content)),
                },
                "info" => Self::slash_info(options).map(Ok),
                "list" => match ListArgs::slash(ctx, command, options).await? {
                    Ok(args) => Ok(Ok(Self::List(args))),
                    Err(content) => Ok(Err(content)),
                },
                "missing" => match parse_username(ctx, command, options).await? {
                    Ok(name) => Ok(Ok(Self::Missing(name))),
                    Err(content) => Ok(Err(content)),
                },
                "recent" => match RecentArgs::slash(ctx, command, options).await? {
                    Ok(args) => Ok(Ok(Self::Recent(args))),
                    Err(content) => Ok(Err(content)),
                },
                "stats" => match parse_username(ctx, command, options).await? {
                    Ok(name) => Ok(Ok(Self::Stats(name))),
                    Err(content) => Ok(Err(content)),
                },
                _ => Err(Error::InvalidCommandOptions),
            },
            _ => Err(Error::InvalidCommandOptions),
        }
    }
}

/// Entry point for the `/medal` slash command: parse the subcommand, then
/// forward to its handler, or report the user-facing error message.
pub async fn slash_medal(ctx: Arc<Context>, mut command: ApplicationCommand) -> BotResult<()> {
    match MedalCommandKind::slash(&ctx, &mut command).await? {
        Ok(MedalCommandKind::Common(args)) => _common(ctx, command.into(), args).await,
        Ok(MedalCommandKind::List(args)) => _medalslist(ctx, command.into(), args).await,
        Ok(MedalCommandKind::Medal(name)) => _medal(ctx, command.into(), &name).await,
        Ok(MedalCommandKind::Missing(config)) => _medalsmissing(ctx, command.into(), config).await,
        Ok(MedalCommandKind::Recent(args)) => _medalrecent(ctx, command.into(), args).await,
        Ok(MedalCommandKind::Stats(config)) => _medalstats(ctx, command.into(), config).await,
        Err(content) => command.error(&ctx, content).await,
    }
}

/// Clone of the shared `name` option renamed to `name1`/`name2` for the
/// two-user `common` subcommand. Panics on any other `n`.
fn option_name_(n: u8) -> MyCommandOption {
    let mut name = option_name();

    name.name = match n {
        1 => "name1",
        2 => "name2",
        _ => unreachable!(),
    };

    name
}

/// Clone of the shared `discord` option renamed to `discord1`/`discord2`;
/// only the first instance carries the long help text. Panics on any other `n`.
fn option_discord_(n: u8) -> MyCommandOption {
    let mut discord = option_discord();

    discord.name = match n {
        1 => "discord1",
        2 => "discord2",
        _ => unreachable!(),
    };

    discord.help = if n == 1 {
        Some(
            "Instead of specifying an osu! username with the `name1` option, \
            you can use this `discord1` option to choose a discord user.\n\
            For it to work, the user must be linked to an osu! account i.e. they must have used \
            the `/link` or `/config` command to verify their account.",
        )
    } else {
        None
    };

    discord
}

/// Builds the full `/medal` command definition with all six subcommands
/// (`common`, `info`, `list`, `missing`, `recent`, `stats`).
pub fn define_medal() -> MyCommand {
    let name1 = option_name_(1);
    let name2 = option_name_(2);

    // Sort choices for the `common` subcommand.
    let sort_choices = vec![
        CommandOptionChoice::String {
            name: "Alphabetically".to_owned(),
            value: "alphabet".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Date First".to_owned(),
            value: "date_first".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Date Last".to_owned(),
            value: "date_last".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Rarity".to_owned(),
            value: "rarity".to_owned(),
        },
    ];

    let sort = MyCommandOption::builder("sort", "Specify a medal order").string(sort_choices, false);

    let filter_help = "Filter out some medals.\n\
        If a medal group has been selected, only medals of that group will be shown.";

    let mut filter_choices = vec![
        CommandOptionChoice::String {
            name: "None".to_owned(),
            value: "none".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Unique".to_owned(),
            value: "unique".to_owned(),
        },
    ];

    // One additional filter choice per known medal group; spaces in group
    // names are replaced since choice values may not contain them.
    let filter_iter = MEDAL_GROUPS
        .iter()
        .map(|group| CommandOptionChoice::String {
            name: group.0.to_owned(),
            value: group.0.cow_replace(' ', "_").into_owned(),
        });

    filter_choices.extend(filter_iter);

    let filter = MyCommandOption::builder("filter", "Filter out some medals")
        .help(filter_help)
        .string(filter_choices, false);

    let discord1 = option_discord_(1);
    let discord2 = option_discord_(2);

    let common_description = "Compare which of the given users achieved medals first";

    let common = MyCommandOption::builder("common", common_description)
        .subcommand(vec![name1, name2, sort, filter, discord1, discord2]);

    let name_help = "Specify the name of a medal.\n\
        Upper- and lowercase does not matter but punctuation is important.";

    let name = MyCommandOption::builder(NAME, "Specify the name of a medal")
        .autocomplete()
        .help(name_help)
        .string(Vec::new(), true);

    let info_help = "Display info about an osu! medal.\n\
        The solution, beatmaps, and comments are provided by [osekai](https://osekai.net/).";

    let info = MyCommandOption::builder("info", "Display info about an osu! medal")
        .help(info_help)
        .subcommand(vec![name]);

    let name = option_name();

    // Sort choices for the `list` subcommand (different set than `common`).
    let sort_choices = vec![
        CommandOptionChoice::String {
            name: "Alphabetically".to_owned(),
            value: "alphabet".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Medal ID".to_owned(),
            value: "medal_id".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Date".to_owned(),
            value: "date".to_owned(),
        },
        CommandOptionChoice::String {
            name: "Rarity".to_owned(),
            value: "rarity".to_owned(),
        },
    ];

    let sort = MyCommandOption::builder("sort", "Specify a medal order").string(sort_choices, false);

    let group_choices = MEDAL_GROUPS
        .iter()
        .map(|group| CommandOptionChoice::String {
            name: group.0.to_owned(),
            value: group.0.cow_replace(' ', "_").into_owned(),
        })
        .collect();

    let group = MyCommandOption::builder("group", "Only show medals of this group")
        .string(group_choices, false);

    let reverse =
        MyCommandOption::builder(REVERSE, "Reverse the resulting medal list").boolean(false);

    let discord = option_discord();

    let list = MyCommandOption::builder("list", "List all achieved medals of a user")
        .subcommand(vec![name, sort, group, reverse, discord]);

    let name = option_name();
    let discord = option_discord();

    let missing =
        MyCommandOption::builder("missing", "Display a list of medals that a user is missing")
            .subcommand(vec![name, discord]);

    let name = option_name();
    let discord = option_discord();

    let index = MyCommandOption::builder(INDEX, "Specify an index e.g. 1 = most recent")
        .min_int(0)
        .max_int(100)
        .integer(Vec::new(), false);

    let recent_help = "Display a recently acquired medal of a user.\n\
        The solution, beatmaps, and comments are provided by [osekai](https://osekai.net/).";

    let recent = MyCommandOption::builder("recent", "Display a recently acquired medal of a user")
        .help(recent_help)
        .subcommand(vec![name, index, discord]);

    let name = option_name();
    let discord = option_discord();

    let stats = MyCommandOption::builder("stats", "Display medal stats for a user")
        .subcommand(vec![name, discord]);

    let help = "Info about a medal or users' medal progress.\n\
        Check out [osekai](https://osekai.net/) for more info on medals.";

    MyCommand::new("medal", "Info about a medal or users' medal progress")
        .help(help)
        .options(vec![common, info, list, missing, recent, stats])
}
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::get_unix_ts; use anyhow::{Error, Result}; use futures::lock::Mutex; use hex; use libra_crypto::HashValue; use libra_logger::prelude::*; use libra_types::account_address::{AccountAddress, ADDRESS_LENGTH}; use rand::prelude::*; use std::collections::HashMap; use std::convert::{From, TryFrom}; use std::sync::Arc; #[derive(Clone)] pub struct InvoiceManager { r_hash_map: Arc<Mutex<HashMap<Vec<u8>, Vec<u8>>>>, r_hash_previous_hop_map: Arc<Mutex<HashMap<Vec<u8>, AccountAddress>>>, } #[derive(Clone, Debug, Eq, PartialEq)] pub struct Invoice { pub r_hash: Vec<u8>, pub amount: u64, pub receiver: AccountAddress, } impl Invoice { fn as_vec(&self) -> Vec<u8> { let mut result = Vec::new(); result.extend_from_slice(&self.receiver.to_vec()); result.extend_from_slice(&self.amount.to_be_bytes()); result.extend_from_slice(&self.r_hash); result } } impl TryFrom<Vec<u8>> for Invoice { type Error = Error; fn try_from(value: Vec<u8>) -> Result<Self> { let receiver = AccountAddress::try_from(&value[0..ADDRESS_LENGTH])?; let mut amount_bytes: [u8; 8] = [0; 8]; amount_bytes.copy_from_slice(&value[ADDRESS_LENGTH..ADDRESS_LENGTH + 8]); let amount = u64::from_be_bytes(amount_bytes); let r_hash = Vec::from(&value[ADDRESS_LENGTH + 8..]); Ok(Self { receiver, amount, r_hash, }) } } impl From<Invoice> for Vec<u8> { fn from(value: Invoice) -> Self { value.as_vec() } } impl TryFrom<String> for Invoice { type Error = Error; fn try_from(value: String) -> Result<Self> { let bytes_value = hex::decode(value)?; Invoice::try_from(bytes_value) } } impl From<Invoice> for String { fn from(value: Invoice) -> Self { hex::encode_upper(&value.as_vec()) } } impl InvoiceManager { pub fn new() -> Self { Self { r_hash_map: Arc::new(Mutex::new(HashMap::new())), r_hash_previous_hop_map: Arc::new(Mutex::new(HashMap::new())), } } pub async fn new_invoice(&self, amount: u64, receiver: AccountAddress) -> Invoice { let mut rng: 
StdRng = SeedableRng::seed_from_u64(get_unix_ts()); let preimage = HashValue::random_with_rng(&mut rng).to_vec(); let r_hash = HashValue::from_sha3_256(preimage.as_slice()).to_vec(); info!( "preimage is {},r_hash is {}", hex::encode(preimage.clone()), hex::encode(r_hash.clone()) ); self.r_hash_map .lock() .await .insert(r_hash.clone(), preimage.clone()); Invoice { r_hash, amount, receiver, } } pub async fn get_preimage(&self, r_hash: &HashValue) -> Option<Vec<u8>> { match self.r_hash_map.lock().await.get(&r_hash.to_vec()) { Some(v) => { let mut result = Vec::new(); result.extend_from_slice(v); return Some(result); } None => { return None; } }; } pub async fn add_previous_hop(&self, r_hash: HashValue, previous_addr: AccountAddress) { self.r_hash_previous_hop_map .lock() .await .insert(r_hash.to_vec(), previous_addr); } pub async fn get_previous_hop(&self, preimage: Vec<u8>) -> Option<AccountAddress> { let r_hash = HashValue::from_sha3_256(preimage.as_slice()).to_vec(); match self .r_hash_previous_hop_map .lock() .await .get(&r_hash.to_vec()) { Some(v) => { return Some(v.clone()); } None => { return None; } }; } } #[test] fn test_invoice() { use std::convert::TryInto; let preimage = HashValue::random().to_vec(); let r_hash = HashValue::from_sha3_256(preimage.as_slice()).to_vec(); let account_address = AccountAddress::random(); let amount = 1000; let invoice = Invoice { r_hash, amount, receiver: account_address, }; let invoice_string: String = invoice.clone().into(); let invoice_decode: Invoice = invoice_string.try_into().unwrap(); assert_eq!(invoice_decode.receiver, invoice.receiver); assert_eq!(invoice_decode.r_hash, invoice.r_hash); assert_eq!(invoice_decode.amount, invoice.amount); }
// CLI for rendering reMarkable (xochitl) notebooks to PDF.
mod json;
mod linefile;
mod node;
mod notebook;
mod page;
mod render;

use crate::node::{parse_nodes, Node};
use crate::notebook::Notebook;
use crate::render::render_notebook;
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::path::PathBuf;
use structopt::StructOpt;

/// The CLI subcommands, parsed by structopt.
#[derive(StructOpt)]
enum Command {
    /// Remember the path to the xochitl data directory.
    SetDir {
        path: String,
    },
    /// Print the notebook hierarchy.
    Tree,
    /// Render a single notebook to `output_path`.
    RenderNotebook {
        notebook: PathBuf,
        output_path: PathBuf,
    },
    /// Render every notebook under `output_directory`.
    RenderAll {
        output_directory: PathBuf,
    },
}

/// Persisted app configuration (stored/loaded via confy).
#[derive(Serialize, Deserialize, Default)]
struct Config {
    // Path to the xochitl data directory; empty means "not configured yet".
    xochitl_dir: String,
}

const APP_NAME: &str = "restorable";

fn main() {
    match run() {
        Ok(_) => {}
        Err(e) => {
            eprintln!("{}", e);
        }
    }
}

/// Loads config, executes the chosen subcommand, then persists the
/// (possibly updated) config. Note that `confy::store` at the end runs for
/// every subcommand, which is how `SetDir` takes effect.
fn run() -> Result<(), Box<dyn Error>> {
    let mut config: Config = confy::load(APP_NAME)?;
    let command = Command::from_args();
    match command {
        Command::SetDir { path } => {
            config.xochitl_dir = path;
        }
        Command::Tree => {
            check_configuration(&config)?;
            let root_node = parse_nodes(&config.xochitl_dir)?;
            for child in root_node.children.borrow().iter() {
                child.walk(&|node, ancestors| {
                    // Indent according to depth in the hierarchy.
                    for _ in ancestors {
                        print!("  ");
                    }
                    println!("- {}", node.name());
                });
            }
        }
        Command::RenderNotebook {
            notebook,
            output_path,
        } => {
            check_configuration(&config)?;
            let root_node = parse_nodes(&config.xochitl_dir)?;
            match root_node.get_descendant_by_name(&notebook) {
                None => {
                    eprintln!("Cannot find document {:#?}", notebook)
                }
                Some(node) => render(&config, &node, &output_path)?,
            }
        }
        Command::RenderAll { output_directory } => {
            check_configuration(&config)?;
            match output_directory.canonicalize() {
                Err(_) => eprintln!("Directory does not exist: {:#?}", output_directory),
                Ok(output_directory) => {
                    let root_node = parse_nodes(&config.xochitl_dir)?;
                    root_node.walk(&|node, ancestors| {
                        if node.is_notebook() {
                            // Mirror the notebook's position in the tree
                            // under the output directory.
                            let mut full_path = output_directory.clone();
                            for node in ancestors {
                                full_path.push(node.name());
                            }
                            match full_path.extension() {
                                Some(_) => {
                                    // Nothing to do, already a rendered file.
                                }
                                None => {
                                    full_path.set_extension("pdf");
                                    if let Some(parent) = full_path.parent() {
                                        // Rendering failures are reported but
                                        // do not abort the batch run.
                                        match std::fs::create_dir_all(parent) {
                                            Err(_) => eprintln!(
                                                "WARNING: Failed to create directory {:#?}",
                                                parent
                                            ),
                                            Ok(_) => match render(&config, &node, &full_path) {
                                                Err(_) => eprintln!(
                                                    "WARNING: Failed to render notebook '{}'",
                                                    node.name()
                                                ),
                                                Ok(_) => {}
                                            },
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            }
        }
    }
    confy::store(APP_NAME, config)?;
    Ok(())
}

/// Fails with `ConfigMissing` until `set-dir` has been run once.
fn check_configuration(config: &Config) -> Result<(), ConfigMissing> {
    if config.xochitl_dir.is_empty() {
        Err(ConfigMissing {})
    } else {
        Ok(())
    }
}

/// Renders a single notebook node to `output_path`; non-notebook nodes are
/// reported and skipped (returns Ok in that case).
fn render(config: &Config, node: &Node, output_path: &Path) -> Result<(), Box<dyn Error>> {
    if node.is_notebook() {
        let filename = Path::join(&PathBuf::from(&config.xochitl_dir), &node.id);
        let filename = filename.to_str().unwrap();
        let notebook = Notebook::load(filename)?;
        let mut file = File::create(output_path)?;
        println!("Rendering notebook {}...", node.name());
        render_notebook(notebook, &mut file)?;
    } else {
        eprintln!("Not a notebook: {:#?}", node.name());
    }
    Ok(())
}

/// Error returned when no xochitl directory has been configured yet.
#[derive(Debug)]
struct ConfigMissing;

impl ConfigMissing {
    const MESSAGE: &'static str = "Please run `restorable set-dir /path/to/xochitl`";
}

impl Error for ConfigMissing {
    // NOTE(review): `Error::description` is deprecated; the `Display` impl
    // below already provides the message — consider dropping this override.
    fn description(&self) -> &str {
        Self::MESSAGE
    }
}

impl std::fmt::Display for ConfigMissing {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(Self::MESSAGE)
    }
}
// Demo of several ways to drive async fns: single-threaded block_on,
// thread pools, and tokio runtimes.
//
// Note: `foo` deliberately uses the BLOCKING std::thread::sleep, so each
// executor thread is fully occupied while a `foo` "sleeps" — this is what
// makes the scheduling differences between the mains observable.
async fn foo(id: i32) {
    for i in 1..10 {
        println!("hi number {} in foo({}).", i, id);
        std::thread::sleep(std::time::Duration::from_millis(1000));
    }
}

// Sequential awaits inside one async block, driven on the current thread.
fn main() {
    let task = async {
        foo(10).await;
        foo(20).await;
        foo(30).await;
    };
    println!("program start.");
    futures::executor::block_on(task);
    println!("program end.");
}

// Same sequential awaits, but wrapped in an async fn instead of a block.
async fn go() {
    foo(10).await;
    foo(20).await;
    foo(30).await;
}

fn main2() {
    println!("program start.");
    futures::executor::block_on(go());
    println!("program end.");
}

use futures::executor::ThreadPool;
use std::io::Read;

// Spawn the three futures onto a thread pool; they are detached, so we
// block on stdin to keep the process alive while they run.
fn main3() {
    let mut pool = ThreadPool::new().unwrap();
    println!("program start.");
    pool.spawn_ok(foo(10));
    pool.spawn_ok(foo(20));
    pool.spawn_ok(foo(30));
    println!("press any key.");
    std::io::stdin().read(&mut [0]);
    println!("program end.");
}

fn main4() {
    let mut tpb = futures::executor::ThreadPoolBuilder::new();
    // Limit the pool to two threads,
    // so only the first two tasks run concurrently.
    let pb = tpb.pool_size(2);
    let pool = pb.create().unwrap();
    println!("program start.");
    pool.spawn_ok(foo(10));
    pool.spawn_ok(foo(20));
    pool.spawn_ok(foo(30));
    println!("press any key.");
    std::io::stdin().read(&mut [0]);
    println!("program end.");
}

/*
// Tokio-friendly variant of `foo`: delay_for yields instead of blocking.
async fn foo(id: i32) {
    for i in 1..10 {
        println!("hi number {} in foo({}).", i, id);
        tokio::time::delay_for(std::time::Duration::from_secs(1)).await;
    }
}
*/

// The attribute turns this async fn into a sync fn running on a tokio runtime.
#[tokio::main]
async fn main6() {
    println!("program start.");
    foo(10).await;
    foo(20).await;
    foo(30).await;
    println!("program end.");
}

// Same as main6 but constructing the tokio runtime by hand.
fn main7() {
    let mut rt = tokio::runtime::Runtime::new().unwrap();
    println!("program start.");
    rt.block_on(async {
        foo(10).await;
        foo(20).await;
        foo(30).await;
    });
    println!("program end.");
}
// API model modules, each re-exported flat so callers can use
// `models::Whatever` without naming the submodule.
pub mod download_link;
pub mod file_details;
pub mod file_list;
pub mod games;
pub mod md5_search;
pub mod mod_info;
pub mod queriable;
pub mod search;

pub use self::download_link::*;
pub use self::file_details::*;
pub use self::file_list::*;
pub use self::games::*;
pub use self::md5_search::*;
pub use self::mod_info::*;
pub use self::queriable::*;
pub use self::search::*;
// ===============================================================================================
// Configuration
// ===============================================================================================

// NOTE(review): this targets a pre-1.0 nightly allocator ABI
// (`#![allocator]` + `__rust_allocate` symbols); it will not build on any
// modern toolchain, where `#[global_allocator]` replaced this mechanism.
#![feature(
    allocator,
    const_fn,
)]
#![no_std]
#![allocator]

// ===============================================================================================
// Extern
// ===============================================================================================

#[macro_use]
extern crate lazy_static;
extern crate linked_list_allocator;
extern crate spin;

// Linker-provided symbols marking the heap region; declared as functions
// only so their ADDRESSES can be taken (`heap_base as usize`).
extern "C" {
    fn heap_base();
    fn heap_end();
}

pub use linked_list_allocator::Heap;
use spin::{Mutex, RwLock};

// ===============================================================================================
// Statics
// ===============================================================================================

lazy_static! {
    // The global heap, covering [heap_base, heap_end).
    pub static ref HEAP: Mutex<Heap> = Mutex::new(unsafe {
        Heap::new(heap_base as usize, heap_end as usize - heap_base as usize)
    });
}

// Running total of bytes handed out. NOTE(review): updated under a separate
// lock from HEAP, so the counter can transiently disagree with the heap's
// actual state between the two lock acquisitions.
static HEAP_ALLOCATED: RwLock<usize> = RwLock::new(0);

// ===============================================================================================
// Rust Allocation Functions
// ===============================================================================================

// Allocator entry point; panics ("out of memory") when no fitting block exists.
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
    *(HEAP_ALLOCATED.write()) += size;
    HEAP.lock().allocate_first_fit(size, align).expect("out of memory")
}

#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) {
    unsafe { HEAP.lock().deallocate(ptr, size, align) };
    *(HEAP_ALLOCATED.write()) -= size;
}

// This allocator never over-allocates, so the usable size is the requested size.
#[no_mangle]
pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize {
    size
}

// In-place growth is not supported: report the old size so the caller
// falls back to __rust_reallocate.
#[no_mangle]
pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, size: usize, _new_size: usize, _align: usize) -> usize {
    size
}

// Reallocate by allocate-copy-free.
#[no_mangle]
pub extern fn __rust_reallocate(ptr: *mut u8, size: usize, new_size: usize, align: usize) -> *mut u8 {
    use core::{ptr, cmp};

    // from: https://github.com/rust-lang/rust/blob/
    //       c66d2380a810c9a2b3dbb4f93a830b101ee49cc2/
    //       src/liballoc_system/lib.rs#L98-L101
    let new_ptr = __rust_allocate(new_size, align);
    unsafe { ptr::copy(ptr, new_ptr, cmp::min(size, new_size)) };
    __rust_deallocate(ptr, size, align);
    new_ptr
}

// ===============================================================================================
// Heap Information
// ===============================================================================================

// Bytes currently handed out (as tracked by HEAP_ALLOCATED).
pub extern fn heap_used() -> usize {
    *(HEAP_ALLOCATED.read())
}

pub extern fn heap_free() -> usize {
    heap_size() - heap_used()
}

// Total size of the heap region defined by the linker symbols.
pub extern fn heap_size() -> usize {
    heap_end as usize - heap_base as usize
}
// Copyright 2017 Dmitry Tantsur <divius.inside@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Generic API bits for implementing new services.

use std::rc::Rc;
use std::vec;

use fallible_iterator::FallibleIterator;

use super::super::{Error, ErrorKind, Result};
use super::super::session::Session;
use super::super::utils::Query;
use super::{ListResources, ResourceId};

/// Generic implementation of a `FallibleIterator` over resources.
#[derive(Debug, Clone)]
pub struct ResourceIterator<T> {
    session: Rc<Session>,
    query: Query,
    // Items of the current page not yet handed to the caller.
    cache: Option<vec::IntoIter<T>>,
    // ID of the last yielded item; sent as the pagination marker.
    marker: Option<String>,
    // None = not yet asked the service; Some(false) if the caller supplied
    // limit/marker themselves (we must not add our own).
    can_paginate: Option<bool>,
}

impl<T> ResourceIterator<T> {
    #[allow(dead_code)] // unused with --no-default-features
    pub(crate) fn new(session: Rc<Session>, query: Query) -> ResourceIterator<T> {
        // Only paginate automatically when the caller did not pass their
        // own "limit" or "marker" parameters.
        let can_paginate = query.0.iter().all(|pair| {
            pair.0 != "limit" && pair.0 != "marker"
        });

        ResourceIterator {
            session: session,
            query: query,
            cache: None,
            marker: None,
            can_paginate: if can_paginate {
                None  // ask the service later
            } else {
                Some(false)
            }
        }
    }
}

impl<T> ResourceIterator<T> where T: ListResources + ResourceId {
    /// Assert that only one item is left and fetch it.
    ///
    /// Fails with `ResourceNotFound` if no items are left and with
    /// `TooManyItems` if there is more than one item left.
    pub fn one(mut self) -> Result<T> {
        match self.next()? {
            Some(result) => if self.next()?.is_some() {
                Err(Error::new(ErrorKind::TooManyItems,
                               "Query returned more than one result"))
            } else {
                Ok(result)
            },
            None => Err(Error::new(ErrorKind::ResourceNotFound,
                                   "Query returned no results"))
        }
    }
}

impl<T> FallibleIterator for ResourceIterator<T> where T: ListResources + ResourceId {
    type Item = T;

    type Error = Error;

    /// Yields the next resource, fetching a new page from the service when
    /// the cached page is exhausted. Every yielded item updates `marker`.
    fn next(&mut self) -> Result<Option<T>> {
        // First call: ask the service whether pagination is supported.
        if self.can_paginate.is_none() {
            self.can_paginate = Some(T::can_paginate(&self.session)?);
        }

        let maybe_next = self.cache.as_mut().and_then(|cache| cache.next());
        Ok(if maybe_next.is_some() {
            maybe_next
        } else {
            if self.cache.is_some() && self.can_paginate == Some(false) {
                // We have exhausted the results and pagination is not possible
                None
            } else {
                let mut query = self.query.clone();

                if self.can_paginate == Some(true) {
                    // can_paginate=true implies no limit was provided
                    query.push("limit", T::DEFAULT_LIMIT);
                    if let Some(marker) = self.marker.take() {
                        query.push_str("marker", marker);
                    }
                }

                // Fetch the next page and cache whatever follows the first item.
                let mut servers_iter = T::list_resources(self.session.clone(),
                                                         &query.0)?.into_iter();
                let maybe_next = servers_iter.next();
                self.cache = Some(servers_iter);
                maybe_next
            }
        }.map(|next| {
            // Remember the last yielded ID for the next page request.
            self.marker = Some(next.resource_id());
            next
        }))
    }
}

#[cfg(test)]
mod test {
    use std::rc::Rc;

    use fallible_iterator::FallibleIterator;
    use serde_json::{self, Value};

    use super::super::super::Result;
    use super::super::super::session::Session;
    use super::super::super::utils::{self, Query};
    use super::super::{ListResources, ResourceId};
    use super::ResourceIterator;

    #[derive(Debug, PartialEq, Eq)]
    struct Test(u8);

    impl ResourceId for Test {
        fn resource_id(&self) -> String {
            self.0.to_string()
        }
    }

    // Converts a serialized query (array of [key, value] pairs) into a map
    // so the fake services below can inspect it.
    fn array_to_map(value: Vec<Value>) -> serde_json::Map<String, Value> {
        value.into_iter().map(|arr| {
            match arr {
                Value::Array(v) => match v[0] {
                    Value::String(ref s) => (s.clone(), v[1].clone()),
                    ref y => panic!("unexpected query key {:?}", y)
                },
                x => panic!("unexpected query component {:?}", x)
            }
        }).collect()
    }

    // Fake paginated service: pages of size 2, keyed by the marker.
    impl ListResources for Test {
        const DEFAULT_LIMIT: usize = 2;

        fn list_resources<Q>(_session: Rc<Session>, query: Q) -> Result<Vec<Self>>
                where Q: ::serde::Serialize + ::std::fmt::Debug {
            let map = match serde_json::to_value(query).unwrap() {
                Value::Array(arr) => array_to_map(arr),
                x => panic!("unexpected query {:?}", x)
            };

            assert_eq!(*map.get("limit").unwrap(), Value::String("2".into()));
            Ok(match map.get("marker") {
                Some(&Value::String(ref s)) if s == "1" => vec![Test(2), Test(3)],
                Some(&Value::String(ref s)) if s == "3" => Vec::new(),
                None => vec![Test(0), Test(1)],
                Some(ref x) => panic!("unexpected marker {:?}", x)
            })
        }
    }

    #[derive(Debug, PartialEq, Eq)]
    struct NoPagination(u8);

    // Fake non-paginated service: must never receive limit/marker.
    impl ListResources for NoPagination {
        const DEFAULT_LIMIT: usize = 2;

        fn can_paginate(_session: &Session) -> Result<bool> {
            Ok(false)
        }

        fn list_resources<Q>(_session: Rc<Session>, query: Q) -> Result<Vec<Self>>
                where Q: ::serde::Serialize + ::std::fmt::Debug {
            let map = match serde_json::to_value(query).unwrap() {
                Value::Array(arr) => array_to_map(arr),
                x => panic!("unexpected query {:?}", x)
            };

            assert!(map.get("limit").is_none());
            assert!(map.get("marker").is_none());
            Ok(vec![NoPagination(0), NoPagination(1), NoPagination(2)])
        }
    }

    impl ResourceId for NoPagination {
        fn resource_id(&self) -> String {
            self.0.to_string()
        }
    }

    #[test]
    fn test_resource_iterator() {
        let s = utils::test::new_session(utils::test::URL);
        let it: ResourceIterator<Test> = ResourceIterator::new(Rc::new(s), Query::new());
        assert_eq!(it.collect::<Vec<Test>>().unwrap(),
                   vec![Test(0), Test(1), Test(2), Test(3)]);
    }

    #[test]
    fn test_resource_iterator_no_pagination() {
        let s = utils::test::new_session(utils::test::URL);
        let it: ResourceIterator<NoPagination> = ResourceIterator::new(Rc::new(s), Query::new());
        assert_eq!(it.collect::<Vec<NoPagination>>().unwrap(),
                   vec![NoPagination(0), NoPagination(1), NoPagination(2)]);
    }
}
/// Shortest-Job-First: total clock time until the job at `index` completes.
///
/// A job runs before (or as) the target job when its burst time is strictly
/// shorter, or when it ties and sits at the same or an earlier position.
/// The returned sum includes the target job's own burst time.
fn sjf(jobs: &[usize], index: usize) -> usize {
    let target = jobs[index];
    let mut elapsed = 0;

    for (position, &burst) in jobs.iter().enumerate() {
        let runs_first = burst < target || (burst == target && position <= index);
        if runs_first {
            elapsed += burst;
        }
    }

    elapsed
}

#[test]
fn returns_expected() {
    assert_eq!(sjf(&[100], 0), 100);
    assert_eq!(sjf(&[3, 10, 20, 1, 2], 0), 6);
    assert_eq!(sjf(&[3, 3, 3, 10, 20, 1, 2], 1), 9);
}
// NOTE(review): machine-generated API models ("generated by AutoRust 0.1.0").
// Prefer regenerating from the service spec over hand-editing these types.
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};

/// An operation exposed by the resource provider.
/// All fields are optional on the wire; `None` values are omitted when serializing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableRpOperation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<AvailableRpOperationDisplayInfo>,
    // NOTE(review): typed as a string even though the name suggests a boolean;
    // this mirrors what the generator emitted — confirm against the service spec
    // before changing.
    #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")]
    pub is_data_action: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationMetaPropertyInfo>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
}

/// Status of a long-running operation, polled via its operation resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<operation_resource::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorResponse>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "percentComplete", default, skip_serializing_if = "Option::is_none")]
    pub percent_complete: Option<f64>,
}

pub mod operation_resource {
    use super::*;
    /// Terminal and in-flight states of an operation resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        InProgress,
        Succeeded,
        Failed,
        Canceled,
    }
}

/// Localized display strings for an RP operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableRpOperationDisplayInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaPropertyInfo {
    #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")]
    pub service_specification: Option<OperationMetaServiceSpecification>,
}

/// Metric and log specifications attached to an operation's metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaServiceSpecification {
    #[serde(rename = "metricSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_specifications: Vec<OperationMetaMetricSpecification>,
    #[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub log_specifications: Vec<OperationMetaLogSpecification>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaMetricSpecification {
    #[serde(rename = "sourceMdmNamespace", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_namespace: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "aggregationType", default, skip_serializing_if = "Option::is_none")]
    pub aggregation_type: Option<String>,
    #[serde(rename = "displayDescription", default, skip_serializing_if = "Option::is_none")]
    pub display_description: Option<String>,
    #[serde(rename = "sourceMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_account: Option<String>,
    #[serde(rename = "enableRegionalMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub enable_regional_mdm_account: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub dimensions: Vec<OperationMetaMetricDimensionSpecification>,
    #[serde(rename = "supportsInstanceLevelAggregation", default, skip_serializing_if = "Option::is_none")]
    pub supports_instance_level_aggregation: Option<bool>,
    #[serde(rename = "metricFilterPattern", default, skip_serializing_if = "Option::is_none")]
    pub metric_filter_pattern: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaLogSpecification {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
    pub blob_duration: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetaMetricDimensionSpecification {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "toBeExportedForShoebox", default, skip_serializing_if = "Option::is_none")]
    pub to_be_exported_for_shoebox: Option<bool>,
}

/// Properties of a SQL pool (v3 shape).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<sql_pool_properties::Status>,
    #[serde(rename = "sqlPoolGuid", default, skip_serializing_if = "Option::is_none")]
    pub sql_pool_guid: Option<String>,
    #[serde(rename = "currentServiceObjectiveName", default, skip_serializing_if = "Option::is_none")]
    pub current_service_objective_name: Option<String>,
    #[serde(rename = "requestedServiceObjectiveName", default, skip_serializing_if = "Option::is_none")]
    pub requested_service_objective_name: Option<String>,
    #[serde(rename = "maxServiceObjectiveName", default, skip_serializing_if = "Option::is_none")]
    pub max_service_objective_name: Option<String>,
    #[serde(rename = "autoPauseTimer", default, skip_serializing_if = "Option::is_none")]
    pub auto_pause_timer: Option<i32>,
    #[serde(rename = "autoResume", default, skip_serializing_if = "Option::is_none")]
    pub auto_resume: Option<bool>,
}

pub mod sql_pool_properties {
    use super::*;
    /// Lifecycle states a SQL pool can report.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Invisible,
        Online,
        Offline,
        Creating,
        Inaccessible,
        Pausing,
        Paused,
        Resuming,
        Scaling,
        Dropping,
        Error,
        Unknown,
    }
}

/// A SQL pool ARM resource (v3); flattens the common tracked-resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolV3 {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<SkuV3>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolProperties>,
}

/// SKU descriptor; `name` is the only required field in this model set.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuV3 {
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
}

/// PATCH payload for a SQL pool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<SkuV3>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlPoolProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}

/// One page of SQL pools; `next_link` carries the continuation URL, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPoolListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlPoolV3>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Properties of a SQL database.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDatabaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<sql_database_properties::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub collation: Option<String>,
    #[serde(rename = "databaseGuid", default, skip_serializing_if = "Option::is_none")]
    pub database_guid: Option<String>,
    #[serde(rename = "storageRedundancy", default, skip_serializing_if = "Option::is_none")]
    pub storage_redundancy: Option<sql_database_properties::StorageRedundancy>,
    #[serde(rename = "dataRetention", default, skip_serializing_if = "Option::is_none")]
    pub data_retention: Option<SqlDatabaseDataRetention>,
}

pub mod sql_database_properties {
    use super::*;
    /// Lifecycle states a SQL database can report.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Online,
        Restoring,
        RecoveryPending,
        Recovering,
        Suspect,
        Offline,
        Standby,
        Shutdown,
        EmergencyMode,
        AutoClosed,
        Copying,
        Creating,
        Inaccessible,
        OfflineSecondary,
        Pausing,
        Paused,
        Resuming,
        Scaling,
        OfflineChangingDwPerformanceTiers,
        OnlineChangingDwPerformanceTiers,
        Disabled,
    }
    /// Storage redundancy options for the database.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum StorageRedundancy {
        Local,
        Geo,
        Zone,
        GeoZone,
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDatabaseDataRetention {
    #[serde(rename = "retentionPeriod", default, skip_serializing_if = "Option::is_none")]
    pub retention_period: Option<String>,
    #[serde(rename = "dropRetentionPeriod", default, skip_serializing_if = "Option::is_none")]
    pub drop_retention_period: Option<String>,
}

/// A SQL database ARM resource; flattens the common tracked-resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDatabase {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlDatabaseProperties>,
}

/// PATCH payload for a SQL database.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDatabaseUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SqlDatabaseProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}

/// One page of SQL databases; `next_link` carries the continuation URL, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDatabaseListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SqlDatabase>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Standard ARM error envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}

/// Error detail; recursively nests further details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    // `type` is a Rust keyword, hence the trailing underscore + rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub info: Option<serde_json::Value>,
}

/// ARM `systemData` block: audit metadata about who created/modified the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}

pub mod system_data {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}

/// Resource with location + tags (the ARM "tracked resource" envelope).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
}

/// Minimal ARM resource identity: id, name, type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
use super::{Camera, Location, SizeCollection}; use crate::{config::ExifConfig, models::size, tools::replace_pairs}; use chrono::{DateTime, FixedOffset}; use core::cmp::Ordering; use serde::{Deserialize, Serialize}; /// Unique path to any blog photo #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PhotoPath { pub post_path: String, pub photo_index: u8, } impl PhotoPath { pub fn post_url(&self) -> String { format!("{}#{:03}", self.post_path, self.photo_index) } pub fn thumb_url(&self, ext: &str) -> String { format!( "{}/{:03}_{}{}", self.post_path, self.photo_index, size::suffix::THUMB, ext ) } } #[derive(Debug, Default, Clone)] pub struct PhotoFile { /// File name of source image including extension pub name: String, /// Timestamp when the file was created pub created: i64, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Photo { /// Name of photographer recorded in EXIF #[serde(skip)] pub artist: Option<String>, /// Name of software used to process the photo #[serde(skip)] pub software: String, #[serde(skip)] pub title: Option<String>, #[serde(skip)] pub caption: Option<String>, /// Information about the camera used to make the photo #[serde(skip)] pub camera: Option<Camera>, /// Latitude and longitude where photo was taken #[serde(skip)] pub location: Option<Location>, /// One-based position of photo within post pub index: u8, // TODO: remove hash tags like #boisephotographer /// Tags applied to the photo #[serde(skip)] pub tags: Vec<String>, /// When the photograph was taken per camera EXIF #[serde(skip)] pub date_taken: Option<DateTime<FixedOffset>>, /// Whether taken date is an outlier (such an historic photo) compared to /// other photos in the same post. Outliers may be removed from mini-maps so /// the maps aren't overly zoomed-out. 
/// /// http://www.wikihow.com/Calculate-Outliers #[serde(skip)] pub outlier_date: bool, /// Sizes in which the photo is available pub size: SizeCollection, #[serde(skip)] pub file: PhotoFile, } impl Photo { /// Standardize EXIF data based on configuration and remove invalid values pub fn sanitize(&mut self, config: &ExifConfig) { self.software = replace_pairs(self.software.clone(), &config.software); if let Some(camera) = &mut self.camera { camera.name = replace_pairs(camera.name.clone(), &config.camera); if let Some(lens) = &camera.lens { camera.lens = Some(replace_pairs(lens.clone(), &config.lens)); } } if let Some(l) = &self.location { if !l.is_valid() { // remove invalid location self.location = None; } } } pub fn json_ld(&self) -> serde_json::Value { let size = &self.size.medium; // TODO: needs full image path? serde_json::json!({ "@type": "ImageObject", "url": size.name, "width": size.width, "height": size.height }) } /// Whether photo is in portrait orientation (taller than wide) pub fn is_portrait(&self) -> bool { self.size.is_portrait() } /// Image width divided by height pub fn aspect_ratio(&self) -> f32 { self.size.aspect_ratio() } } impl PartialOrd for Photo { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.index.partial_cmp(&other.index) } } impl Ord for Photo { fn cmp(&self, other: &Photo) -> Ordering { self.index.cmp(&other.index) } } impl PartialEq for Photo { fn eq(&self, other: &Self) -> bool { self.file.name == other.file.name && self.date_taken == other.date_taken } } impl Eq for Photo {} impl Default for Photo { fn default() -> Self { Photo { file: PhotoFile { name: String::new(), created: 0, }, artist: None, software: String::new(), title: None, caption: None, camera: None, location: None, index: 0, tags: Vec::new(), date_taken: None, outlier_date: false, size: SizeCollection::default(), } } }
//! Defines additional methods on RistrettoPoint for Lizard
//!
//! Lizard embeds 16 bytes of data inside a Ristretto point such that the
//! data can be recovered from the point alone (see `lizard_encode` /
//! `lizard_decode`).
#![allow(non_snake_case)]
use digest::Digest;
use digest::generic_array::typenum::U32;

use constants;
use field::FieldElement;

use subtle::ConditionallySelectable;
use subtle::ConstantTimeEq;
use subtle::Choice;

use edwards::EdwardsPoint;

use lizard::jacobi_quartic::JacobiPoint;
use lizard::lizard_constants;

#[allow(unused_imports)]
use prelude::*;

use ristretto::RistrettoPoint;

impl RistrettoPoint {
    /// Directly encode 253 bits as a RistrettoPoint, using Elligator
    pub fn from_uniform_bytes_single_elligator(bytes: &[u8; 32]) -> RistrettoPoint {
        RistrettoPoint::elligator_ristretto_flavor(&FieldElement::from_bytes(&bytes))
    }

    /// Encode 16 bytes of data to a RistrettoPoint, using the Lizard method
    ///
    /// The field element fed to Elligator is built as hash-prefix/suffix with
    /// the 16 data bytes spliced into bytes 8..24; the top and sign bits are
    /// masked off so the element is canonical and non-negative.
    pub fn lizard_encode<D: Digest>(data: &[u8; 16]) -> RistrettoPoint
            where D: Digest<OutputSize = U32> {
        let mut fe_bytes: [u8;32] = Default::default();

        let digest = D::digest(data);
        fe_bytes[0..32].copy_from_slice(digest.as_slice());
        // Overwrite the middle 16 bytes of the digest with the payload.
        fe_bytes[8..24].copy_from_slice(data);
        fe_bytes[0] &= 254; // make positive since Elligator on r and -r is the same
        fe_bytes[31] &= 63;
        let fe = FieldElement::from_bytes(&fe_bytes);
        RistrettoPoint::elligator_ristretto_flavor(&fe)
    }

    /// Decode 16 bytes of data from a RistrettoPoint, using the Lizard method
    ///
    /// Enumerates all (up to 8) Elligator preimages of the point and accepts
    /// the candidate whose bytes match the hash/payload structure produced by
    /// `lizard_encode`. Returns `None` unless exactly one candidate matches.
    /// The per-candidate work is done with constant-time selects.
    pub fn lizard_decode<D: Digest>(&self) -> Option<[u8; 16]>
            where D: Digest<OutputSize = U32> {
        let mut result: [u8; 16] = Default::default();
        let mut h: [u8;32] = Default::default();
        let (mask, fes) = self.elligator_ristretto_flavor_inverse();
        let mut n_found = 0;
        for j in 0..8 {
            // Bit j of `mask` says whether fes[j] is a valid preimage.
            let mut ok = Choice::from((mask >> j) & 1);
            let buf2 = fes[j].to_bytes(); // array
            // Recompute what lizard_encode would have produced from the
            // candidate's middle 16 bytes, then compare in constant time.
            h.copy_from_slice(&D::digest(&buf2[8..24])); // array
            h[8..24].copy_from_slice(&buf2[8..24]);
            h[0] &= 254;
            h[31] &= 63;
            ok &= h.ct_eq(&buf2);
            for i in 0..16 {
                result[i] = u8::conditional_select(&result[i], &buf2[8+i], ok);
            }
            n_found += ok.unwrap_u8();
        }
        if n_found == 1 {
            return Some(result);
        } else {
            return None;
        }
    }

    /// Directly encode 253 bits as a RistrettoPoint, using Elligator
    pub fn encode_253_bits(data: &[u8; 32]) -> Option<RistrettoPoint> {
        // NOTE(review): this length check is always false for a `[u8; 32]`
        // parameter — likely a leftover from a slice-based signature.
        if data.len() != 32 {
            return None;
        }

        let fe = FieldElement::from_bytes(data);
        let p = RistrettoPoint::elligator_ristretto_flavor(&fe);
        Some(p)
    }

    /// Directly decode a RistrettoPoint as 253 bits, using Elligator
    ///
    /// Returns the validity bitmask together with the byte encodings of all
    /// eight candidate field elements.
    pub fn decode_253_bits(&self) -> (u8, [[u8; 32]; 8]) {
        let mut ret = [ [0u8; 32]; 8];
        let (mask, fes) = self.elligator_ristretto_flavor_inverse();

        for j in 0..8 {
            ret[j] = fes[j].to_bytes();
        }
        (mask, ret)
    }

    /// Return the coset self + E[4], for debugging.
    pub fn xcoset4(&self) -> [EdwardsPoint; 4] {
        [
            self.0,
            &self.0 + &constants::EIGHT_TORSION[2],
            &self.0 + &constants::EIGHT_TORSION[4],
            &self.0 + &constants::EIGHT_TORSION[6],
        ]
    }

    /// Computes the at most 8 positive FieldElements f such that
    /// self == elligator_ristretto_flavor(f).
    /// Assumes self is even.
    ///
    /// Returns a bitmask of which elements in fes are set.
    pub fn elligator_ristretto_flavor_inverse(&self) -> (u8, [FieldElement; 8]) {
        // Elligator2 computes a Point from a FieldElement in two steps: first
        // it computes a (s,t) on the Jacobi quartic and then computes the
        // corresponding even point on the Edwards curve.
        //
        // We invert in three steps.  Any Ristretto point has four representatives
        // as even Edwards points.  For each of those even Edwards points,
        // there are two points on the Jacobi quartic that map to it.
        // Each of those eight points on the Jacobi quartic might have an
        // Elligator2 preimage.
        //
        // Essentially we first loop over the four representatives of our point,
        // then for each of them consider both points on the Jacobi quartic and
        // check whether they have an inverse under Elligator2.  We take the
        // following shortcut though.
        //
        // We can compute two Jacobi quartic points for (x,y) and (-x,-y)
        // at the same time.  The four Jacobi quartic points are two of
        // such pairs.
        let mut mask : u8 = 0;
        let jcs = self.to_jacobi_quartic_ristretto();
        let mut ret = [FieldElement::one(); 8];

        for i in 0..4 {
            // Candidate 2i: the Jacobi point itself.
            let (ok, fe) = jcs[i].elligator_inv();
            let mut tmp : u8 = 0;
            ret[2*i] = fe;
            tmp.conditional_assign(&1, ok);
            mask |= tmp << (2 * i);

            // Candidate 2i+1: its dual.
            let jc = jcs[i].dual();
            let (ok, fe) = jc.elligator_inv();
            let mut tmp : u8 = 0;
            ret[2*i+1] = fe;
            tmp.conditional_assign(&1, ok);
            mask |= tmp << (2 * i + 1);
        }

        return (mask, ret)
    }

    /// Find a point on the Jacobi quartic associated to each of the four
    /// points Ristretto equivalent to p.
    ///
    /// There is one exception: for (0,-1) there is no point on the quartic and
    /// so we repeat one on the quartic equivalent to (0,1).
    fn to_jacobi_quartic_ristretto(&self) -> [JacobiPoint; 4] {
        let x2 = self.0.X.square();      // X^2
        let y2 = self.0.Y.square();      // Y^2
        let y4 = y2.square();            // Y^4
        let z2 = self.0.Z.square();      // Z^2
        let z_min_y = &self.0.Z - &self.0.Y; // Z - Y
        let z_pl_y = &self.0.Z + &self.0.Y;  // Z + Y
        let z2_min_y2 = &z2 - &y2;           // Z^2 - Y^2

        // gamma := 1/sqrt( Y^4 X^2 (Z^2 - Y^2) )
        let (_, gamma) = (&(&y4 * &x2) * &z2_min_y2).invsqrt();

        let den = &gamma * &y2;

        let s_over_x = &den * &z_min_y;
        let sp_over_xp = &den * &z_pl_y;

        let s0 = &s_over_x * &self.0.X;
        let s1 = &(-(&sp_over_xp)) * &self.0.X;

        // t_0 := -2/sqrt(-d-1) * Z * sOverX
        // t_1 := -2/sqrt(-d-1) * Z * spOverXp
        let tmp = &lizard_constants::MDOUBLE_INVSQRT_A_MINUS_D * &self.0.Z;
        let mut t0 = &tmp * &s_over_x;
        let mut t1 = &tmp * &sp_over_xp;

        // den := -1/sqrt(1+d) (Y^2 - Z^2) gamma
        let den = &(&(-(&z2_min_y2)) * &lizard_constants::MINVSQRT_ONE_PLUS_D) * &gamma;

        // Same as before but with the substitution (X, Y, Z) = (Y, X, i*Z)
        let iz = &constants::SQRT_M1 * &self.0.Z;  // iZ
        let iz_min_x = &iz - &self.0.X;            // iZ - X
        let iz_pl_x = &iz + &self.0.X;             // iZ + X

        let s_over_y = &den * &iz_min_x;
        let sp_over_yp = &den * &iz_pl_x;

        let mut s2 = &s_over_y * &self.0.Y;
        let mut s3 = &(-(&sp_over_yp)) * &self.0.Y;

        // t_2 := -2/sqrt(-d-1) * i*Z * sOverY
        // t_3 := -2/sqrt(-d-1) * i*Z * spOverYp
        let tmp = &lizard_constants::MDOUBLE_INVSQRT_A_MINUS_D * &iz;
        let mut t2 = &tmp * &s_over_y;
        let mut t3 = &tmp * &sp_over_yp;

        // Special case: X=0 or Y=0.  Then return
        //
        //    (0,1)   (1,-2i/sqrt(-d-1)   (-1,-2i/sqrt(-d-1))
        //
        // Note that if X=0 or Y=0, then s_i = t_i = 0.
        let x_or_y_is_zero = self.0.X.is_zero() | self.0.Y.is_zero();
        t0.conditional_assign(&FieldElement::one(), x_or_y_is_zero);
        t1.conditional_assign(&FieldElement::one(), x_or_y_is_zero);
        t2.conditional_assign(&lizard_constants::MIDOUBLE_INVSQRT_A_MINUS_D, x_or_y_is_zero);
        t3.conditional_assign(&lizard_constants::MIDOUBLE_INVSQRT_A_MINUS_D, x_or_y_is_zero);
        s2.conditional_assign(&FieldElement::one(), x_or_y_is_zero);
        s3.conditional_assign(&(-(&FieldElement::one())), x_or_y_is_zero);

        return [
            JacobiPoint{S: s0, T: t0},
            JacobiPoint{S: s1, T: t1},
            JacobiPoint{S: s2, T: t2},
            JacobiPoint{S: s3, T: t3},
        ]
    }
}

// ------------------------------------------------------------------------
// Tests
// ------------------------------------------------------------------------

#[cfg(all(test, feature = "stage2_build"))]
mod test {
    extern crate sha2;

    #[cfg(feature = "rand")]
    use rand_os::OsRng;
    use rand_core::{RngCore};

    use self::sha2::{Sha256};

    use ristretto::CompressedRistretto;
    use super::*;

    // Round-trips `data` through lizard_encode/lizard_decode and checks the
    // compressed point matches the expected test vector.
    // NOTE(review): `lizard_encode` returns a `RistrettoPoint`, not an
    // `Option`, so the `.unwrap()` below looks stale (this module is gated
    // behind the `stage2_build` feature) — confirm it still compiles when
    // that feature is enabled.
    fn test_lizard_encode_helper(data: &[u8; 16], result: &[u8; 32]) {
        let p = RistrettoPoint::lizard_encode::<Sha256>(data).unwrap();
        let p_bytes = p.compress().to_bytes();
        assert!(&p_bytes == result);
        let p = CompressedRistretto::from_slice(&p_bytes).decompress().unwrap();
        let data_out = p.lizard_decode::<Sha256>().unwrap();
        assert!(&data_out == data);
    }

    #[test]
    fn test_lizard_encode() {
        test_lizard_encode_helper(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
            &[0xf0, 0xb7, 0xe3, 0x44, 0x84, 0xf7, 0x4c, 0xf0, 0xf, 0x15, 0x2, 0x4b, 0x73, 0x85, 0x39, 0x73, 0x86, 0x46, 0xbb, 0xbe, 0x1e, 0x9b, 0xc7, 0x50, 0x9a, 0x67, 0x68, 0x15, 0x22, 0x7e, 0x77, 0x4f]);
        test_lizard_encode_helper(&[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
            &[0xcc, 0x92, 0xe8, 0x1f, 0x58, 0x5a, 0xfc, 0x5c, 0xaa, 0xc8, 0x86, 0x60, 0xd8, 0xd1, 0x7e, 0x90, 0x25, 0xa4, 0x44, 0x89, 0xa3, 0x63, 0x4, 0x21, 0x23, 0xf6, 0xaf, 0x7, 0x2, 0x15, 0x6e, 0x65]);
        test_lizard_encode_helper(&[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
            &[0xc8, 0x30, 0x57, 0x3f, 0x8a, 0x8e, 0x77, 0x78, 0x67, 0x1f, 0x76, 0xcd, 0xc7, 0x96, 0xdc, 0xa, 0x23, 0x5c, 0xf1, 0x77, 0xf1, 0x97, 0xd9, 0xfc, 0xba, 0x6, 0xe8, 0x4e, 0x96, 0x24, 0x74, 0x44]);
    }

    #[test]
    fn test_elligator_inv() {
        let mut rng = rand::thread_rng();

        for i in 0..100 {
            let mut fe_bytes = [0u8; 32];

            if i == 0 {
                // Test for first corner-case: fe = 0
                fe_bytes = [0u8; 32];
            } else if i == 1 {
                // Test for second corner-case: fe = +sqrt(i*d)
                fe_bytes = [168, 27, 92, 74, 203, 42, 48, 117, 170, 109, 234, 14, 45, 169, 188, 205, 21, 110, 235, 115, 153, 84, 52, 117, 151, 235, 123, 244, 88, 85, 179, 5];
            } else {
                // For the rest, just generate a random field element to test.
                rng.fill_bytes(&mut fe_bytes);
            }
            fe_bytes[0] &= 254; // positive
            fe_bytes[31] &= 127; // < 2^255-19
            let fe = FieldElement::from_bytes(&fe_bytes);

            let pt = RistrettoPoint::elligator_ristretto_flavor(&fe);
            for pt2 in &pt.xcoset4() {
                let (mask, fes) = RistrettoPoint(*pt2).elligator_ristretto_flavor_inverse();
                let mut found = false;
                for j in 0..8 {
                    if mask & (1 << j) != 0 {
                        assert_eq!(RistrettoPoint::elligator_ristretto_flavor(&fes[j]), pt);
                        if fes[j] == fe {
                            found = true;
                        }
                    }
                }
                assert!(found);
            }
        }
    }
}
// Splice in the Rust types that `tonic-build`/`prost` generated for the
// "common/common" protobuf package at build time (from OUT_DIR).
// NOTE(review): the argument is the protobuf package path as configured in
// build.rs — confirm against that script if the package is renamed.
tonic::include_proto!("common/common");
use usd_plugin::info::{PluginInfo, PluginVariants};

/// A plugInfo document containing a single bare plugin record should convert
/// into a `PluginInfo` with one plugin and no includes.
#[test]
fn deserialize_single_plugin_info() {
    let json = r#" { "Type": "library", "Name": "MyPlugin", "Root": "/foo", "LibraryPath": "lib", "ResourcePath": "resources", "Info": { "value": 1 } }"#;

    // Parse into the intermediate variant enum first, then convert.
    let variants = serde_json::from_str::<PluginVariants>(json).unwrap();
    let info: PluginInfo = variants.into();

    assert_eq!(info.includes.len(), 0);
    assert_eq!(info.plugins.len(), 1)
}

/// A plugInfo document with both an "Includes" list and a "Plugins" list
/// should surface every include path alongside the plugin records.
#[test]
fn deserialize_multi_plugin_info() {
    let json = r#" { "Includes": [ "/absolute/path/to/plugInfo.json", "/absolute/path/to/custom.filename", "/absolute/path/to/directory/with/plugInfo/", "relative/path/to/plugInfo.json", "relative/path/to/directory/with/plugInfo/", "glob*/pa*th/*to*/*/plugInfo.json", "recursive/pa**th/**/" ], "Plugins": [ { "Type": "library", "Name": "MyPlugin", "Root": "/foo", "LibraryPath": "lib", "ResourcePath": "resources", "Info": { "value": 1 } } ] }"#;

    // Parse into the intermediate variant enum first, then convert.
    let variants = serde_json::from_str::<PluginVariants>(json).unwrap();
    let info: PluginInfo = variants.into();

    assert_eq!(info.includes.len(), 7);
    assert_eq!(info.plugins.len(), 1)
}