text
stringlengths
8
4.13M
use std::{ error, io::{self, prelude::*}, net, process, str, }; extern crate ctrlc; fn main() -> Result<(), Box<dyn error::Error>>{ let mut stream = net::TcpStream::connect("127.0.0.1:50000")?; let s = stream.try_clone()?; ctrlc::set_handler(move || { s.shutdown(net::Shutdown::Both).unwrap(); })?; loop { let mut input = String::new(); io::stdin().read_line(&mut input)?; stream.write_all(input.as_bytes())?; let mut reader = io::BufReader::new(&stream); reader.fill_buf()?; print!("{}", str::from_utf8(reader.buffer())?); } }
use config::Config;
use hbs::Template;
use rustc_serialize::json::{Json, ToJson};
use iron::prelude::*;
use std::io::prelude::*;
use std::str::FromStr;
use iron::status;
use rss::Channel;
use hyper::client::Client;
use std::collections::BTreeMap;

/// Render the "index" Handlebars template, passing the application config
/// through to the template context under the "config" key.
pub fn index(req: &mut Request) -> IronResult<Response> {
    let mut res = Response::new();
    let mut data = BTreeMap::<String, Json>::new();
    // Config is injected into the request extensions by middleware upstream;
    // NOTE(review): unwrap panics if that middleware is not installed — confirm.
    let config = req.extensions.get::<Config>().unwrap();
    data.insert("config".to_owned(), config.to_json());
    res.set_mut(Template::new("index", data)).set_mut(status::Ok);
    Ok(res)
}

/// Fetch every feed URL listed in the config, merge all their items into a
/// single RSS channel, and serve it as `application/xml`.
pub fn rss(req: &mut Request) -> IronResult<Response> {
    let mut channels = vec![];
    // NOTE(review): unwrap assumes the Config middleware is installed — confirm.
    let config = req.extensions.get::<Config>().unwrap();
    let client = Client::new();
    for channel in config.channels() {
        let req = client.get(&channel.url);
        let returned = req.send();
        match returned {
            Ok(mut res) => {
                let mut body = String::new();
                // NOTE(review): a read error is silently ignored here, so a
                // truncated body may reach the parser below — verify intent.
                let _ = res.read_to_string(&mut body);
                // NOTE(review): unwrap panics on malformed feed XML from a
                // remote server — consider skipping the feed instead.
                let channel = Channel::from_str(&body).unwrap();
                channels.push(channel);
                // let mut history = BTreeMap::new();
                // history.insert("", rss.
            }
            Err(e) => {
                println!("{:?}", e);
            }
        }
    }
    // Flatten every fetched channel's items into one combined item list.
    let items: Vec<_> = channels.into_iter().flat_map(|c| c.items).collect();
    println!("{:?}", items);
    let rss_result = Channel {
        title: "".to_owned(),
        link: "/rss".to_owned(),
        description: "".to_owned(),
        items: items,
        ..Default::default()
    };
    let mut res = Response::with((status::Ok, rss_result.to_string()));
    res.headers.set_raw("Content-Type", vec![b"application/xml".to_vec()]);
    Ok(res)
}
// **services** are a collection of // **ports** aro an abstract collection of // **operations** aro an abstract action definition mod wsdl; #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } }
use std::collections::HashSet;
use std::iter::FromIterator;
use std::u32;
use std::cmp;

/// Project Euler problem 44.
///
/// Pentagonal numbers are P(n) = n(3n - 1)/2.  Find the pair P(j) < P(i)
/// whose sum and difference are both pentagonal, minimising the difference
/// D = P(i) - P(j); returns D.
pub fn problem_044() -> u32 {
    // All pentagonal numbers with index below 10_000, for O(1) membership tests.
    let pentagonal: HashSet<u32> =
        HashSet::from_iter((1u32..10000).map(|n| n * (3 * n - 1) / 2));

    let mut min_diff = u32::MAX;
    for i in 2u32..10000 {
        let pent_i: u32 = i * (3 * i - 1) / 2;

        // Prune: only j with P(i) - P(j) < min_diff can improve the answer,
        // i.e. P(j) > P(i) - min_diff.  Solving 3j^2 - j - 2x = 0 for
        // x = P(i) - min_diff gives j > (1 + sqrt(1 + 24x)) / 6.
        // (Bug fix: the previous formula applied sqrt to the wrong
        // subexpression and always evaluated negative, so the prune was dead.)
        let mut min_j: u32 = 1;
        if pent_i > min_diff {
            let x = (pent_i - min_diff) as f64;
            let root = (1.0 + (1.0 + 24.0 * x).sqrt()) / 6.0;
            // Start one index below the exact root to stay conservative.
            min_j = cmp::max(1, (root as u32).saturating_sub(1));
        }

        for j in min_j..i {
            let pent_j: u32 = j * (3 * j - 1) / 2;
            let diff: u32 = pent_i - pent_j;
            let sum: u32 = pent_i + pent_j;
            // `&&` (short-circuit) instead of bitwise `&`: skips the second
            // hash lookup when the first already fails.
            if pentagonal.contains(&diff) && pentagonal.contains(&sum) && diff < min_diff {
                min_diff = diff;
            }
        }
    }
    min_diff
}

#[cfg(test)]
mod test {
    use super::*;
    use test::Bencher;

    #[test]
    fn test_problem_044() {
        let ans: u32 = problem_044();
        println!("Answer to Problem 44: {}", ans);
        assert!(ans == 5482660)
    }

    #[bench]
    fn bench_problem_044(b: &mut Bencher) {
        b.iter(|| problem_044());
    }
}
use ast::Ast;
#[allow(unused_imports)]
use nom::*;
use datatype::Datatype;
use std::str::FromStr;
use std::str;

// Parse the textual form produced by `float_structure` into an f64.
named!(float_raw<f64>,
    do_parse!(
        float_string: float_structure >>
        // NOTE(review): unwrap assumed safe because float_structure always
        // yields "<digits>.<digits>", which f64::from_str accepts — confirm.
        (f64::from_str(float_string.as_str()).unwrap())
    )
);

// Recognise "<digits>.<digits>" and rebuild it as an owned String.
named!(float_structure<String>,
    do_parse!(
        basis: digit >>
        char!('.') >>
        decimal: digit >>
        // These unwraps are safe, because u8 slices accepted by digit() will always be accepted by from_utf8
        (str::from_utf8(basis).unwrap().to_string() + "." + str::from_utf8(decimal).unwrap())
    )
);

// Public entry point: a float literal, with surrounding whitespace allowed,
// wrapped into the AST as a Float datatype literal.
named!(pub float_literal<Ast>,
    do_parse!(
        num: ws!(float_raw) >>
        (Ast::Literal ( Datatype::Float(num)))
    )
);

#[test]
fn parse_float_test() {
    let (_, value) = match float_raw(b"42.0") {
        IResult::Done(r, v) => (r, v),
        IResult::Error(e) => panic!("{:?}", e),
        _ => panic!(),
    };
    assert_eq!(42.0, value)
}
use super::{DatabaseClient, UserDefinedFunctionClient};
use crate::clients::*;
use crate::operations::*;
use crate::requests;
use crate::resources::ResourceType;
use crate::CosmosEntity;
use crate::ReadonlyString;
use azure_core::{pipeline::Pipeline, Context, HttpClient, Request};
use serde::Serialize;

/// A client for Cosmos collection resources.
#[derive(Debug, Clone)]
pub struct CollectionClient {
    database_client: DatabaseClient,
    collection_name: ReadonlyString,
}

impl CollectionClient {
    /// Crate-internal constructor; callers reach this via `DatabaseClient`.
    pub(crate) fn new<S: Into<ReadonlyString>>(
        database_client: DatabaseClient,
        collection_name: S,
    ) -> Self {
        Self {
            database_client,
            collection_name: collection_name.into(),
        }
    }

    /// Get a [`CosmosClient`].
    pub fn cosmos_client(&self) -> &CosmosClient {
        self.database_client.cosmos_client()
    }

    /// Get a [`DatabaseClient`].
    pub fn database_client(&self) -> &DatabaseClient {
        &self.database_client
    }

    /// Get the collection name
    pub fn collection_name(&self) -> &str {
        &self.collection_name
    }

    /// Get a collection
    pub async fn get_collection(
        &self,
        ctx: Context,
        options: GetCollectionOptions,
    ) -> crate::Result<GetCollectionResponse> {
        let mut request = self.prepare_request_with_collection_name(http::Method::GET);
        options.decorate_request(&mut request)?;
        // The resource type is attached to a clone of the caller's context so
        // the pipeline policies can see it without mutating the caller's ctx.
        let response = self
            .pipeline()
            .send(
                &mut ctx.clone().insert(ResourceType::Collections),
                &mut request,
            )
            .await?;
        Ok(GetCollectionResponse::try_from(response).await?)
    }

    /// Delete a collection
    pub async fn delete_collection(
        &self,
        ctx: Context,
        options: DeleteCollectionOptions,
    ) -> crate::Result<DeleteCollectionResponse> {
        let mut request = self.prepare_request_with_collection_name(http::Method::DELETE);
        options.decorate_request(&mut request)?;
        let response = self
            .pipeline()
            .send(
                &mut ctx.clone().insert(ResourceType::Collections),
                &mut request,
            )
            .await?;
        Ok(DeleteCollectionResponse::try_from(response).await?)
    }

    /// Replace a collection
    pub async fn replace_collection(
        &self,
        ctx: Context,
        options: ReplaceCollectionOptions,
    ) -> crate::Result<ReplaceCollectionResponse> {
        let mut request = self.prepare_request_with_collection_name(http::Method::PUT);
        options.decorate_request(&mut request, self.collection_name())?;
        let response = self
            .pipeline()
            .send(
                &mut ctx.clone().insert(ResourceType::Collections),
                &mut request,
            )
            .await?;
        Ok(ReplaceCollectionResponse::try_from(response).await?)
    }

    /// list documents in a collection
    pub fn list_documents(&self) -> requests::ListDocumentsBuilder<'_, '_> {
        requests::ListDocumentsBuilder::new(self)
    }

    /// create a document in a collection
    pub async fn create_document<'a, D: Serialize + CosmosEntity<'a>>(
        &self,
        ctx: Context,
        document: &'a D,
        options: CreateDocumentOptions<'_>,
    ) -> crate::Result<CreateDocumentResponse> {
        let mut request = self.prepare_doc_request_pipeline(http::Method::POST);
        options.decorate_request(&mut request, document)?;
        let response = self
            .pipeline()
            .send(
                &mut ctx.clone().insert(ResourceType::Documents),
                &mut request,
            )
            .await?;
        Ok(CreateDocumentResponse::try_from(response).await?)
    }

    /// query documents in a collection
    pub fn query_documents(&self) -> requests::QueryDocumentsBuilder<'_, '_> {
        requests::QueryDocumentsBuilder::new(self)
    }

    /// list stored procedures in a collection
    pub fn list_stored_procedures(&self) -> requests::ListStoredProceduresBuilder<'_, '_> {
        requests::ListStoredProceduresBuilder::new(self)
    }

    /// list user defined functions in a collection
    pub fn list_user_defined_functions(&self) -> requests::ListUserDefinedFunctionsBuilder<'_, '_> {
        requests::ListUserDefinedFunctionsBuilder::new(self)
    }

    /// list triggers in a collection
    pub fn list_triggers(&self) -> requests::ListTriggersBuilder<'_, '_> {
        requests::ListTriggersBuilder::new(self)
    }

    /// list the partition key ranges in a collection
    pub fn get_partition_key_ranges(&self) -> requests::GetPartitionKeyRangesBuilder<'_, '_> {
        requests::GetPartitionKeyRangesBuilder::new(self)
    }

    /// convert into a [`DocumentClient`]
    pub fn into_document_client<S: Into<String>, PK: Serialize>(
        self,
        document_name: S,
        partition_key: &PK,
    ) -> Result<DocumentClient, serde_json::Error> {
        DocumentClient::new(self, document_name, partition_key)
    }

    /// convert into a [`TriggerClient`]
    pub fn into_trigger_client<S: Into<ReadonlyString>>(self, trigger_name: S) -> TriggerClient {
        TriggerClient::new(self, trigger_name)
    }

    /// convert into a [`UserDefinedFunctionClient`]
    pub fn into_user_defined_function_client<S: Into<ReadonlyString>>(
        self,
        user_defined_function_name: S,
    ) -> UserDefinedFunctionClient {
        UserDefinedFunctionClient::new(self, user_defined_function_name)
    }

    /// convert into a [`StoredProcedureClient`]
    pub fn into_stored_procedure_client<S: Into<ReadonlyString>>(
        self,
        stored_procedure_name: S,
    ) -> StoredProcedureClient {
        StoredProcedureClient::new(self, stored_procedure_name)
    }

    /// Build a request addressed at the collection itself
    /// (`dbs/{db}/colls/{coll}`).
    fn prepare_request_with_collection_name(&self, http_method: http::Method) -> Request {
        let path = &format!(
            "dbs/{}/colls/{}",
            self.database_client().database_name(),
            self.collection_name()
        );
        self.cosmos_client()
            .prepare_request_pipeline(path, http_method)
    }

    /// The raw HTTP client shared by all Cosmos clients.
    pub(crate) fn http_client(&self) -> &dyn HttpClient {
        self.cosmos_client().http_client()
    }

    /// The request/response policy pipeline shared by all Cosmos clients.
    pub(crate) fn pipeline(&self) -> &Pipeline {
        self.cosmos_client().pipeline()
    }

    /// Build a request addressed at the collection's documents feed
    /// (`dbs/{db}/colls/{coll}/docs`).
    fn prepare_doc_request_pipeline(&self, http_method: http::Method) -> Request {
        let path = &format!(
            "dbs/{}/colls/{}/docs",
            self.database_client().database_name(),
            self.collection_name()
        );
        self.cosmos_client()
            .prepare_request_pipeline(path, http_method)
    }
}
pub mod sphere;

use super::ray::Ray;

/// A geometric primitive that can be tested for intersection with a ray.
pub trait Shape {
    /// Returns the parametric distance `t` along `r` at which the ray hits
    /// this shape, or `None` on a miss.
    /// NOTE(review): presumably the nearest positive hit — confirm in implementors.
    fn intersect(&self, r: &Ray) -> Option<f64>;
}
const HAND_CAPACITY : usize = 7; use super::{Tile, TileBag}; /// Stores a vector of tiles pub struct Hand { tiles : Vec<Tile>, } impl Hand { /// Create a new empty Hand pub fn new() -> Hand { Hand { tiles : Vec::with_capacity(HAND_CAPACITY), } } /// Take tiles from a bag /// /// Argument: /// * `bag` - A mutable reference to the Bag to draw from pub fn draw(&mut self, bag : &mut TileBag) { while self.tiles.len() < HAND_CAPACITY { let new_tile = bag.pick(); if let None = new_tile { return; } let new_tile = new_tile.unwrap(); self.tiles.push(new_tile); } } /// Remove one or more tiles and return them /// /// It takes a vector of character and return the associated tiles /// (including wildcard if it exists) or None if there is no letter /// matching the one asked. /// /// # Argument /// `remove` - The chars to remove pub fn remove(&mut self, remove : &Vec<char>) -> Option<Vec<Tile>> { let mut ret : Vec<Tile> = Vec::with_capacity(7); if !self.contains(remove) { return None; } for c in remove { match self.tiles.iter() .position(|tile| tile.letter() == *c) { None => { // We don't have this letter, but we have a wildcard let index = self.tiles.iter() .position(|tile| tile.wildcard() == true).unwrap(); ret.push(self.tiles.swap_remove(index)); }, Some(index) => { ret.push(self.tiles.swap_remove(index)); } } } if ret.len() > 0 { return Some(ret); } else { return None; } } /// Get a copy of the tiles in hand pub fn get(&self) -> Vec<Tile> { self.tiles.clone() } /// Whether some tile are present /// /// Tells if there is a different tile for each character in `elem` /// # Argument /// * `elem` - The vector of letter to verify they match a different tile pub fn contains(&self, elem : &Vec<char>) -> bool { let mut tmp_tiles = self.tiles.clone(); for c in elem { match tmp_tiles.iter().position(|tile| tile.letter() == *c) { None => { // We don't have this letter, but we have a wildcard match tmp_tiles.iter().position(|tile| tile.wildcard() == true) { None => { return false; }, 
Some(index) => { tmp_tiles.remove(index); } } }, Some(index) => { tmp_tiles.remove(index); } } } true } }
use crate::utils;
pub mod button;
pub mod card;
use button::Button;
use card::Card;
use utils::{BotUser};
use serde::ser::{Serialize ,Serializer};
use serde_json::Value;
use log::{info, warn};
use std::fmt;
use ureq::*;
use std::sync::Arc;

/// Facebook Messenger `messaging_type` field values.
pub enum MessagingType {
    RESPONSE,
    UPDATE,
    MESSAGETAG,
}

// Display and Serialize below must stay in sync: both map
// MESSAGETAG to the wire string "MESSAGE_TAG".
impl fmt::Display for MessagingType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MessagingType::RESPONSE => write!(f,"RESPONSE"),
            MessagingType::UPDATE => write!(f,"UPDATE"),
            MessagingType::MESSAGETAG => write!(f,"MESSAGE_TAG"),
        }
    }
}

impl Serialize for MessagingType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            MessagingType::RESPONSE => serializer.serialize_str("RESPONSE"),
            MessagingType::UPDATE => serializer.serialize_str("UPDATE"),
            MessagingType::MESSAGETAG => serializer.serialize_str("MESSAGE_TAG"),
        }
    }
}

/// Anything that can be delivered to a user through the Send API.
pub trait ApiMessage {
    fn send(&self, user: &BotUser, token: &str);
}

/// A message to send: plain text with optional quick-reply buttons,
/// or a list of template cards.  Text takes priority over cards in send().
#[derive(Clone)]
pub struct Message {
    text: Option<String>,
    buttons: Option<Vec<Button>>,
    cards: Option<Vec<Arc<dyn Card>>>,
}

impl ApiMessage for Message {
    fn send(&self, user: &BotUser, token: &str) {
        // POST a JSON payload to the Graph API, logging the outcome.
        fn send_json(value: serde_json::Value, token: &str) {
            let url = format!("https://graph.facebook.com/v9.0/me/messages?access_token={}",token);
            info!("Json value : {}",value.to_string());
            let resp = ureq::post(&url)
                .send_json(value);
            if resp.ok() {
                info!("success: {}", resp.into_string().unwrap());
            } else {
                warn!("error {}: {}", resp.status(), resp.into_string().unwrap());
            }
        }

        if token.is_empty() {
            warn!("Message doesn't have a access_token");
        } else if self.text.is_some() {
            // Text message, optionally with quick-reply buttons.
            let json = self::json!(
                {
                    "messaging_type": MessagingType::RESPONSE,
                    "recipient": { "id": user.get_sender() },
                    "message": {
                        "text": self.text,
                        "quick_replies": self.buttons,
                    }
                }
            );
            send_json(json,token);
        } else if self.cards.is_some() {
            let card = self.cards.as_ref().unwrap();
            let cards: Vec<Value> = card.iter().map(|e| e.clone().to_json()).collect();
            // The template payload shape depends on the card type; the first
            // card decides for the whole message.
            let payload: Value = match card[0].typed() {
                "generic" => {
                    self::json!({"template_type": "generic", "elements": cards})
                },
                "buttons" => {
                    let value: Value = card[0].clone().to_json();
                    value
                }
                // NOTE(review): unknown card types silently send an empty
                // payload — confirm this is the intended fallback.
                _ => {
                    self::json!({})
                }
            };
            let json = self::json!(
                {
                    "messaging_type": MessagingType::RESPONSE,
                    "recipient": { "id": user.get_sender() },
                    "message": {
                        "attachment": {
                            "type":"template",
                            "payload": payload
                        }
                    }
                }
            );
            send_json(json,token);
        }
    }
}

impl Message {
    /// Build a message from its optional parts; pass None for unused parts.
    pub fn new(text : Option<String>,buttons: Option<Vec<Button>>, cards: Option<Vec<Arc<dyn Card>>>) -> Self {
        Message{
            text: text,
            buttons: buttons,
            cards: cards
        }
    }
}
/// One passport record, accumulated one `key:value` token at a time.
/// Every field is optional because input records may omit any of them.
#[derive(Default, Debug)]
struct Passport {
    birth_year: Option<String>,
    issue_year: Option<String>,
    expiration_year: Option<String>,
    height: Option<String>,
    hair_color: Option<String>,
    eye_color: Option<String>,
    passport_id: Option<String>,
    country_id: Option<String>
}

/// The first part-2 validation rule a passport fails.
#[derive(Debug)]
enum PassportError {
    BirthYear,
    IssueYear,
    ExpirationYear,
    Height,
    HairColor,
    EyeColor,
    PassportId
}

/// Returns true when `year` is present, numeric, and within `min..=max`.
/// Fix: a non-numeric year now counts as invalid instead of panicking
/// (the puzzle's part-2 rules treat malformed values as invalid).
fn validate_year_option(year: &Option<String>, min: u32, max: u32) -> bool {
    match year {
        None => false,
        Some(year) => match year.parse::<u32>() {
            Ok(y) => y >= min && y <= max,
            Err(_) => false
        }
    }
}

impl Passport {
    /// Parses one `key:value` token and stores it in the matching field.
    /// Panics on an unknown key or a token without ':' — malformed puzzle
    /// input is treated as a programming error here.
    fn consume_token(&mut self, kvpair: &str) {
        let mut tokens = kvpair.split(':');
        let key = tokens.next().unwrap();
        let value = String::from(tokens.next().unwrap());
        match key {
            "byr" => self.birth_year = Some(value),
            "iyr" => self.issue_year = Some(value),
            "eyr" => self.expiration_year = Some(value),
            "hgt" => self.height = Some(value),
            "hcl" => self.hair_color = Some(value),
            "ecl" => self.eye_color = Some(value),
            "pid" => self.passport_id = Some(value),
            "cid" => self.country_id = Some(value),
            _ => panic!()
        }
    }

    /// Part 1: every field except `country_id` must be present.
    fn valid_presence(&self) -> bool {
        self.birth_year.is_some() &&
        self.issue_year.is_some() &&
        self.expiration_year.is_some() &&
        self.height.is_some() &&
        self.hair_color.is_some() &&
        self.eye_color.is_some() &&
        self.passport_id.is_some()
    }

    /// Part 2: returns the first failing rule, or None when fully valid.
    fn errors_part_two(&self) -> Option<PassportError> {
        if !self.valid_birth_year() { return Some(PassportError::BirthYear) };
        if !self.valid_issue_year() { return Some(PassportError::IssueYear) };
        if !self.valid_expiration_year() { return Some(PassportError::ExpirationYear) };
        if !self.valid_height() { return Some(PassportError::Height) };
        if !self.valid_hair_color() { return Some(PassportError::HairColor) };
        if !self.valid_eye_color() { return Some(PassportError::EyeColor) };
        if !self.valid_passport_id() { return Some(PassportError::PassportId) };
        None
    }

    fn valid_birth_year(&self) -> bool {
        validate_year_option(&self.birth_year, 1920, 2002)
    }

    fn valid_issue_year(&self) -> bool {
        validate_year_option(&self.issue_year, 2010, 2020)
    }

    fn valid_expiration_year(&self) -> bool {
        validate_year_option(&self.expiration_year, 2020, 2030)
    }

    /// Height must be `<number>in` within 59..=76 or `<number>cm` within 150..=193.
    fn valid_height(&self) -> bool {
        match &self.height {
            None => false,
            Some(value) => {
                if value.len() <= 2 {
                    return false;
                }
                // Assumes ASCII input, so a byte-based split_at is
                // char-boundary safe — TODO confirm for arbitrary input.
                let (height, height_unit) = value.split_at(value.len() - 2);
                // Fix: a non-numeric magnitude is invalid instead of a panic.
                let height_int: usize = match height.parse() {
                    Ok(h) => h,
                    Err(_) => return false
                };
                match height_unit {
                    "in" => height_int >= 59 && height_int <= 76,
                    "cm" => height_int >= 150 && height_int <= 193,
                    _ => false
                }
            }
        }
    }

    /// Hair color must be '#' followed by exactly six hex digits.
    fn valid_hair_color(&self) -> bool {
        let color = match &self.hair_color {
            None => return false,
            Some(c) => c
        };
        let (hash, code) = color.split_at(1);
        if hash != "#" {
            return false;
        }
        code.len() == 6 && code.chars().all(|c| c.is_ascii_hexdigit())
    }

    /// Eye color must be one of the seven allowed codes.
    fn valid_eye_color(&self) -> bool {
        let color = match &self.eye_color {
            None => return false,
            Some(c) => c
        };
        match color.as_str() {
            "amb" | "blu" | "brn" | "gry" | "grn" | "hzl" | "oth" => true,
            _ => false
        }
    }

    /// Passport id must be exactly nine digits.
    fn valid_passport_id(&self) -> bool {
        match &self.passport_id {
            Some(pid) => pid.len() == 9 && pid.chars().all(|c| c.is_numeric()),
            None => false
        }
    }
}

/// Reads `path`; blank lines separate passports, spaces separate tokens.
fn read_input(path: &str) -> Vec<Passport> {
    let file = std::fs::read_to_string(path).expect("could not open file");
    file.lines().fold(vec![Passport { ..Default::default() }], |mut passports, line| {
        if line.is_empty() {
            passports.push(Passport { ..Default::default() });
        } else {
            line.split(" ").for_each(|kvpair| passports.last_mut().unwrap().consume_token(kvpair));
        }
        passports
    })
}

fn main() {
    let passports = read_input("input.txt");

    // Part 1
    let valid = passports.iter().filter(|passport| passport.valid_presence()).count();
    println!("Valid passports for part 1: {}", valid);

    // Part 2
    let valid = passports.iter().filter(|passport| passport.errors_part_two().is_none()).count();
    println!("Valid passports for part 2: {}", valid);
}

#[test]
fn test_part_one_validation() {
    let passports = read_input("example.txt");
    let valid = passports.iter().filter(|passport| passport.valid_presence()).count();
    assert_eq!(valid, 2);
}

#[test]
fn test_part_two_individual() {
    // Birth Year
    let passport = Passport { birth_year: Some(String::from("2002")), ..Default::default() };
    assert_eq!(passport.valid_birth_year(), true);
    let passport = Passport { birth_year: Some(String::from("2003")), ..Default::default() };
    assert_eq!(passport.valid_birth_year(), false);

    // Height
    let passport = Passport { height: Some(String::from("60in")), ..Default::default() };
    assert_eq!(passport.valid_height(), true);
    let passport = Passport { height: Some(String::from("190cm")), ..Default::default() };
    assert_eq!(passport.valid_height(), true);
    let passport = Passport { height: Some(String::from("190in")), ..Default::default() };
    assert_eq!(passport.valid_height(), false);
    let passport = Passport { height: Some(String::from("190")), ..Default::default() };
    assert_eq!(passport.valid_height(), false);

    // Hair Color
    let passport = Passport { hair_color: Some(String::from("#123abc")), ..Default::default() };
    assert_eq!(passport.valid_hair_color(), true);
    let passport = Passport { hair_color: Some(String::from("#123abz")), ..Default::default() };
    assert_eq!(passport.valid_hair_color(), false);
    let passport = Passport { hair_color: Some(String::from("123abc")), ..Default::default() };
    assert_eq!(passport.valid_hair_color(), false);

    // Eye Color
    let passport = Passport { eye_color: Some(String::from("brn")), ..Default::default() };
    assert_eq!(passport.valid_eye_color(), true);
    let passport = Passport { eye_color: Some(String::from("wat")), ..Default::default() };
    assert_eq!(passport.valid_eye_color(), false);

    // Passport ID
    let passport = Passport { passport_id: Some(String::from("000000001")), ..Default::default() };
    assert_eq!(passport.valid_passport_id(), true);
    let passport = Passport { passport_id: Some(String::from("0123456789")), ..Default::default() };
    assert_eq!(passport.valid_passport_id(), false);
}

#[test]
fn test_part_two_full() {
    let valid_passports = read_input("example_valid.txt");
    valid_passports.iter().for_each(|passport| assert!(passport.errors_part_two().is_none()));

    let invalid_passports = read_input("example_invalid.txt");
    invalid_passports.iter().for_each(|passport| assert!(passport.errors_part_two().is_some()));
}
// Submodule declarations for this crate's handler layer.
pub mod rejection;
pub mod todo;
/// Prints each whitespace-separated token of the message with a 1-based index.
fn main() {
    let msg = "Tutorials Point has good tutorials".to_string();
    // enumerate() replaces the manually-maintained counter; indices are
    // 0-based, so add 1 to keep the original 1-based numbering.
    for (i, token) in msg.split_whitespace().enumerate() {
        println!("token {} {}", i + 1, token);
    }
}
//! MIPS specific instructions macro_rules! define_instruction { // specify a different function name ($inst: expr, $fun: ident) => { #[doc = "invoke `"] #[doc = $inst] #[doc = "` instruction"] pub unsafe fn $fun() { llvm_asm!($inst : : : : "volatile"); } }; // directly use instruction name as function name ($inst: ident) => { define_instruction!(stringify!($inst), $inst); } } define_instruction!(wait); define_instruction!(nop); define_instruction!(tlbr); define_instruction!(tlbp); define_instruction!(tlbwr); define_instruction!(tlbwi); define_instruction!(syscall); define_instruction!("break", breakpoint); define_instruction!("eret", exception_return);
//! # nrfxlib - a Rust library for the nRF9160 interface C library //! //! This crate contains wrappers for functions and types defined in Nordic's //! libmodem, which is part of nrfxlib. //! //! The `nrfxlib_sys` crate is the auto-generated wrapper for `nrf_modem_os.h` //! and `nrf_socket.h`. This crate contains Rustic wrappers for those //! auto-generated types. //! //! To bring up the LTE stack you need to call `nrf_modem_init()`. Before that //! you need to enable the EGU1 and EGU2 interrupts, and arrange for the //! relevant functions (`application_irq_handler` and `trace_irq_handler` //! respectively) to be called when they occur. The IPC interrupt handler //! is registered by the relevant callback. //! //! To talk to the LTE modem, use the `at::send_at_command()` function. It will call //! the callback with the response received from the modem. //! //! To automatically send the AT commands which initialise the modem and wait //! until it has registered on the network, call the `wait_for_lte()` function. //! Once that is complete, you can create TCP or TLS sockets and send/receive //! data. //! //! Copyright (c) 42 Technology Ltd 2021 //! //! Dual-licensed under MIT and Apache 2.0. See the [README](../README.md) for //! more details. 
#![no_std]
#![deny(missing_docs)]

//******************************************************************************
// Sub-Modules
//******************************************************************************

pub mod api;
pub mod at;
pub mod dtls;
mod ffi;
pub mod gnss;
pub mod modem;
mod raw;
pub mod tcp;
pub mod tls;
pub mod udp;

//******************************************************************************
// Imports
//******************************************************************************

pub use api::*;
pub use ffi::{get_last_error, NrfxErr};
pub use raw::{poll, PollEntry, PollFlags, PollResult, Pollable};

use core::cell::RefCell;
use cortex_m::interrupt::Mutex;
use linked_list_allocator::Heap;
use log::{debug, trace};
use nrf9160_pac as cpu;
use nrfxlib_sys as sys;

//******************************************************************************
// Types
//******************************************************************************

/// Create a camel-case type name for socket addresses.
#[derive(Debug, Clone)]
#[repr(transparent)]
pub struct NrfSockAddrIn(sys::nrf_sockaddr_in);

/// Create a camel-case type name for socket information.
#[derive(Debug, Clone)]
#[repr(transparent)]
pub struct NrfAddrInfo(sys::nrf_addrinfo);

// Deref so the newtype can be used wherever the raw sys struct is expected.
impl core::ops::Deref for NrfSockAddrIn {
    type Target = sys::nrf_sockaddr_in;
    fn deref(&self) -> &sys::nrf_sockaddr_in {
        &self.0
    }
}

/// Errors that can be returned in response to an AT command.
#[derive(Debug, Clone)]
pub enum AtError {
    /// Plain `ERROR` response
    Error,
    /// `+CME ERROR xx` response
    CmeError(i32),
    /// `+CMS ERROR xx` response
    CmsError(i32),
}

/// The set of error codes we can get from this API.
#[derive(Debug, Clone)]
pub enum Error {
    /// An error was returned by the Nordic library. We supply a string
    /// descriptor, the return code, and the value of `errno`.
    Nordic(&'static str, i32, i32),
    /// An AT error (`ERROR`, `+CMS ERROR` or `+CME ERROR`) was returned by the modem.
    AtError(AtError),
    /// Data returned by the modem was not in a format we could understand.
    BadDataFormat,
    /// Given hostname was too long for internal buffers to hold
    HostnameTooLong,
    /// Unrecognised value from AT interface
    UnrecognisedValue,
    /// A socket write error occurred
    WriteError,
    /// Too many sockets given
    TooManySockets,
}

/// We need to wrap our heap so it's creatable at run-time and accessible from an ISR.
///
/// * The Mutex allows us to safely share the heap between interrupt routines
///   and the main thread - and nrfxlib will definitely use the heap in an
///   interrupt.
/// * The RefCell lets us share an object and mutate it (but not at the same
///   time)
/// * The Option is because the `linked_list_allocator::empty()` function is not
///   `const` yet and cannot be called here
///
type WrappedHeap = Mutex<RefCell<Option<Heap>>>;

//******************************************************************************
// Constants
//******************************************************************************

// None

//******************************************************************************
// Global Variables
//******************************************************************************

/// Our general heap.
///
/// We initialise it later with a static variable as the backing store.
static LIBRARY_ALLOCATOR: WrappedHeap = Mutex::new(RefCell::new(None));

/// Our transmit heap.
/// We initialise this later using a special region of shared memory that can be
/// seen by the Cortex-M33 and the modem CPU.
static TX_ALLOCATOR: WrappedHeap = Mutex::new(RefCell::new(None));

//******************************************************************************
// Macros
//******************************************************************************

// None

//******************************************************************************
// Public Functions and Impl on Public Types
//******************************************************************************

/// Start the NRF Modem library
pub fn init() -> Result<(), Error> {
    unsafe {
        /// Allocate some space in global data to use as a heap.
        static mut HEAP_MEMORY: [u32; 1024] = [0u32; 1024];
        let heap_start = HEAP_MEMORY.as_ptr() as usize;
        let heap_size = HEAP_MEMORY.len() * core::mem::size_of::<u32>();
        // Interrupt-free critical section: the allocator statics are shared
        // with ISRs, so they may only be installed with interrupts masked.
        cortex_m::interrupt::free(|cs| {
            *LIBRARY_ALLOCATOR.borrow(cs).borrow_mut() = Some(Heap::new(heap_start, heap_size))
        });
    }

    // Tell nrf_modem what memory it can use.
    // NOTE(review): the base addresses below assume the shared-memory layout
    // from memory.x; keep them in sync with the linker script.
    let params = sys::nrf_modem_init_params_t {
        shmem: sys::nrf_modem_shmem_cfg {
            ctrl: sys::nrf_modem_shmem_cfg__bindgen_ty_1 {
                // At start of shared memory (see memory.x)
                base: 0x2001_0000,
                // This is the amount specified in the NCS 1.5.1 release.
                size: 0x0000_04e8,
            },
            tx: sys::nrf_modem_shmem_cfg__bindgen_ty_2 {
                // Follows on from control buffer
                base: 0x2001_04e8,
                // This is the amount specified in the NCS 1.5.1 release.
                size: 0x0000_2000,
            },
            rx: sys::nrf_modem_shmem_cfg__bindgen_ty_3 {
                // Follows on from TX buffer
                base: 0x2001_24e8,
                // This is the amount specified in the NCS 1.5.1 release.
                size: 0x0000_2000,
            },
            // No trace info
            trace: sys::nrf_modem_shmem_cfg__bindgen_ty_4 { base: 0, size: 0 },
        },
        ipc_irq_prio: 0,
    };

    unsafe {
        // Use the same TX memory region as above
        cortex_m::interrupt::free(|cs| {
            *TX_ALLOCATOR.borrow(cs).borrow_mut() = Some(Heap::new(
                params.shmem.tx.base as usize,
                params.shmem.tx.size as usize,
            ))
        });
    }

    // OK, let's start the library
    let result = unsafe { sys::nrf_modem_init(&params, sys::nrf_modem_mode_t_NORMAL_MODE) };

    // Was it happy?
    if result < 0 {
        Err(Error::Nordic("init", result, ffi::get_last_error()))
    } else {
        trace!("nrfxlib init complete");
        Ok(())
    }
}

/// Stop the NRF Modem library
pub fn shutdown() {
    debug!("nrfxlib shutdown");
    unsafe {
        sys::nrf_modem_shutdown();
    }
    trace!("nrfxlib shutdown complete");
}

// Formatting failures surface as socket write errors.
impl From<core::fmt::Error> for Error {
    fn from(_err: core::fmt::Error) -> Error {
        Error::WriteError
    }
}

impl core::fmt::Display for NrfSockAddrIn {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // NOTE(review): octets are printed in reverse order after to_be_bytes,
        // which implies sin_addr.s_addr is stored little-endian-first here —
        // confirm against nrf_socket.h.
        let octets = self.sin_addr.s_addr.to_be_bytes();
        write!(
            f,
            "{}.{}.{}.{}:{}",
            octets[3],
            octets[2],
            octets[1],
            octets[0],
            u16::from_be(self.sin_port)
        )
    }
}

//******************************************************************************
// Private Functions and Impl on Private Types
//******************************************************************************

// None

//******************************************************************************
// End of File
//******************************************************************************
//! Calculate the crc64 checksum of the given data, starting with the given crc. //! //! Implements the CRC64 used by Redis, which is the variant with "Jones" coefficients and init value of 0. //! //! Specification of this CRC64 variant follows: //! //! ```text //! Name: crc-64-jones //! Width: 64 bites //! Poly: 0xad93d23594c935a9 //! Reflected In: True //! Xor_In: 0xffffffffffffffff //! Reflected_Out: True //! Xor_Out: 0x0 //! Check("123456789"): 0xe9c6d914c4b8d9ca //! ``` //! //! Example: //! //! ```rust //! let cksum = crc64::crc64(0, b"123456789"); //! assert_eq!(16845390139448941002, cksum); //! ``` use std::io::{self, Write}; use crc_table::CRC64_TAB; mod crc_table; fn to_u64(data: &[u8]) -> u64 { debug_assert!(data.len() == 8); let arr: [u8; 8] = data.try_into().expect("incorrect length"); u64::from_le_bytes(arr) } /// Calculate the Crc64 checksum over `data`, starting from `crc`. /// /// ```rust /// use crc64::crc64; /// /// let cksum = crc64::crc64(0, b"123456789"); /// assert_eq!(16845390139448941002, cksum); /// ``` pub fn crc64(crc: u64, data: &[u8]) -> u64 { let mut crc = crc; let mut len = data.len(); let mut offset = 0usize; while len >= 8 { crc ^= to_u64(&data[offset..(offset + 8)]); crc = CRC64_TAB[7][(crc & 0xff) as usize] ^ CRC64_TAB[6][((crc >> 8) & 0xff) as usize] ^ CRC64_TAB[5][((crc >> 16) & 0xff) as usize] ^ CRC64_TAB[4][((crc >> 24) & 0xff) as usize] ^ CRC64_TAB[3][((crc >> 32) & 0xff) as usize] ^ CRC64_TAB[2][((crc >> 40) & 0xff) as usize] ^ CRC64_TAB[1][((crc >> 48) & 0xff) as usize] ^ CRC64_TAB[0][(crc >> 56) as usize]; offset += 8; len -= 8; } while len > 0 { crc = CRC64_TAB[0][((crc ^ data[offset] as u64) & 0xff) as usize] ^ (crc >> 8); offset += 1; len -= 1; } crc } /// A checksummer. /// /// You can write bytes to it to update the checksum. 
/// /// ```rust /// use std::io::Write; /// use crc64::Crc64; /// /// let mut cksum = Crc64::new(); /// cksum.write(&[0x1, 0x2, 0x3, 0x4, 0x5]); /// assert_eq!(18087688510130107988, cksum.get()); /// ``` pub struct Crc64 { crc64: u64, } impl Crc64 { /// Creates a new checksummer. pub fn new() -> Crc64 { Crc64 { crc64: 0 } } /// Gets the current crc64 checksum. pub fn get(&self) -> u64 { self.crc64 } } impl Default for Crc64 { fn default() -> Self { Self::new() } } impl Write for Crc64 { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.crc64 = crc64(self.crc64, buf); Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[test] fn test_crc64_works() { assert_eq!(0xe9c6d914c4b8d9ca, crc64(0, "123456789".as_bytes())) } #[test] fn test_crc64_write() { let step1 = "12345".as_bytes(); let step2 = "6789".as_bytes(); let value1 = 17326901458626182669; let value2 = 16845390139448941002; assert_eq!(value1, crc64(0, step1)); assert_eq!(value2, crc64(value1, step2)); let mut crc = Crc64::new(); assert_eq!(crc.write(step1).unwrap(), step1.len()); assert_eq!(value1, crc.get()); assert_eq!(crc.write(step2).unwrap(), step2.len()); assert_eq!(value2, crc.get()); }
use pasture_core::meta::Metadata; use std::fmt::Display; /// `Metadata` implementation for ascii files /// In general there is no metadata in ascii files. #[derive(Debug, Clone)] pub struct AsciiMetadata {} impl AsciiMetadata { pub fn new() -> Self { Self {} } } impl Display for AsciiMetadata { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Ascii Metadata")?; Ok(()) } } impl Metadata for AsciiMetadata { fn bounds(&self) -> Option<pasture_core::math::AABB<f64>> { None } fn number_of_points(&self) -> Option<usize> { None } fn get_named_field(&self, _field_name: &str) -> Option<Box<dyn std::any::Any>> { None } fn clone_into_box(&self) -> Box<dyn Metadata> { Box::new(self.clone()) } }
use std::io::{stdin, Read, StdinLock};
use std::str::FromStr;

/// Minimal whitespace-delimited token scanner over locked stdin,
/// competitive-programming style.
#[allow(dead_code)]
struct Scanner<'a> {
    cin: StdinLock<'a>,
}

#[allow(dead_code)]
impl<'a> Scanner<'a> {
    fn new(cin: StdinLock<'a>) -> Scanner<'a> {
        // Field-init shorthand (was the redundant `cin: cin`).
        Scanner { cin }
    }

    /// Reads the next whitespace-delimited token and parses it; returns
    /// `None` when parsing fails (e.g. at end of input).
    fn read<T: FromStr>(&mut self) -> Option<T> {
        let token = self
            .cin
            .by_ref()
            .bytes()
            .map(|c| c.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect::<String>();
        token.parse::<T>().ok()
    }

    /// Like `read`, but panics when no valid token is available.
    fn input<T: FromStr>(&mut self) -> T {
        self.read().unwrap()
    }

    /// Reads `len` tokens into a vector.
    fn vec<T: FromStr>(&mut self, len: usize) -> Vec<T> {
        (0..len).map(|_| self.input()).collect()
    }

    /// Reads a `row` x `col` matrix of tokens.
    fn mat<T: FromStr>(&mut self, row: usize, col: usize) -> Vec<Vec<T>> {
        (0..row).map(|_| self.vec(col)).collect()
    }
}

/// Reads three row lengths, then counts the permutations of `1..=n`
/// (n = total cell count) whose row-major filling of the three rows is
/// strictly increasing along each row and down each column.
fn main() {
    let cin = stdin();
    let cin = cin.lock();
    let mut sc = Scanner::new(cin);

    let a: Vec<i64> = sc.vec(3);
    let n = a.iter().sum();
    let mut arr: Vec<i64> = (1..=n).collect();
    let mut ans = 0;
    // Enumerate permutations in lexicographic order, starting from the
    // sorted (first) one.
    loop {
        // Fill the 3x3 grid row by row with the current permutation.
        let mut x = [[0; 3]; 3];
        let mut k = 0;
        for i in 0..3 {
            for j in 0..(a[i] as usize) {
                x[i][j] = arr[k];
                k += 1;
            }
        }
        // Check strict increase down columns (vs. row i-1) and along rows
        // (vs. column j-1).
        let mut ok = true;
        for i in 0..3 {
            for j in 0..(a[i] as usize) {
                ok = ok
                    && (i == 0 || x[i][j] > x[i - 1][j])
                    && (j == 0 || x[i][j] > x[i][j - 1]);
            }
        }
        if ok {
            ans += 1;
        }
        if !arr.next_permutation() {
            break;
        }
    }
    println!("{}", ans);
}

/// In-place lexicographic permutation stepping for slices, mirroring the
/// API once provided by the Rust standard library.
pub trait LexicalPermutation {
    /// Return `true` if the slice was permuted, `false` if it is already
    /// at the last ordered permutation.
    fn next_permutation(&mut self) -> bool;
    /// Return `true` if the slice was permuted, `false` if it is already
    /// at the first ordered permutation.
    fn prev_permutation(&mut self) -> bool;
}

impl<T> LexicalPermutation for [T]
where
    T: Ord,
{
    /// Original author in Rust: Thomas Backman <serenity@exscape.org>
    fn next_permutation(&mut self) -> bool {
        // These cases only have 1 permutation each, so we can't do anything.
        if self.len() < 2 {
            return false;
        }

        // Step 1: Identify the longest, rightmost weakly decreasing part of the vector
        let mut i = self.len() - 1;
        while i > 0 && self[i - 1] >= self[i] {
            i -= 1;
        }

        // If that is the entire vector, this is the last-ordered permutation.
        if i == 0 {
            return false;
        }

        // Step 2: Find the rightmost element larger than the pivot (i-1)
        let mut j = self.len() - 1;
        while j >= i && self[j] <= self[i - 1] {
            j -= 1;
        }

        // Step 3: Swap that element with the pivot
        self.swap(j, i - 1);

        // Step 4: Reverse the (previously) weakly decreasing part
        self[i..].reverse();

        true
    }

    fn prev_permutation(&mut self) -> bool {
        // These cases only have 1 permutation each, so we can't do anything.
        if self.len() < 2 {
            return false;
        }

        // Step 1: Identify the longest, rightmost weakly increasing part of the vector
        let mut i = self.len() - 1;
        while i > 0 && self[i - 1] <= self[i] {
            i -= 1;
        }

        // If that is the entire vector, this is the first-ordered permutation.
        if i == 0 {
            return false;
        }

        // Step 2: Reverse the weakly increasing part
        self[i..].reverse();

        // Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
        let mut j = self.len() - 1;
        while j >= i && self[j - 1] < self[i - 1] {
            j -= 1;
        }

        // Step 4: Swap that element with the pivot
        self.swap(i - 1, j);

        true
    }
}
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;

use serde::Serialize;
use serde_with::skip_serializing_none;
use url::Url;

use crate::{
    pkce::CodeChallengeMethod,
    requests::{GrantType, ResponseMode, ResponseType},
};

// TODO: https://datatracker.ietf.org/doc/html/rfc8414#section-2
/// OAuth 2.0 Authorization Server Metadata (RFC 8414 §2).
///
/// `None` fields are omitted from the serialized document entirely
/// (via `#[skip_serializing_none]`) rather than emitted as `null`.
#[skip_serializing_none]
#[derive(Serialize)]
pub struct Metadata {
    /// The authorization server's issuer identifier, which is a URL that uses
    /// the "https" scheme and has no query or fragment components.
    pub issuer: Url,

    /// URL of the authorization server's authorization endpoint.
    pub authorization_endpoint: Option<Url>,

    /// URL of the authorization server's token endpoint.
    pub token_endpoint: Option<Url>,

    /// URL of the authorization server's JWK Set document.
    pub jwks_uri: Option<Url>,

    /// URL of the authorization server's OAuth 2.0 Dynamic Client Registration
    /// endpoint.
    pub registration_endpoint: Option<Url>,

    /// JSON array containing a list of the OAuth 2.0 "scope" values that this
    /// authorization server supports.
    pub scopes_supported: Option<HashSet<String>>,

    /// JSON array containing a list of the OAuth 2.0 "response_type" values
    /// that this authorization server supports.
    pub response_types_supported: Option<HashSet<ResponseType>>,

    /// JSON array containing a list of the OAuth 2.0 "response_mode" values
    /// that this authorization server supports, as specified in "OAuth 2.0
    /// Multiple Response Type Encoding Practices".
    pub response_modes_supported: Option<HashSet<ResponseMode>>,

    /// JSON array containing a list of the OAuth 2.0 grant type values that
    /// this authorization server supports.
    pub grant_types_supported: Option<HashSet<GrantType>>,

    /// PKCE code challenge methods supported by this authorization server
    pub code_challenge_methods_supported: Option<HashSet<CodeChallengeMethod>>,
}
//! AniList module tree: per-entity submodules plus the pagination and
//! embed helpers that are exposed to the rest of the crate.

mod airing_schedule;
mod character;
pub mod embeds;
mod media;
pub mod pagination;
mod staff;
mod studio;
mod types;
mod user;

// Re-export the pagination entry point and the view/kind types that
// callers outside this module consume.
pub use pagination::AniListPagination;
pub use types::{
    AniListCharacterView, AniListMediaView, AniListPaginationKind, AniListStaffView,
    AniListUserView,
};
extern crate docopt;
extern crate rand;
extern crate rustc_serialize;

use rand::Rng;

// docopt usage string: a single required positional sample count.
// NOTE(review): the original line layout of this string was lost in
// extraction; docopt parses the "Usage:" section, so the exact
// indentation should be confirmed against the original file.
const USAGE: &'static str = "
Usage: pi <num-samples>
       pi --help

Options:
  -h --help       Show this screen.
  <num-samples>   Number of samples.
";

// docopt decodes `<num-samples>` into the `arg_` prefixed field.
#[derive(Debug, RustcDecodable)]
struct Args {
    arg_num_samples: u64,
}

/// Monte-Carlo estimate of pi: sample points uniformly in the unit square
/// and count those falling inside the quarter circle of radius 1; the hit
/// ratio approximates pi/4.
fn main() {
    let args: Args = docopt::Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    let num_samples = args.arg_num_samples;

    let mut rng = rand::thread_rng();

    println!("calculating pi with {} samples ...", num_samples);

    let mut count = 0;
    for _ in 0 .. num_samples {
        let x = rng.next_f64();
        let y = rng.next_f64();
        // Inside the quarter circle?
        if x * x + y * y <= 1.0 {
            count += 1;
        }
    }

    // Quarter-circle area / unit-square area = pi/4.
    println!("pi = {}", 4. * (count as f64) / (num_samples as f64));
}
mod database;
mod get_data;

use anyhow::anyhow;
use chrono::{DateTime, Utc};
use get_data::process;
use icalendar::{Calendar, Component, Event};
use std::collections::HashMap;
use std::str::FromStr;
use tide::{http::Mime, Error, Request, Response, StatusCode};

/// One timetable entry: a class held at `place` between `time_begin` and
/// `time_end` (UTC).
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
pub struct Data {
    class: String,
    time_begin: DateTime<Utc>,
    time_end: DateTime<Utc>,
    place: String,
}

impl Data {
    /// Renders the entry as the key/value map serialized by the `/json/*`
    /// endpoint (`title`/`start`/`end` keys; presumably consumed by a
    /// calendar widget — confirm against the front end).
    pub fn to_json_map(&self) -> HashMap<&'static str, String> {
        let mut map = HashMap::new();
        map.insert("title", format!("{}\n{}", self.class, self.place));
        map.insert("start", (self.time_begin).to_rfc3339());
        map.insert("end", (self.time_end).to_rfc3339());
        map
    }

    /// Returns an owned copy of the class name.
    pub fn class(&self) -> String {
        self.class.to_string()
    }

    /// Returns an owned copy of the location string.
    pub fn place(&self) -> String {
        self.place.to_string()
    }

    /// Converts the entry into an iCalendar event.
    /// ("Địa điểm" is Vietnamese for "location"; the literal must stay as-is.)
    pub fn to_ics_event(&self) -> Event {
        Event::new()
            .summary(self.class().as_str())
            .description(format!("Địa điểm: {}", self.place()).as_str())
            .location(self.place().trim())
            .starts(self.time_begin)
            .ends(self.time_end)
            .done()
    }
}

/// Entry point: migrates the database, then serves an index page plus
/// `/ics/*` and `/json/*` endpoints that refresh and return a user's
/// timetable.
#[async_std::main]
async fn main() -> Result<(), anyhow::Error> {
    tide::log::start();
    database::migrate().await?;
    let mut app = tide::new();

    // Static landing page.
    app.at("/").get(|_| async {
        // NOTE(review): 202 Accepted for a plain GET is unusual — 200 OK
        // expected; confirm before changing.
        let mut res = Response::new(StatusCode::Accepted);
        res.set_content_type(tide::http::mime::HTML);
        res.set_body(include_str!("index.html"));
        Ok(res)
    });

    // iCalendar export: /ics/<user>_<password> or /ics/<user>/<password>.
    app.at("/ics/*").get(|req: Request<()>| async move {
        let info: String = req.url().path().replace("/ics/", "");
        // Accept either '_' or '/' as the user/password separator.
        let (usr, pwd) = info
            .split_once(|c| (c == '_') | (c == '/'))
            .ok_or_else(|| Error::new(400, anyhow!("Example CT010101/Passwd")))?;
        let usr = usr.to_uppercase();
        // Refresh the stored timetable before reading it back.
        database::set_data(&usr, &process(&usr, pwd).await?).await?;
        let events = database::get_data(&usr)
            .await?
            .iter()
            .map(Data::to_ics_event)
            .collect::<Vec<Event>>();
        let mut cal = Calendar::new();
        cal.name("TKBSV");
        cal.extend(events);
        let mut res = Response::new(StatusCode::Accepted);
        res.set_content_type(Mime::from_str("text/calendar").unwrap());
        res.set_body(format!("{}", cal).into_bytes());
        Ok(res)
    });

    // JSON export: /json/<user>/<password>.
    app.at("/json/*").get(|req: Request<()>| async move {
        let path = req.url().path().replace("/json/", "");
        let (usr, pwd) = path
            .split_once('/')
            .ok_or_else(|| Error::new(400, anyhow!("Example CT010101/Passwd")))?;
        let usr = usr.to_uppercase();
        // Refresh the stored timetable before reading it back.
        database::set_data(&usr, &process(&usr, pwd).await?).await?;
        let doc = database::get_data(&usr)
            .await?
            .iter()
            .map(|dat| dat.to_json_map())
            .collect::<Vec<HashMap<&'static str, String>>>();
        let mut res = Response::new(StatusCode::Accepted);
        res.set_content_type(Mime::from_str("application/json")?);
        res.set_body(tide::Body::from_json(&doc)?);
        Ok(res)
    });

    // Bind to $PORT, defaulting to 8080.
    let port = std::env::var("PORT").unwrap_or_else(|_| "8080".to_string());
    Ok(app.listen(format!("0.0.0.0:{}", port)).await?)
}
use std::collections::HashMap;
use std::env;
use std::error::Error;
use std::fs::File;
use std::io::{BufRead, BufReader};

/// Returns the characters that `a` and `b` share at the same positions,
/// compared pairwise up to the length of the shorter string.
fn matches(a: &str, b: &str) -> String {
    a.chars()
        .zip(b.chars())
        .filter(|(x, y)| x == y)
        .map(|(x, _)| x)
        .collect()
}

/// Advent of Code 2018 day 2: reads box IDs (one per line) from the file
/// named by the first CLI argument, prints the part-1 checksum and the
/// part-2 common letters of the two IDs differing in exactly one position.
///
/// Uses std's `Box<dyn Error>` instead of the deprecated `failure` crate.
fn main() -> Result<(), Box<dyn Error>> {
    let args: Vec<String> = env::args().collect();
    let file = File::open(&args[1])?;
    let mut box_ids: Vec<String> = BufReader::new(file)
        .lines()
        .map(|l| l.expect("file read failed"))
        .collect();

    // Part 1: count IDs with some letter appearing exactly twice and IDs
    // with some letter appearing exactly three times; checksum = product.
    let mut twos = 0;
    let mut threes = 0;
    for id in box_ids.iter() {
        let mut letter_counts = HashMap::new();
        for letter in id.chars() {
            *letter_counts.entry(letter).or_insert(0) += 1;
        }
        if letter_counts.values().any(|&x| x == 2) {
            twos += 1
        };
        if letter_counts.values().any(|&x| x == 3) {
            threes += 1
        };
    }
    println!("checksum {}", twos * threes);

    // Part 2: compare adjacent IDs after sorting.
    // NOTE(review): sorting only guarantees adjacency when the two IDs agree
    // on a prefix up to the differing position; this heuristic works for
    // typical puzzle inputs but is not correct for all inputs in general.
    box_ids.sort_unstable();
    for ids in box_ids.windows(2) {
        let id_len = ids[0].len();
        let matches = matches(&ids[0], &ids[1]);
        let distance = id_len - matches.len();
        if distance == 1 {
            println!("{}", matches);
        }
    }
    Ok(())
}
// This file is part of Substrate.

// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Substrate state API.

pub mod error;
pub mod helpers;

use self::error::FutureResult;
use jsonrpc_core::Result as RpcResult;
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use sp_core::storage::{StorageChangeSet, StorageData, StorageKey};
use sp_core::Bytes;
use sp_version::RuntimeVersion;

pub use self::gen_client::Client as StateClient;
pub use self::helpers::ReadProof;

/// Substrate state API.
///
/// All query methods take an optional block `hash`; `None` presumably
/// means "at the best/latest block" (confirm against the server impl).
#[rpc]
pub trait StateApi<Hash> {
    /// RPC Metadata
    type Metadata;

    /// Call a contract at a block's state.
    #[rpc(name = "state_call", alias("state_callAt"))]
    fn call(&self, name: String, bytes: Bytes, hash: Option<Hash>) -> FutureResult<Bytes>;

    /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support.
    /// Returns the keys with prefix, leave empty to get all the keys.
    #[rpc(name = "state_getKeys")]
    fn storage_keys(&self, prefix: StorageKey, hash: Option<Hash>)
        -> FutureResult<Vec<StorageKey>>;

    /// Returns the keys with prefix, leave empty to get all the keys
    #[rpc(name = "state_getPairs")]
    fn storage_pairs(
        &self,
        prefix: StorageKey,
        hash: Option<Hash>,
    ) -> FutureResult<Vec<(StorageKey, StorageData)>>;

    /// Returns the keys with prefix with pagination support.
    /// Up to `count` keys will be returned.
    /// If `start_key` is passed, return next keys in storage in lexicographic order.
    #[rpc(name = "state_getKeysPaged", alias("state_getKeysPagedAt"))]
    fn storage_keys_paged(
        &self,
        prefix: Option<StorageKey>,
        count: u32,
        start_key: Option<StorageKey>,
        hash: Option<Hash>,
    ) -> FutureResult<Vec<StorageKey>>;

    /// Returns a storage entry at a specific block's state.
    #[rpc(name = "state_getStorage", alias("state_getStorageAt"))]
    fn storage(&self, key: StorageKey, hash: Option<Hash>) -> FutureResult<Option<StorageData>>;

    /// Returns the hash of a storage entry at a block's state.
    #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))]
    fn storage_hash(&self, key: StorageKey, hash: Option<Hash>) -> FutureResult<Option<Hash>>;

    /// Returns the size of a storage entry at a block's state.
    #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))]
    fn storage_size(&self, key: StorageKey, hash: Option<Hash>) -> FutureResult<Option<u64>>;

    /// Returns the runtime metadata as an opaque blob.
    #[rpc(name = "state_getMetadata")]
    fn metadata(&self, hash: Option<Hash>) -> FutureResult<Bytes>;

    /// Get the runtime version.
    #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))]
    fn runtime_version(&self, hash: Option<Hash>) -> FutureResult<RuntimeVersion>;

    /// Query historical storage entries (by key) starting from a block given as the second
    /// parameter.
    ///
    /// NOTE This first returned result contains the initial state of storage for all keys.
    /// Subsequent values in the vector represent changes to the previous state (diffs).
    #[rpc(name = "state_queryStorage")]
    fn query_storage(
        &self,
        keys: Vec<StorageKey>,
        block: Hash,
        hash: Option<Hash>,
    ) -> FutureResult<Vec<StorageChangeSet<Hash>>>;

    /// Query storage entries (by key) starting at block hash given as the second parameter.
    #[rpc(name = "state_queryStorageAt")]
    fn query_storage_at(
        &self,
        keys: Vec<StorageKey>,
        at: Option<Hash>,
    ) -> FutureResult<Vec<StorageChangeSet<Hash>>>;

    /// Returns proof of storage entries at a specific block's state.
    #[rpc(name = "state_getReadProof")]
    fn read_proof(
        &self,
        keys: Vec<StorageKey>,
        hash: Option<Hash>,
    ) -> FutureResult<ReadProof<Hash>>;

    /// New runtime version subscription
    #[pubsub(
        subscription = "state_runtimeVersion",
        subscribe,
        name = "state_subscribeRuntimeVersion",
        alias("chain_subscribeRuntimeVersion")
    )]
    fn subscribe_runtime_version(
        &self,
        metadata: Self::Metadata,
        subscriber: Subscriber<RuntimeVersion>,
    );

    /// Unsubscribe from runtime version subscription
    #[pubsub(
        subscription = "state_runtimeVersion",
        unsubscribe,
        name = "state_unsubscribeRuntimeVersion",
        alias("chain_unsubscribeRuntimeVersion")
    )]
    fn unsubscribe_runtime_version(
        &self,
        metadata: Option<Self::Metadata>,
        id: SubscriptionId,
    ) -> RpcResult<bool>;

    /// New storage subscription.
    /// `keys = None` subscribes to all storage changes.
    #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")]
    fn subscribe_storage(
        &self,
        metadata: Self::Metadata,
        subscriber: Subscriber<StorageChangeSet<Hash>>,
        keys: Option<Vec<StorageKey>>,
    );

    /// Unsubscribe from storage subscription
    #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")]
    fn unsubscribe_storage(
        &self,
        metadata: Option<Self::Metadata>,
        id: SubscriptionId,
    ) -> RpcResult<bool>;
}
use clap::Args;

use crate::{
    arrow::{polars::nonnull_schema, writer::open_parquet_writer},
    prelude::*,
};
use polars::prelude::*;

// Canonical table of every known ISBN with its assigned numeric ID.
static ALL_ISBNS_FILE: &str = "book-links/all-isbns.parquet";

/// Link records to ISBN IDs.
#[derive(Debug, Args)]
#[command(name = "link-isbn-ids")]
pub struct LinkISBNIds {
    /// Read record IDs from RECFLD.
    #[arg(
        short = 'R',
        long = "record-id",
        name = "RECFLD",
        default_value = "rec_id"
    )]
    rec_field: String,

    /// Read ISBNs from FIELD.
    #[arg(
        short = 'I',
        long = "isbn-field",
        name = "FIELD",
        default_value = "isbn"
    )]
    isbn_fields: Vec<String>,

    /// Write output to FILE.
    #[arg(short = 'o', long = "output", name = "FILE")]
    outfile: PathBuf,

    /// Read records from INPUT.
    #[arg(name = "INFILE")]
    infile: PathBuf,
}

impl Command for LinkISBNIds {
    // Joins the input records against the all-ISBNs table and writes the
    // deduplicated (record id, isbn_id) link pairs to a Parquet file.
    fn exec(&self) -> Result<()> {
        info!("record field: {}", &self.rec_field);
        info!("ISBN fields: {:?}", &self.isbn_fields);
        let isbns = LazyFrame::scan_parquet(ALL_ISBNS_FILE, default())?;
        let records = LazyFrame::scan_parquet(&self.infile, default())?;

        let merged = if self.isbn_fields.len() == 1 {
            // one column, join on it
            records.join(
                isbns,
                &[col(self.isbn_fields[0].as_str())],
                &[col("isbn")],
                JoinType::Inner,
            )
        } else {
            // Multiple ISBN columns: melt them into a single `isbn` column
            // (one row per record/field pair) so a single join suffices.
            let mut melt = MeltArgs::default();
            melt.id_vars.push(self.rec_field.clone());
            for fld in &self.isbn_fields {
                melt.value_vars.push(fld.clone());
            }
            melt.value_name = Some("isbn".to_string());
            melt.variable_name = Some("field".to_string());
            let rm = records.melt(melt);
            rm.join(isbns, &[col("isbn")], &[col("isbn")], JoinType::Inner)
        };

        // Drop unmatched rows, dedupe link pairs, order by record id.
        let filtered = merged
            .filter(col("isbn").is_not_null())
            .select(&[col(self.rec_field.as_str()), col("isbn_id")])
            .unique(None, UniqueKeepStrategy::First)
            .sort(self.rec_field.as_str(), default());

        info!("collecting results");
        let frame = filtered.collect()?;

        // Data check: the inner join must not leave null keys on either side.
        if frame.column(&self.rec_field)?.null_count() > 0 {
            error!("final frame has null record IDs");
            return Err(anyhow!("data check failed"));
        }
        if frame.column("isbn_id")?.null_count() > 0 {
            error!("final frame has null ISBN IDs");
            return Err(anyhow!("data check failed"));
        }

        info!("saving {} links to {:?}", frame.height(), &self.outfile);
        let schema = nonnull_schema(&frame);
        let writer = open_parquet_writer(&self.outfile, schema)?;
        writer.write_and_finish(frame.iter_chunks())?;

        Ok(())
    }
}
pub mod languages; pub mod link_checker; pub mod markup; pub mod search; pub mod slugify; pub mod taxonomies; use std::collections::HashMap; use std::path::{Path, PathBuf}; use globset::{Glob, GlobSet, GlobSetBuilder}; use serde_derive::{Deserialize, Serialize}; use syntect::parsing::SyntaxSetBuilder; use toml::Value as Toml; use crate::theme::Theme; use errors::{bail, Error, Result}; use utils::fs::read_file_with_error; use self::markup::HighlighterSettings; // We want a default base url for tests static DEFAULT_BASE_URL: &str = "http://a-website.com"; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum Mode { Build, Serve, Check, } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct Config { /// Base URL of the site, the only required config argument pub base_url: String, /// Theme to use pub theme: Option<String>, /// Title of the site. Defaults to None pub title: Option<String>, /// Description of the site pub description: Option<String>, /// The language used in the site. Defaults to "en" pub default_language: String, /// The list of supported languages outside of the default one pub languages: Vec<languages::Language>, /// Languages list and translated strings /// /// The `String` key of `HashMap` is a language name, the value should be toml crate `Table` /// with String key representing term and value another `String` representing its translation. /// /// The attribute is intentionally not public, use `get_translation()` method for translating /// key into different language. translations: HashMap<String, languages::TranslateTerm>, /// Whether to generate a feed. Defaults to false. pub generate_feed: bool, /// The number of articles to include in the feed. Defaults to including all items. pub feed_limit: Option<usize>, /// The filename to use for feeds. Used to find the template, too. /// Defaults to "atom.xml", with "rss.xml" also having a template provided out of the box. 
pub feed_filename: String, /// If set, files from static/ will be hardlinked instead of copied to the output dir. pub hard_link_static: bool, pub taxonomies: Vec<taxonomies::Taxonomy>, /// Whether to compile the `sass` directory and output the css files into the static folder pub compile_sass: bool, /// Whether to minify the html output pub minify_html: bool, /// Whether to build the search index for the content pub build_search_index: bool, /// A list of file glob patterns to ignore when processing the content folder. Defaults to none. /// Had to remove the PartialEq derive because GlobSet does not implement it. No impact /// because it's unused anyway (who wants to sort Configs?). pub ignored_content: Vec<String>, #[serde(skip_serializing, skip_deserializing)] // not a typo, 2 are needed pub ignored_content_globset: Option<GlobSet>, /// The mode Zola is currently being ran on. Some logging/feature can differ depending on the /// command being used. #[serde(skip_serializing)] pub mode: Mode, /// A list of directories to search for additional `.sublime-syntax` files in. 
pub extra_syntaxes: Vec<String>, pub output_dir: String, pub link_checker: link_checker::LinkChecker, /// The setup for which slugification strategies to use for paths, taxonomies and anchors pub slugify: slugify::Slugify, /// The search config, telling what to include in the search index pub search: search::Search, /// The config for the Markdown rendering: syntax highlighting and everything pub markdown: markup::Markdown, /// List of enabled Lua plugins pub plugins: Vec<String>, /// All user params set in [extra] in the config pub extra: HashMap<String, Toml>, } impl Config { /// Parses a string containing TOML to our Config struct /// Any extra parameter will end up in the extra field pub fn parse(content: &str) -> Result<Config> { let mut config: Config = match toml::from_str(content) { Ok(c) => c, Err(e) => bail!(e), }; if config.base_url.is_empty() || config.base_url == DEFAULT_BASE_URL { bail!("A base URL is required in config.toml with key `base_url`"); } if config.languages.iter().any(|l| l.code == config.default_language) { bail!("Default language `{}` should not appear both in `config.default_language` and `config.languages`", config.default_language) } if !config.ignored_content.is_empty() { // Convert the file glob strings into a compiled glob set matcher. We want to do this once, // at program initialization, rather than for every page, for example. We arrange for the // globset matcher to always exist (even though it has to be an inside an Option at the // moment because of the TOML serializer); if the glob set is empty the `is_match` function // of the globber always returns false. 
let mut glob_set_builder = GlobSetBuilder::new(); for pat in &config.ignored_content { let glob = match Glob::new(pat) { Ok(g) => g, Err(e) => bail!("Invalid ignored_content glob pattern: {}, error = {}", pat, e), }; glob_set_builder.add(glob); } config.ignored_content_globset = Some(glob_set_builder.build().expect("Bad ignored_content in config file.")); } for taxonomy in config.taxonomies.iter_mut() { if taxonomy.lang.is_empty() { taxonomy.lang = config.default_language.clone(); } } Ok(config) } /// Parses a config file from the given path pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Config> { let path = path.as_ref(); let file_name = path.file_name().unwrap(); let content = read_file_with_error( path, &format!("No `{:?}` file found. Are you in the right directory?", file_name), )?; Config::parse(&content) } /// Attempt to load any extra syntax found in the extra syntaxes of the config /// TODO: move to markup.rs in 0.14 pub fn load_extra_syntaxes(&mut self, base_path: &Path) -> Result<()> { let extra_syntaxes = &self.markdown.extra_syntaxes; if extra_syntaxes.is_empty() { return Ok(()); } let mut ss = SyntaxSetBuilder::new(); for dir in extra_syntaxes { ss.add_from_folder(base_path.join(dir), true)?; } self.markdown.extra_syntax_set = Some(ss.build()); Ok(()) } /// Makes a url, taking into account that the base url might have a trailing slash pub fn make_permalink(&self, path: &str) -> String { let trailing_bit = if path.ends_with('/') || path.ends_with(&self.feed_filename) || path.is_empty() { "" } else { "/" }; // Index section with a base url that has a trailing slash if self.base_url.ends_with('/') && path == "/" { self.base_url.clone() } else if path == "/" { // index section with a base url that doesn't have a trailing slash format!("{}/", self.base_url) } else if self.base_url.ends_with('/') && path.starts_with('/') { format!("{}{}{}", self.base_url, &path[1..], trailing_bit) } else if self.base_url.ends_with('/') || path.starts_with('/') { 
format!("{}{}{}", self.base_url, path, trailing_bit) } else { format!("{}/{}{}", self.base_url, path, trailing_bit) } } /// Merges the extra data from the theme with the config extra data fn add_theme_extra(&mut self, theme: &Theme) -> Result<()> { for (key, val) in &theme.extra { if !self.extra.contains_key(key) { // The key is not overridden in site config, insert it self.extra.insert(key.to_string(), val.clone()); continue; } merge(self.extra.get_mut(key).unwrap(), val)?; } Ok(()) } /// Parse the theme.toml file and merges the extra data from the theme /// with the config extra data pub fn merge_with_theme(&mut self, path: &PathBuf) -> Result<()> { let theme = Theme::from_file(path)?; self.add_theme_extra(&theme) } /// Is this site using i18n? pub fn is_multilingual(&self) -> bool { !self.languages.is_empty() } /// Returns the codes of all additional languages pub fn languages_codes(&self) -> Vec<&str> { self.languages.iter().map(|l| l.code.as_ref()).collect() } pub fn is_in_build_mode(&self) -> bool { self.mode == Mode::Build } pub fn is_in_serve_mode(&self) -> bool { self.mode == Mode::Serve } pub fn is_in_check_mode(&self) -> bool { self.mode == Mode::Check } pub fn enable_serve_mode(&mut self) { self.mode = Mode::Serve; } pub fn enable_check_mode(&mut self) { self.mode = Mode::Check; // Disable syntax highlighting since the results won't be used // and this operation can be expensive. 
self.markdown.highlighter = HighlighterSettings::None; } pub fn get_translation<S: AsRef<str>>(&self, lang: S, key: S) -> Result<String> { let terms = self.translations.get(lang.as_ref()).ok_or_else(|| { Error::msg(format!("Translation for language '{}' is missing", lang.as_ref())) })?; terms .get(key.as_ref()) .ok_or_else(|| { Error::msg(format!( "Translation key '{}' for language '{}' is missing", key.as_ref(), lang.as_ref() )) }) .map(|term| term.to_string()) } } // merge TOML data that can be a table, or anything else pub fn merge(into: &mut Toml, from: &Toml) -> Result<()> { match (from.is_table(), into.is_table()) { (false, false) => { // These are not tables so we have nothing to merge Ok(()) } (true, true) => { // Recursively merge these tables let into_table = into.as_table_mut().unwrap(); for (key, val) in from.as_table().unwrap() { if !into_table.contains_key(key) { // An entry was missing in the first table, insert it into_table.insert(key.to_string(), val.clone()); continue; } // Two entries to compare, recurse merge(into_table.get_mut(key).unwrap(), val)?; } Ok(()) } _ => { // Trying to merge a table with something else Err(Error::msg(&format!("Cannot merge config.toml with theme.toml because the following values have incompatibles types:\n- {}\n - {}", into, from))) } } } impl Default for Config { fn default() -> Config { Config { base_url: DEFAULT_BASE_URL.to_string(), theme: None, title: None, description: None, default_language: "en".to_string(), languages: Vec::new(), translations: HashMap::new(), generate_feed: false, feed_limit: None, feed_filename: "atom.xml".to_string(), hard_link_static: false, taxonomies: Vec::new(), compile_sass: false, minify_html: false, build_search_index: false, ignored_content: Vec::new(), ignored_content_globset: None, mode: Mode::Build, extra_syntaxes: Vec::new(), output_dir: "public".to_string(), link_checker: link_checker::LinkChecker::default(), slugify: slugify::Slugify::default(), search: 
// NOTE(review): tail of a struct literal (presumably the `Default` impl for
// `Config`) whose opening lies before this chunk — reproduced unchanged.
        search::Search::default(),
        markdown: markup::Markdown::default(),
        plugins: Vec::new(),
        extra: HashMap::new(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use utils::slugs::SlugifyStrategy;

    #[test]
    fn can_import_valid_config() {
        let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"
        "#;

        let config = Config::parse(config).unwrap();
        assert_eq!(config.title.unwrap(), "My site".to_string());
    }

    #[test]
    fn errors_when_invalid_type() {
        let config = r#"
title = 1
base_url = "https://replace-this-with-your-url.com"
        "#;

        let config = Config::parse(config);
        assert!(config.is_err());
    }

    #[test]
    fn errors_when_missing_required_field() {
        // base_url is required
        let config = r#"
title = ""
        "#;

        let config = Config::parse(config);
        assert!(config.is_err());
    }

    #[test]
    fn can_add_extra_values() {
        let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"

[extra]
hello = "world"
        "#;

        let config = Config::parse(config);
        assert!(config.is_ok());
        assert_eq!(config.unwrap().extra.get("hello").unwrap().as_str().unwrap(), "world");
    }

    #[test]
    fn can_make_url_index_page_with_non_trailing_slash_url() {
        let mut config = Config::default();
        config.base_url = "http://vincent.is".to_string();
        assert_eq!(config.make_permalink(""), "http://vincent.is/");
    }

    // Renamed from `..._railing_...`: typo in the original test name.
    #[test]
    fn can_make_url_index_page_with_trailing_slash_url() {
        let mut config = Config::default();
        config.base_url = "http://vincent.is/".to_string();
        assert_eq!(config.make_permalink(""), "http://vincent.is/");
    }

    #[test]
    fn can_make_url_with_non_trailing_slash_base_url() {
        let mut config = Config::default();
        config.base_url = "http://vincent.is".to_string();
        assert_eq!(config.make_permalink("hello"), "http://vincent.is/hello/");
    }

    #[test]
    fn can_make_url_with_trailing_slash_path() {
        let mut config = Config::default();
        config.base_url = "http://vincent.is/".to_string();
        assert_eq!(config.make_permalink("/hello"), "http://vincent.is/hello/");
    }

    #[test]
    fn can_make_url_with_localhost() {
        let mut config = Config::default();
        config.base_url = "http://127.0.0.1:1111".to_string();
        assert_eq!(config.make_permalink("/tags/rust"), "http://127.0.0.1:1111/tags/rust/");
    }

    // https://github.com/Keats/gutenberg/issues/486
    #[test]
    fn doesnt_add_trailing_slash_to_feed() {
        let mut config = Config::default();
        config.base_url = "http://vincent.is/".to_string();
        assert_eq!(config.make_permalink("atom.xml"), "http://vincent.is/atom.xml");
    }

    #[test]
    fn can_merge_with_theme_data_and_preserve_config_value() {
        let config_str = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"

[extra]
hello = "world"

[extra.sub]
foo = "bar"

[extra.sub.sub]
foo = "bar"
        "#;
        let mut config = Config::parse(config_str).unwrap();
        let theme_str = r#"
[extra]
hello = "foo"
a_value = 10

[extra.sub]
foo = "default"
truc = "default"

[extra.sub.sub]
foo = "default"
truc = "default"
        "#;
        let theme = Theme::parse(theme_str).unwrap();
        assert!(config.add_theme_extra(&theme).is_ok());
        let extra = config.extra;
        // Config values win over theme values at every nesting level…
        assert_eq!(extra["hello"].as_str().unwrap(), "world".to_string());
        assert_eq!(extra["sub"]["foo"].as_str().unwrap(), "bar".to_string());
        assert_eq!(extra["sub"]["sub"]["foo"].as_str().unwrap(), "bar".to_string());
        // …while theme-only keys are still merged in.
        assert_eq!(extra["a_value"].as_integer().unwrap(), 10);
        assert_eq!(
            extra["sub"]
                .get("truc")
                .expect("The whole extra.sub table was overridden by theme data, discarding extra.sub.truc")
                .as_str()
                .unwrap(),
            "default".to_string()
        );
        assert_eq!(
            extra["sub"]["sub"]
                .get("truc")
                .expect("Failed to merge subsubtable extra.sub.sub")
                .as_str()
                .unwrap(),
            "default".to_string()
        );
    }

    const CONFIG_TRANSLATION: &str = r#"
base_url = "https://remplace-par-ton-url.fr"
default_language = "fr"

[translations]

[translations.fr]
title = "Un titre"

[translations.en]
title = "A title"
        "#;

    #[test]
    fn can_use_present_translation() {
        let config = Config::parse(CONFIG_TRANSLATION).unwrap();
        assert_eq!(config.get_translation("fr", "title").unwrap(), "Un titre");
        assert_eq!(config.get_translation("en", "title").unwrap(), "A title");
    }

    #[test]
    fn error_on_absent_translation_lang() {
        let config = Config::parse(CONFIG_TRANSLATION).unwrap();
        let error = config.get_translation("absent", "key").unwrap_err();
        assert_eq!("Translation for language 'absent' is missing", format!("{}", error));
    }

    #[test]
    fn error_on_absent_translation_key() {
        let config = Config::parse(CONFIG_TRANSLATION).unwrap();
        let error = config.get_translation("en", "absent").unwrap_err();
        assert_eq!("Translation key 'absent' for language 'en' is missing", format!("{}", error));
    }

    #[test]
    fn missing_ignored_content_results_in_empty_vector_and_empty_globset() {
        let config_str = r#"
title = "My site"
base_url = "example.com"
        "#;

        let config = Config::parse(config_str).unwrap();
        let v = config.ignored_content;
        assert_eq!(v.len(), 0);
        assert!(config.ignored_content_globset.is_none());
    }

    #[test]
    fn empty_ignored_content_results_in_empty_vector_and_empty_globset() {
        let config_str = r#"
title = "My site"
base_url = "example.com"
ignored_content = []
        "#;

        let config = Config::parse(config_str).unwrap();
        assert_eq!(config.ignored_content.len(), 0);
        assert!(config.ignored_content_globset.is_none());
    }

    #[test]
    fn non_empty_ignored_content_results_in_vector_of_patterns_and_configured_globset() {
        let config_str = r#"
title = "My site"
base_url = "example.com"
ignored_content = ["*.{graphml,iso}", "*.py?"]
        "#;

        let config = Config::parse(config_str).unwrap();
        let v = config.ignored_content;
        assert_eq!(v, vec!["*.{graphml,iso}", "*.py?"]);

        let g = config.ignored_content_globset.unwrap();
        assert_eq!(g.len(), 2);
        assert!(g.is_match("foo.graphml"));
        assert!(g.is_match("foo.iso"));
        assert!(!g.is_match("foo.png"));
        assert!(g.is_match("foo.py2"));
        assert!(g.is_match("foo.py3"));
        assert!(!g.is_match("foo.py"));
    }

    #[test]
    fn link_checker_skip_anchor_prefixes() {
        let config_str = r#"
title = "My site"
base_url = "example.com"

[link_checker]
skip_anchor_prefixes = [
    "https://caniuse.com/#feat=",
    "https://github.com/rust-lang/rust/blob/",
]
        "#;

        let config = Config::parse(config_str).unwrap();
        assert_eq!(
            config.link_checker.skip_anchor_prefixes,
            vec!["https://caniuse.com/#feat=", "https://github.com/rust-lang/rust/blob/"]
        );
    }

    #[test]
    fn link_checker_skip_prefixes() {
        let config_str = r#"
title = "My site"
base_url = "example.com"

[link_checker]
skip_prefixes = [
    "http://[2001:db8::]/",
    "https://www.example.com/path",
]
        "#;

        let config = Config::parse(config_str).unwrap();
        assert_eq!(
            config.link_checker.skip_prefixes,
            vec!["http://[2001:db8::]/", "https://www.example.com/path",]
        );
    }

    #[test]
    fn slugify_strategies() {
        let config_str = r#"
title = "My site"
base_url = "example.com"

[slugify]
paths = "on"
taxonomies = "safe"
anchors = "off"
        "#;

        let config = Config::parse(config_str).unwrap();
        assert_eq!(config.slugify.paths, SlugifyStrategy::On);
        assert_eq!(config.slugify.taxonomies, SlugifyStrategy::Safe);
        assert_eq!(config.slugify.anchors, SlugifyStrategy::Off);
    }

    #[test]
    fn error_on_language_set_twice() {
        let config_str = r#"
base_url = "https://remplace-par-ton-url.fr"
default_language = "fr"
languages = [
    { code = "fr" },
    { code = "en" },
]
        "#;
        let config = Config::parse(config_str);
        let err = config.unwrap_err();
        assert_eq!("Default language `fr` should not appear both in `config.default_language` and `config.languages`", format!("{}", err));
    }

    #[test]
    fn cannot_overwrite_theme_mapping_with_invalid_type() {
        let config_str = r#"
base_url = "http://localhost:1312"
default_language = "fr"

[extra]
foo = "bar"
        "#;
        let mut config = Config::parse(config_str).unwrap();
        let theme_str = r#"
[extra]
[extra.foo]
bar = "baz"
        "#;
        let theme = Theme::parse(theme_str).unwrap();
        // We expect an error here: a string value cannot be overridden by a table.
        assert!(config.add_theme_extra(&theme).is_err());
    }

    #[test]
    fn default_output_dir() {
        let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"
        "#;

        let config = Config::parse(config).unwrap();
        assert_eq!(config.output_dir, "public".to_string());
    }

    #[test]
    fn can_add_output_dir() {
        let config = r#"
title = "My site"
base_url = "https://replace-this-with-your-url.com"
output_dir = "docs"
        "#;

        let config = Config::parse(config).unwrap();
        assert_eq!(config.output_dir, "docs".to_string());
    }
}
pub mod checks; pub mod config; pub mod consts; pub mod event_handler; pub mod framework; pub mod store;
/*
 * Datadog API V1 Collection
 *
 * Collection of all Datadog Public endpoints.
 *
 * The version of the OpenAPI document: 1.0
 * Contact: support@datadoghq.com
 * Generated by: https://openapi-generator.tech
 */

// NOTE(review): machine-generated by openapi-generator (see header above);
// prefer regenerating from the OpenAPI spec over hand edits.

/// UsageLambdaHour : Number of lambda functions and sum of the invocations of all lambda functions for each hour for a given organization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageLambdaHour {
    /// Contains the number of different functions for each region and AWS account.
    #[serde(rename = "func_count", skip_serializing_if = "Option::is_none")]
    pub func_count: Option<i64>,
    /// The hour for the usage.
    #[serde(rename = "hour", skip_serializing_if = "Option::is_none")]
    pub hour: Option<String>,
    /// Contains the sum of invocations of all functions.
    #[serde(rename = "invocations_sum", skip_serializing_if = "Option::is_none")]
    pub invocations_sum: Option<i64>,
}

impl UsageLambdaHour {
    /// Number of lambda functions and sum of the invocations of all lambda functions for each hour for a given organization.
    // Every field starts unset (None); callers populate the public fields directly.
    pub fn new() -> UsageLambdaHour {
        UsageLambdaHour {
            func_count: None,
            hour: None,
            invocations_sum: None,
        }
    }
}
// Project Euler #9:
// A Pythagorean triplet is a set of three natural numbers, a < b < c, for which
//
//     a^2 + b^2 = c^2
//
// For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
//
// There exists exactly one Pythagorean triplet for which a + b + c = 1000.
// Find the product abc.

/// Returns `a * b * c` for the first Pythagorean triplet (a < b < c,
/// a^2 + b^2 = c^2) whose members sum to `total`, or `None` if no such
/// triplet exists.
fn triplet_product(total: i32) -> Option<i32> {
    // a is the smallest member so a < total / 3; b < total / 2 for the same reason.
    for a in 1..total / 3 {
        for b in a + 1..total / 2 {
            // a + b + c = total fixes c, removing the original O(n^3) inner loop.
            let c = total - a - b;
            if a * a + b * b == c * c {
                return Some(a * b * c);
            }
        }
    }
    None
}

fn main() {
    match triplet_product(1000) {
        Some(product) => println!("Solution: {}", product),
        None => println!("No solution found"),
    }
}
// NOTE(review): svd2rust-style generated register accessor (read-only
// response register); presumably regenerated from the device SVD — confirm
// before hand-editing.
#[doc = "Register `RESP%s` reader"]
pub type R = crate::R<RESP_SPEC>;
#[doc = "Field `CARDSTATUS` reader - see Table404."]
pub type CARDSTATUS_R = crate::FieldReader<u32>;
impl R {
    #[doc = "Bits 0:31 - see Table404."]
    #[inline(always)]
    pub fn cardstatus(&self) -> CARDSTATUS_R {
        // The field spans the full 32-bit register, so no shift/mask is needed.
        CARDSTATUS_R::new(self.bits)
    }
}
#[doc = "The SDMMC_RESP1/2/3/4R registers contain the status of a card, which is part of the received response.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`resp::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct RESP_SPEC;
impl crate::RegisterSpec for RESP_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`resp::R`](R) reader structure"]
impl crate::Readable for RESP_SPEC {}
#[doc = "`reset()` method sets RESP%s to value 0"]
impl crate::Resettable for RESP_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
pub mod day1; pub mod day2; pub mod day3; pub mod day4; pub mod day5; pub mod day6; pub mod graph; pub mod intcode;
extern crate pkg_config; use std::env; fn main () { let target = env::var("TARGET").unwrap(); if target.ends_with("-apple-darwin") { // Use libosxfuse on OS X pkg_config::find_library("osxfuse").unwrap(); } else if target.ends_with("-unknown-linux-gnu") || target.ends_with("-unknown-freebsd") { // Use libfuse on Linux and FreeBSD pkg_config::find_library("fuse").unwrap(); } else { // Fail on unsupported platforms (e.g. Windows) panic!("Unsupported target platform"); } }
use assert_cmd::Command;
use std::fs::read_dir;
use std::fs::File;
use tempdir::TempDir;

// End-to-end test: running `nomino -d <dir> -s asc "{:3}.mkv"` renames five
// episode files into zero-padded sequential names.
// NOTE(review): skipped on Windows — presumably due to path/rename semantics
// there; confirm the actual reason with the maintainers.
#[cfg(not(target_os = "windows"))]
#[test]
fn test_sort() {
    let dir = TempDir::new("nomino_test").unwrap();
    let inputs = vec![
        "Nomino (2020) S1.E1.1080p.mkv",
        "Nomino (2020) S1.E2.1080p.mkv",
        "Nomino (2020) S1.E3.1080p.mkv",
        "Nomino (2020) S1.E4.1080p.mkv",
        "Nomino (2020) S1.E5.1080p.mkv",
    ];
    let mut outputs = vec!["001.mkv", "002.mkv", "003.mkv", "004.mkv", "005.mkv"];
    // Create empty fixture files; only their names matter for this test.
    for input in inputs {
        let _ = File::create(dir.path().join(input)).unwrap();
    }
    let cmd = Command::cargo_bin(env!("CARGO_PKG_NAME"))
        .unwrap()
        .args(&["-d", dir.path().to_str().unwrap(), "-s", "asc", "{:3}.mkv"])
        .unwrap();
    // Directory listing order is platform-dependent, so sort both sides
    // before comparing element-wise.
    let mut files: Vec<String> = read_dir(dir.path())
        .unwrap()
        .map(|entry| entry.unwrap().file_name().to_str().unwrap().to_string())
        .collect();
    files.sort();
    outputs.sort();
    assert!(cmd.status.success());
    assert_eq!(files.len(), outputs.len());
    assert!(outputs.iter().zip(files.iter()).all(|(a, b)| a == b));
    dir.close().unwrap();
}
#[macro_use]
extern crate rocket;

mod config;
mod faces;

use faces::Faces;
use rocket::data::Data;
use rocket_contrib::json::Json;
use serde::{Serialize, Deserialize};

// JSON response payload; `rename_all` serializes the field as "engagementScore".
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct FaceResponse {
    engagement_score: usize
}

// POST /engagement/score with an image body: scores the largest face found.
#[post("/engagement/score", format = "image/*", data = "<image>")]
async fn engagement_score(image: Data) -> Json<FaceResponse> {
    // Find the largest face in the image
    // NOTE(review): `Faces::new` presumably calls the remote face-detection
    // service configured via config::KEY / config::ENDPOINT — confirm in faces.rs.
    let faces = Faces::new(config::KEY, config::ENDPOINT, image).await.0;
    let largest_face = faces.iter().max_by_key(|face| face.face_rectangle.get_area());

    // Calculate and return the engagement score
    match largest_face {
        Some(face) => Json(FaceResponse { engagement_score: face.engagement_score() }),
        // No face detected: report a zero score rather than an error.
        None => Json(FaceResponse { engagement_score: 0 })
    }
}

#[rocket::main]
async fn main() {
    rocket::ignite()
        .mount("/", routes![engagement_score])
        .launch().await.unwrap();
}
use crate::{instance, instance::AtomMut};
use std::{
    cell::{Ref, RefMut},
    sync::Arc,
};

thread_local! {
    // One engine per thread; every Atom created on this thread shares it.
    static ENGINE: Arc<instance::Engine> = Arc::new(instance::Engine::new());
}

/// Starts a batch on the thread-local engine; the batch lasts until the
/// returned guard is dropped.
pub fn batch() -> Batch {
    ENGINE.with(|engine| Batch::new(engine.batch()))
}

/// Registers a reaction callback with the thread-local engine.
pub fn react(f: impl FnMut() + 'static) {
    ENGINE.with(|engine| engine.react(f))
}

/// Guard wrapping an engine batch; exists only so that dropping it ends the batch.
#[must_use]
pub struct Batch {
    #[allow(dead_code)] // This is only here to be dropped
    inner: instance::Batch,
}

impl Batch {
    pub fn new(inner: instance::Batch) -> Self {
        Batch { inner }
    }
}

/// Handle to a value managed by the thread-local engine.
pub struct Atom<T> {
    inner: instance::Atom<T>,
}

impl<T: 'static> Atom<T> {
    /// Creates a new atom bound to this thread's engine.
    pub fn new(initial_value: T) -> Self {
        let engine = ENGINE.with(<_>::clone);
        Self {
            inner: instance::Atom::new(engine, initial_value),
        }
    }

    /// Immutable borrow of the current value.
    #[must_use]
    pub fn get(&self) -> Ref<'_, T> {
        self.inner.get()
    }

    /// Mutable borrow via the engine's `AtomMut` wrapper.
    #[must_use]
    pub fn get_mut(&self) -> AtomMut<'_, T> {
        self.inner.get_mut()
    }

    /// Plain mutable borrow. NOTE(review): "sample" presumably means this
    /// bypasses change tracking, unlike `get_mut` — confirm in `instance::Atom`.
    #[must_use]
    pub fn sample_mut(&self) -> RefMut<'_, T> {
        self.inner.sample_mut()
    }

    /// Replaces the stored value.
    pub fn set(&self, value: T) {
        self.inner.set(value);
    }
}

impl<T: Default + 'static> Default for Atom<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

// Manual impl: `#[derive(Clone)]` would add a `T: Clone` bound, but cloning
// only duplicates the handle (the inner instance::Atom), not the value.
impl<T> Clone for Atom<T> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}
use crate::msg::Fee; use cosmwasm_std::Uint128; use cosmwasm_std::{StdError, StdResult}; use std::convert::TryFrom; pub const DEFAULT_TRANSACTION_FEE: Fee = Fee { commission_rate_nom: Uint128(3), commission_rate_denom: Uint128(1000), }; pub const DEFAULT_MAX_QUERY_PAGE_SIZE: u16 = 10_u16; pub const DEFAULT_MAX_PUBLIC_MESSAGE_LEN: u16 = 280_u16; pub const DEFAULT_MAX_TAG_LEN: u8 = 255_u8; pub const DEFAULT_MAX_NUMBER_OF_TAGS: u8 = 20_u8; pub const DEFAULT_MAX_THUMBNAIL_IMG_SIZE: u32 = 65536_u32; pub const DEFAULT_MAX_CONTENTS_DATA_LEN: u16 = 1024_u16; pub const DEFAULT_MAX_HANDLE_LEN: u16 = 64_u16; pub const DEFAULT_MAX_DESCRIPTION_LEN: u16 = 280_u16; pub const DEFAULT_MAX_VIEW_SETTINGS_LEN: u16 = 4096_u16; pub const DEFAULT_MAX_PRIVATE_SETTINGS_LEN: u16 = 4096_u16; pub fn valid_transaction_fee(val: Option<Fee>) -> StdResult<Fee> { match val { Some(v) => { if v.commission_rate_nom > v.commission_rate_denom { Err(StdError::generic_err("invalid fee, > 100%")) } else { Ok(v) } } None => Ok(DEFAULT_TRANSACTION_FEE), } } pub fn valid_max_query_page_size(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid max_query_page_size")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_query_page_size"))) } } None => Ok(DEFAULT_MAX_QUERY_PAGE_SIZE), } } // limit the max public message size to values in 1..65535, default 280 bytes pub fn valid_max_public_message_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid max_public_message_len")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_public_message_len"))) } } None => Ok(DEFAULT_MAX_PUBLIC_MESSAGE_LEN), } } // limit the max tag len to values in 1..255, default 64 bytes pub fn valid_max_tag_len(val: Option<i32>) -> StdResult<u8> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid max_tag_len")) } else { u8::try_from(v).or_else(|_| 
Err(StdError::generic_err("invalid max_tag_len"))) } } None => Ok(DEFAULT_MAX_TAG_LEN), } } // limit the max number of tags per fardel to values in 1..255, default 10 pub fn valid_max_number_of_tags(val: Option<i32>) -> StdResult<u8> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid max_number_of_tags")) } else { u8::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_number_of_tags"))) } } None => Ok(DEFAULT_MAX_NUMBER_OF_TAGS), } } // limit the max thumbnail img size in bytes to u32, default 64K pub fn valid_max_thumbnail_img_size(val: Option<i32>) -> StdResult<u32> { match val { Some(v) => u32::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_thumbnail_img_size"))), None => Ok(DEFAULT_MAX_THUMBNAIL_IMG_SIZE), } } // limit the max contents data to values in 1..65535, default 1024 bytes pub fn valid_max_contents_data_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid_max_contents_data_len")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_contents_data_len"))) } } None => Ok(DEFAULT_MAX_CONTENTS_DATA_LEN), } } // limit the max handle length (in bytes) to values in 8..65535, default 64 bytes pub fn valid_max_handle_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 8 { Err(StdError::generic_err("invalid_max_handle_length")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_handle_length"))) } } None => Ok(DEFAULT_MAX_HANDLE_LEN), } } // limit the max description length (in bytes) to values in 1..65535, default 280 bytes pub fn valid_max_description_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid_max_description_length")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_description_length"))) } } None => Ok(DEFAULT_MAX_DESCRIPTION_LEN), } } // limit the max view settings length (in 
bytes) to values in 1..65535, default 4096 bytes pub fn valid_max_view_settings_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid_max_view_settings_length")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_view_settings_length"))) } } None => Ok(DEFAULT_MAX_VIEW_SETTINGS_LEN), } } // limit the max private settings length (in bytes) to values in 1..65535, default 4096 bytes pub fn valid_max_private_settings_len(val: Option<i32>) -> StdResult<u16> { match val { Some(v) => { if v < 1 { Err(StdError::generic_err("invalid_max_private_settings_length")) } else { u16::try_from(v) .or_else(|_| Err(StdError::generic_err("invalid max_private_settings_length"))) } } None => Ok(DEFAULT_MAX_PRIVATE_SETTINGS_LEN), } } // check valid seal time for a fardel pub fn valid_seal_time(val: Option<i32>) -> StdResult<u64> { match val { Some(v) => u64::try_from(v).or_else(|_| Err(StdError::generic_err("invalid seal_time"))), None => Ok(0_u64), } } pub fn has_whitespace(s: &String) -> bool { let mut string_copy = s.clone(); string_copy.retain(|c| !c.is_whitespace()); return string_copy.len() != s.len(); }
use std::ops;
use std::fmt;

/// 3-component integer vector. `ops::Index` indexing is 1-based
/// (`v[1]` = x, `v[2]` = y, `v[3]` = z), following mathematical convention.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Vector([i32; 3]);

/// 3x3 integer matrix, stored row-major. `(row, col)` indexing is 1-based.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Matrix([i32; 9]);

impl Vector {
    pub fn new(x: i32, y: i32, z: i32) -> Self {
        Self([x, y, z])
    }

    /// The zero vector.
    pub fn zeros() -> Self {
        Self([0; 3])
    }

    #[inline]
    pub fn x(&self) -> i32 {
        self.0[0]
    }

    #[inline]
    pub fn y(&self) -> i32 {
        self.0[1]
    }

    #[inline]
    pub fn z(&self) -> i32 {
        self.0[2]
    }

    /// In-place component-wise addition; returns `self` for chaining.
    pub fn add_vec(&mut self, other: &Vector) -> &mut Self {
        for i in 0..3 {
            self.0[i] += other.0[i];
        }
        self
    }

    /// In-place component-wise (Hadamard) product; returns `self` for chaining.
    pub fn mul_vec(&mut self, other: &Vector) -> &mut Self {
        for i in 0..3 {
            self.0[i] *= other.0[i];
        }
        self
    }

    /// In-place scalar multiplication; returns `self` for chaining.
    pub fn mul_scal(&mut self, scalar: i32) -> &mut Self {
        for i in 0..3 {
            self.0[i] *= scalar;
        }
        self
    }

    /// Decomposes into the three axis-aligned component vectors.
    pub fn components(&self) -> [Vector; 3] {
        [
            Vector::new(self[1], 0, 0),
            Vector::new(0, self[2], 0),
            Vector::new(0, 0, self[3]),
        ]
    }

    /// Decomposes into exactly two axis-aligned component vectors.
    ///
    /// # Panics
    /// Panics unless exactly two of the three components are non-zero.
    pub fn components2(&self) -> [Vector; 2] {
        // Safe rewrite of the previous MaybeUninit + transmute version:
        // collect the (at most two) non-zero axis components into Options.
        let mut first: Option<Vector> = None;
        let mut second: Option<Vector> = None;
        for j in 1..=3 {
            if self[j] != 0 {
                let mut v = Vector::zeros();
                v[j] = self[j];
                if first.is_none() {
                    first = Some(v);
                } else if second.is_none() {
                    second = Some(v);
                } else {
                    panic!("too many non-zero components");
                }
            }
        }
        match (first, second) {
            (Some(a), Some(b)) => [a, b],
            _ => panic!("too few non-zero components"),
        }
    }
}

impl ops::Add for &Vector {
    type Output = Vector;

    fn add(self, other: Self) -> Self::Output {
        let mut r = self.clone();
        r.add_vec(other);
        r
    }
}

// `*` between vectors is the dot product (scalar result).
impl ops::Mul for &Vector {
    type Output = i32;

    fn mul(self, other: Self) -> Self::Output {
        self.x() * other.x() + self.y() * other.y() + self.z() * other.z()
    }
}

impl fmt::Display for Vector {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{}, {}, {}]", self.x(), self.y(), self.z())
    }
}

// 1-based indexing: v[1] = x, v[2] = y, v[3] = z.
impl ops::Index<usize> for Vector {
    type Output = i32;

    fn index(&self, rc: usize) -> &Self::Output {
        &self.0[rc - 1]
    }
}

impl ops::IndexMut<usize> for Vector {
    fn index_mut(&mut self, rc: usize) -> &mut Self::Output {
        &mut self.0[rc - 1]
    }
}

impl From<&[i32; 3]> for Vector {
    fn from(a: &[i32; 3]) -> Self {
        Vector::new(a[0], a[1], a[2])
    }
}

impl std::cmp::PartialEq<[i32; 3]> for Vector {
    fn eq(&self, other: &[i32; 3]) -> bool {
        &self.0 == other
    }
}

impl std::convert::AsRef<[i32; 3]> for Vector {
    fn as_ref(&self) -> &[i32; 3] {
        &self.0
    }
}

impl Matrix {
    pub fn zeros() -> Self {
        Self([0; 9])
    }

    /// Identity matrix.
    pub fn diag() -> Self {
        Self([1, 0, 0, 0, 1, 0, 0, 0, 1])
    }

    /// Constructs from a raw row-major array.
    pub fn raw(x: [i32; 9]) -> Self {
        Self(x)
    }

    /// 90-degree rotation about the X axis; `cw` flips the direction.
    pub fn rotation_x(cw: bool) -> Self {
        let s = if cw { -1 } else { 1 };
        Self([1, 0, 0, 0, 0, -s, 0, s, 0])
    }

    /// 90-degree rotation about the Y axis; `cw` flips the direction.
    pub fn rotation_y(cw: bool) -> Self {
        let s = if cw { -1 } else { 1 };
        Self([0, 0, s, 0, 1, 0, -s, 0, 0])
    }

    /// 90-degree rotation about the Z axis; `cw` flips the direction.
    pub fn rotation_z(cw: bool) -> Self {
        let s = if cw { -1 } else { 1 };
        Self([0, -s, 0, s, 0, 0, 0, 0, 1])
    }

    /// Matrix product `self * other`.
    pub fn mul_mat(&self, other: &Self) -> Matrix {
        let mut res = Self::zeros();
        for i in 1..=3 {
            for j in 1..=3 {
                for k in 1..=3 {
                    res[(i, j)] += self[(i, k)] * other[(k, j)];
                }
            }
        }
        res
    }

    /// Matrix-vector product `self * vec`.
    pub fn mul_vec(&self, vec: &Vector) -> Vector {
        let mut res = Vector::zeros();
        for r in 1..=3 {
            for c in 1..=3 {
                res[r] += self[(r, c)] * vec[c];
            }
        }
        res
    }

    /// In-place scalar multiplication of every entry.
    pub fn mul_scalar(&mut self, s: i32) {
        for i in 0..9 {
            self.0[i] *= s;
        }
    }

    /// Determinant via the rule of Sarrus.
    pub fn determinant(&self) -> i32 {
        let [a11, a12, a13, a21, a22, a23, a31, a32, a33] = self.0;
        a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32
            - a31 * a22 * a13 - a32 * a23 * a11 - a33 * a21 * a12
    }

    /// Inverse via the adjugate.
    ///
    /// NOTE: only works when |self.determinant()| = 1 (e.g. the rotation
    /// matrices above), since entries are integers; returns `None` otherwise.
    pub fn inverse(&self) -> Option<Matrix> {
        let det = self.determinant();
        if det != 1 && det != -1 {
            return None;
        }
        let [a, b, c, d, e, f, g, h, i] = self.0;
        let res = [
            e * i - f * h, -b * i + c * h, b * f - c * e,
            -d * i + f * g, a * i - c * g, -a * f + c * d,
            d * h - e * g, -a * h + b * g, a * e - b * d,
        ];
        let mut m = Self::raw(res);
        // Dividing by det equals multiplying by det here, because det is ±1.
        m.mul_scalar(det);
        Some(m)
    }
}

// 1-based (row, col) indexing into the row-major backing array.
impl ops::Index<(usize, usize)> for Matrix {
    type Output = i32;

    fn index(&self, rc: (usize, usize)) -> &Self::Output {
        &self.0[(rc.0 - 1) * 3 + rc.1 - 1]
    }
}

impl ops::IndexMut<(usize, usize)> for Matrix {
    fn index_mut(&mut self, rc: (usize, usize)) -> &mut Self::Output {
        &mut self.0[(rc.0 - 1) * 3 + rc.1 - 1]
    }
}

impl fmt::Display for Matrix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for r in 1..=3 {
            write!(f, "|{}, {}, {}|", self[(r, 1)], self[(r, 2)], self[(r, 3)])?;
            if r < 3 {
                write!(f, "\n")?;
            }
        }
        Ok(())
    }
}

impl ops::Mul<&Vector> for &Matrix {
    type Output = Vector;

    fn mul(self, other: &Vector) -> Self::Output {
        self.mul_vec(other)
    }
}

impl ops::Mul for &Matrix {
    type Output = Matrix;

    fn mul(self, other: &Matrix) -> Self::Output {
        self.mul_mat(other)
    }
}
let args: Vec<String> = env::args().collect(); match args.len() { 1 => panic!("Please pass port number to command line."), _ => (), } let port = &args[1]; let address = format!("localhost:{}", port); let listener = TcpListener::bind(address)?; //
// NOTE(review): svd2rust-style generated accessors for the "Power control
// register 2" (see spec doc below); presumably regenerated from the device
// SVD — confirm before hand-editing.
#[doc = "Register `CR2` reader"]
pub type R = crate::R<CR2_SPEC>;
#[doc = "Register `CR2` writer"]
pub type W = crate::W<CR2_SPEC>;
#[doc = "Field `PVDE` reader - Power voltage detector enable"]
pub type PVDE_R = crate::BitReader;
#[doc = "Field `PVDE` writer - Power voltage detector enable"]
pub type PVDE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PVDFT` reader - Power voltage detector falling threshold selection"]
pub type PVDFT_R = crate::FieldReader;
#[doc = "Field `PVDFT` writer - Power voltage detector falling threshold selection"]
pub type PVDFT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `PVDRT` reader - Power voltage detector rising threshold selection"]
pub type PVDRT_R = crate::FieldReader;
#[doc = "Field `PVDRT` writer - Power voltage detector rising threshold selection"]
pub type PVDRT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `PVMENDAC` reader - PVMENDAC"]
pub type PVMENDAC_R = crate::BitReader;
#[doc = "Field `PVMENDAC` writer - PVMENDAC"]
pub type PVMENDAC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PVMENUSB` reader - PVMENUSB"]
pub type PVMENUSB_R = crate::BitReader;
#[doc = "Field `PVMENUSB` writer - PVMENUSB"]
pub type PVMENUSB_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IOSV` reader - IOSV"]
pub type IOSV_R = crate::BitReader;
#[doc = "Field `IOSV` writer - IOSV"]
pub type IOSV_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USV` reader - USV"]
pub type USV_R = crate::BitReader;
#[doc = "Field `USV` writer - USV"]
pub type USV_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 0 - Power voltage detector enable"]
    #[inline(always)]
    pub fn pvde(&self) -> PVDE_R {
        PVDE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bits 1:3 - Power voltage detector falling threshold selection"]
    #[inline(always)]
    pub fn pvdft(&self) -> PVDFT_R {
        PVDFT_R::new(((self.bits >> 1) & 7) as u8)
    }
    #[doc = "Bits 4:6 - Power voltage detector rising threshold selection"]
    #[inline(always)]
    pub fn pvdrt(&self) -> PVDRT_R {
        PVDRT_R::new(((self.bits >> 4) & 7) as u8)
    }
    #[doc = "Bit 7 - PVMENDAC"]
    #[inline(always)]
    pub fn pvmendac(&self) -> PVMENDAC_R {
        PVMENDAC_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - PVMENUSB"]
    #[inline(always)]
    pub fn pvmenusb(&self) -> PVMENUSB_R {
        PVMENUSB_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - IOSV"]
    #[inline(always)]
    pub fn iosv(&self) -> IOSV_R {
        IOSV_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - USV"]
    #[inline(always)]
    pub fn usv(&self) -> USV_R {
        USV_R::new(((self.bits >> 10) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Power voltage detector enable"]
    #[inline(always)]
    #[must_use]
    pub fn pvde(&mut self) -> PVDE_W<CR2_SPEC, 0> {
        PVDE_W::new(self)
    }
    #[doc = "Bits 1:3 - Power voltage detector falling threshold selection"]
    #[inline(always)]
    #[must_use]
    pub fn pvdft(&mut self) -> PVDFT_W<CR2_SPEC, 1> {
        PVDFT_W::new(self)
    }
    #[doc = "Bits 4:6 - Power voltage detector rising threshold selection"]
    #[inline(always)]
    #[must_use]
    pub fn pvdrt(&mut self) -> PVDRT_W<CR2_SPEC, 4> {
        PVDRT_W::new(self)
    }
    #[doc = "Bit 7 - PVMENDAC"]
    #[inline(always)]
    #[must_use]
    pub fn pvmendac(&mut self) -> PVMENDAC_W<CR2_SPEC, 7> {
        PVMENDAC_W::new(self)
    }
    #[doc = "Bit 8 - PVMENUSB"]
    #[inline(always)]
    #[must_use]
    pub fn pvmenusb(&mut self) -> PVMENUSB_W<CR2_SPEC, 8> {
        PVMENUSB_W::new(self)
    }
    #[doc = "Bit 9 - IOSV"]
    #[inline(always)]
    #[must_use]
    pub fn iosv(&mut self) -> IOSV_W<CR2_SPEC, 9> {
        IOSV_W::new(self)
    }
    #[doc = "Bit 10 - USV"]
    #[inline(always)]
    #[must_use]
    pub fn usv(&mut self) -> USV_W<CR2_SPEC, 10> {
        USV_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Power control register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR2_SPEC;
impl crate::RegisterSpec for CR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr2::R`](R) reader structure"]
impl crate::Readable for CR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr2::W`](W) writer structure"]
impl crate::Writable for CR2_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR2 to value 0"]
impl crate::Resettable for CR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use derive_more::Display;
use std::{result, str::Utf8Error};
use thiserror::Error;

/// The error type returned by the [Command::parse] method.
///
/// [Command::parse]: ./enum.Command.html#method.parse
#[derive(Debug, Error, PartialEq, Eq)]
#[error("parse error: {kind}")]
pub struct ParseError {
    kind: ParseErrorKind,
}

/// A list specifying categories of Parse errors. It is meant to be used with the [ParseError]
/// type.
///
/// [ParseError]: ./struct.ParseError.html
#[derive(Clone, Eq, PartialEq, Debug, Display)]
// Every variant intentionally shares the `Invalid` prefix, hence the allow.
#[allow(clippy::enum_variant_names)]
pub enum ParseErrorKind {
    /// The client issued an invalid command (e.g. required parameters are missing).
    #[display(fmt = "Invalid command")]
    InvalidCommand,
    /// Non-UTF8 character encountered.
    #[display(fmt = "Non-UTF8 character while parsing")]
    InvalidUtf8,
    /// Invalid end-of-line character.
    #[display(fmt = "Invalid end-of-line")]
    InvalidEol,
}

impl ParseError {
    /// Returns the corresponding `ParseErrorKind` for this error.
    pub fn kind(&self) -> &ParseErrorKind {
        &self.kind
    }
}

impl From<ParseErrorKind> for ParseError {
    fn from(kind: ParseErrorKind) -> ParseError {
        ParseError { kind }
    }
}

// Lets `?` convert str::from_utf8 failures directly into a ParseError.
impl From<Utf8Error> for ParseError {
    fn from(_: Utf8Error) -> ParseError {
        ParseError {
            kind: ParseErrorKind::InvalidUtf8,
        }
    }
}

/// The Result type used in this module.
pub type Result<T> = result::Result<T, ParseError>;
//! An example of generating basic shapes extern crate image; extern crate line_drawing; fn draw_circle(imgbuf: &mut image::RgbaImage, xc: i32, yc: i32, r: i32) { for (x, y) in line_drawing::BresenhamCircle::new(xc, yc, r) { imgbuf.put_pixel(x as u32, y as u32, image::Rgba([255, 255, 0, 255])); } } fn draw_lines(imgbuf: &mut image::RgbaImage, lines: &[((u32, u32), (u32, u32))], anti_alias: bool) { for (p_1, p_2) in lines.iter() { let p_1 = (p_1.0 as f32, p_1.1 as f32); let p_2 = (p_2.0 as f32, p_2.1 as f32); for ((x, y), alpha) in line_drawing::XiaolinWu::<f32, i32>::new(p_1, p_2) { let pixel_color = image::Rgba([255, 0, 0, if anti_alias {(alpha*255.0) as u8} else {255} ]) ; imgbuf.put_pixel(x as u32, y as u32, pixel_color); } } } fn draw_triangle(imgbuf: &mut image::RgbaImage, points: [(u32, u32); 3], anti_alias: bool) { let lines = [ (points[0], points[1]), (points[0], points[2]), (points[1], points[2]) ]; return draw_lines(imgbuf, &lines, anti_alias); } fn draw_rectangle(imgbuf: &mut image::RgbaImage, top_left: (u32, u32), bottom_right: (u32, u32)) { let top_right = (bottom_right.0, top_left.1); let bottom_left = (top_left.0, bottom_right.1); let lines = [ (top_left, top_right), (top_right, bottom_right), (bottom_right, bottom_left), (bottom_left, top_left) ]; return draw_lines(imgbuf, &lines, false); } fn main() { let mut imgbuf: image::RgbaImage = image::ImageBuffer::new(800, 800); // triangle let tri_points = [ (400,100), (600,300), (200,300) ]; draw_triangle(&mut imgbuf, tri_points, true); draw_rectangle(&mut imgbuf, (200, 300), (600, 700)); draw_circle(&mut imgbuf, 80, 80, 50); imgbuf.save("basic_shapes.png").unwrap(); }
use std::{path::Path, process::ExitStatus, str::FromStr}; use crate::poc::TestMetadata; use chrono::{DateTime, Local}; use duct::{cmd, Expression}; pub fn cargo_command( subcommand: &str, metadata: &TestMetadata, path: impl AsRef<Path>, ) -> Expression { let command_vec = cargo_command_vec(subcommand, metadata); cmd_remove_cargo_envs(cmd(&command_vec[0], &command_vec[1..]).dir(path.as_ref())) } pub fn cargo_command_vec(subcommand: &str, metadata: &TestMetadata) -> Vec<String> { let mut command_vec = vec![String::from("cargo")]; command_vec.push(String::from(subcommand)); for flag in &metadata.cargo_flags { command_vec.push(flag.clone()); } command_vec } pub fn cmd_remove_cargo_envs(mut expression: Expression) -> Expression { for env_name in &[ "CARGO", "CARGO_HOME", "CARGO_MANIFEST_DIR", "CARGO_PKG_AUTHORS", "CARGO_PKG_DESCRIPTIOn", "CARGO_PKG_HOMEPAGE", "CARGO_PKG_LICENSE", "CARGO_PKG_LICENSE_FILE", "CARGO_PKG_NAME", "CARGO_PKG_REPOSITORY", "CARGO_PKG_VERSION", "CARGO_PKG_VERSION_MAJOR", "CARGO_PKG_VERSION_MINOR", "CARGO_PKG_VERSION_PATCH", "CARGO_PKG_VERSION_PRE", "RUSTUP_HOME", "RUSTUP_TOOLCHAIN", "RUSTUP_RECURSION", ] { expression = expression.env_remove(env_name); } expression } pub fn cmd_run_silent(expression: Expression, path: impl AsRef<Path>) -> Expression { expression.stdout_null().stderr_null().dir(path.as_ref()) } // https://man7.org/linux/man-pages/man7/signal.7.html pub fn signal_name(signal: i32) -> &'static str { match signal { 1 => "SIGHUP", 2 => "SIGINT", 3 => "SIGQUIT", 4 => "SIGILL", 5 => "SIGTRAP", 6 => "SIGABRT", 7 => "SIGBUS", 8 => "SIGFPE", 9 => "SIGKILL", 10 => "SIGUSR1", 11 => "SIGSEGV", 12 => "SIGUSR2", 13 => "SIGPIPE", 14 => "SIGALRM", 15 => "SIGTERM", 16 => "SIGTKFLT", 17 => "SIGCHLD", 18 => "SIGCONT", 19 => "SIGSTOP", 20 => "SIGTSTP", 21 => "SIGTTIN", 22 => "SIGTTOU", 23 => "SIGURG", 24 => "SIGXCPU", 25 => "SIGXFSZ", 26 => "SIGVTARLM", 27 => "SIGPROF", 28 => "SIGWINCH", 29 => "SIGIO", 30 => "SIGPWR", 31 => "SIGSYS", _ => "Unknown", } 
} pub fn exit_status_string(exit_status: &ExitStatus) -> String { use std::os::unix::process::ExitStatusExt; if let Some(signal) = exit_status.signal() { format!( "Terminated with signal {} ({})", signal, signal_name(signal) ) } else if let Some(return_code) = exit_status.code() { format!("Return code {}", return_code) } else { String::from("Unknown return status") } } /// Local date in "%Y-%m-%d" format pub fn today_str() -> String { let local_now: DateTime<Local> = Local::now(); local_now.format("%Y-%m-%d").to_string() } pub fn today_toml_date() -> toml::value::Datetime { FromStr::from_str(&today_str()).unwrap() }
//! A sequence of values with a given error.
//!
//! # Examples
//!
//! Quick plot.
//! ```no_run
//! use preexplorer::prelude::*;
//! let data = (0..10).map(|i| (i..10 + i));
//! let seq_err = pre::SequenceError::new(data).plot("my_identifier").unwrap();
//! ```
//!
//! Compare ``SequenceError``s.
//! ```no_run
//! use preexplorer::prelude::*;
//! pre::SequenceErrors::new(vec![
//!     pre::SequenceError::new((0..10).map(|i| (i..10 + i))),
//!     pre::SequenceError::new((0..10).map(|i| (i..10 + i))),
//! ])
//! .plot("my_identifier").unwrap();
//! ```

// Traits
pub use crate::traits::{Configurable, Plotable, Saveable};
use core::ops::Add;

// Structs
use average::Variance;

/// Compare various ``SequenceError``s.
pub mod comparison;

pub use comparison::SequenceErrors;

/// Sequence of values with a given error.
#[derive(Debug, PartialEq, Clone)]
pub struct SequenceError {
    // (mean, error) pairs; the x-coordinate of each point is its index here.
    data: Vec<(f64, f64)>,
    config: crate::configuration::Configuration,
}

impl SequenceError {
    /// Create a new ``SequenceError`` from data.
    /// Each dataset is processed so that the final plot shows the mean of the data set and
    /// an error bar of one standard deviation.
    ///
    /// # Examples
    ///
    /// From a complicated computation.
    /// ```
    /// use preexplorer::prelude::*;
    /// let data = (0..10).map(|i| (i..10 + i));
    /// let seq_err = pre::SequenceError::new(data);
    /// ```
    pub fn new<I, J, T>(data: I) -> SequenceError
    where
        I: IntoIterator<Item = J>,
        J: IntoIterator<Item = T>,
        T: Into<f64>,
    {
        let data: Vec<(f64, f64)> = data
            .into_iter()
            .map(|j| {
                // NOTE(review): `Variance::error` from the `average` crate is
                // presumably the standard error of the mean, not one standard
                // deviation as the doc above says — confirm against the crate
                // docs and reconcile.
                let v: Variance = j.into_iter().map(|t| t.into()).collect();
                (v.mean(), v.error())
            })
            .collect();
        let config = crate::configuration::Configuration::default();

        SequenceError { data, config }
    }
}

// `a + b` builds a SequenceErrors comparison holding both operands.
impl Add for SequenceError {
    type Output = crate::SequenceErrors;

    fn add(self, other: crate::SequenceError) -> crate::SequenceErrors {
        let mut cmp = self.into();
        cmp += other;
        cmp
    }
}

impl Configurable for SequenceError {
    fn configuration_mut(&mut self) -> &mut crate::configuration::Configuration {
        &mut self.config
    }
    fn configuration(&self) -> &crate::configuration::Configuration {
        &self.config
    }
}

impl Saveable for SequenceError {
    // One line per point: "<index>\t<mean>\t<error>".
    fn plotable_data(&self) -> String {
        let mut plotable_data = String::new();

        for (counter, (value, error)) in self.data.clone().into_iter().enumerate() {
            plotable_data.push_str(&format!("{}\t{}\t{}\n", counter, value, error));
        }

        plotable_data
    }
}

impl Plotable for SequenceError {
    // Emits a gnuplot script: the mean curve plus y-error bars from column 3.
    fn plot_script(&self) -> String {
        let mut gnuplot_script = self.opening_plot_script();

        let dashtype = match self.dashtype() {
            Some(dashtype) => dashtype,
            None => 1,
        };
        gnuplot_script += &format!(
            "plot {:?} using 1:2 with {} dashtype {}, \"\" using 1:2:3 with yerrorbars \n",
            self.data_path(),
            self.style(),
            dashtype,
        );
        gnuplot_script += &self.ending_plot_script();

        gnuplot_script
    }
}

// Collapses a set of densities into a mean/error sequence (one point per
// density), copying the densities' configuration.
impl<T> From<crate::Densities<T>> for SequenceError
where
    T: Into<f64> + core::fmt::Display + Clone,
{
    fn from(mut densities: crate::Densities<T>) -> Self {
        let data: Vec<Vec<f64>> = (0..densities.data_set.len())
            .map(|i| {
                densities.data_set[i]
                    .realizations
                    .iter()
                    .map(|t| t.clone().into())
                    .collect()
            })
            .collect();
        let mut seq_err = SequenceError::new(data);
        let config = seq_err.configuration_mut();
        *config = densities.configuration_mut().clone();
        seq_err
    }
}

///////////////////////////////////////////////

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn set_style() {
        let data = vec![vec![0., 1.], vec![0., 1., 2.], vec![3., 4., 5.]];
        let mut seq = SequenceError::new(data);
        seq.set_style("points");

        assert_eq!(
            &crate::configuration::plot::style::Style::Points,
            seq.style()
        );
    }

    // NOTE(review): test name typo ("densitites" for "densities") left as-is.
    #[test]
    fn from_densitites() {
        use crate::prelude::*;
        let many_dens = (0..5).map(|_| pre::Density::new(0..10));
        let mut densities: pre::Densities<u32> = pre::Densities::new(many_dens);
        densities.set_title("My title");
        let seq_err = pre::SequenceError::from(densities.clone());
        assert_eq!(seq_err.title(), densities.title());
    }
}
//! `lockdown` is an E2EE implementation for the Harmony protocol. /// Generated code from protobuf protocol files (secret service and common Harmony types). pub mod api; /// E2EE implementation for the Harmony protocol. pub mod e2ee;
#![allow(dead_code, unused_must_use, unused_imports, unstable)] use std::str::FromStr; use controller::Reader; use utils; // Each line in the database file is a 'Record' that contains the following data to be read // and serialized. pub struct Record { // Unique (?) id given to each record. pub id: u64, // The payload of each record that is Base64 encoded and JSON serialized. pub payload: String, // Any necessary metedata needed to identify the record. // TODO Base64 encoded and JSON encoded? pub metadata: String, } pub struct Factory { reader: Reader, } pub type RecordResult<T, E> = Result<T, E>; //TODO Add some factory traits here if necessary pub trait RecordFactory { fn new(reader: Reader) -> Self; fn create(&self, data: String) -> RecordResult<Record, String>; fn create_from_enc(&self, data: String) -> RecordResult<Record, String>; } impl RecordFactory for Factory { fn new(reader: Reader) -> Factory { Factory { reader: reader, } } fn create(&self, data: String) -> RecordResult<Record, String> { let vec_of_data = utils::string_slice(data); let id_num = FromStr::from_str(&*vec_of_data[0]); let id_value: u64 = match id_num { Ok(value) => value, Err(..) => -1 // This is a failure value }; Ok(Record { id: id_value, payload: vec_of_data[1].clone(), metadata: vec_of_data[2].clone(), }) } fn create_from_enc(&self, data: String) -> RecordResult<Record, String> { let vec_of_data = utils::string_slice(data); let id_num = FromStr::from_str(&*vec_of_data[0]); let id_value: u64 = match id_num { Ok(value) => value, Err(..) 
=> -1, // This is a failure value }; let enc_payload = String::from_str(&*vec_of_data[1]); let payload = utils::decode_record(enc_payload); let enc_metadata = String::from_str(&*vec_of_data[2]); let metadata = utils::decode_record(enc_metadata); Ok(Record { id: id_value, payload: payload, metadata: metadata, }) } } #[test] fn test_create_record() { let input = String::from_str("1 payload metadata"); let expected = Record { id: 1, payload: String::from_str("payload"), metadata: String::from_str("metadata"), }; let reader = Reader::new(&Path::new("tests/base-test.txt")); let factory: Factory = RecordFactory::new(reader); let output: Record = factory.create(input).ok().expect("Parsing failed."); assert_eq!(expected.id, output.id); assert_eq!(expected.payload, output.payload); assert_eq!(expected.metadata, output.metadata); } #[test] fn test_create_from_encoded() { let input = String::from_str("5 cGF5bG9hZA== bWV0YWRhdGE="); let expected = Record { id: 5, payload: String::from_str("payload"), metadata: String::from_str("metadata"), }; let reader = Reader::new(&Path::new("tests/base-test.txt")); let factory: Factory = RecordFactory::new(reader); let output: Record = factory.create_from_enc(input).ok().expect("Parsing failed."); assert_eq!(expected.id, output.id); assert_eq!(expected.payload, output.payload); assert_eq!(expected.metadata, output.metadata); }
struct Any<'a> { a: &'a mut i32, b: &'a mut i32, } struct Point<'a> { x: &'a mut Any<'a>, y: &'a mut Any<'a>, } fn main() { let fuck = 20; let temp = Any { a: &mut 39, b: &mut 20, }; let hello = temp; let fuck2 = fuck; let mut x = &mut Point { x: &mut Any { a: &mut 10, b: &mut 20, }, y: &mut Any { a: &mut 20, b: &mut 28, }, }; { let mut y = &mut x; let mut z = &mut y; *(***z).x.a = 11; *(***z).x.b = 21; *(***z).y.a = 100; *(***z).y.b = 10; println!("{},{}", *z.x.a, *z.y.b); } let hell = 5; println!("{},{}", *x.x.a, *x.y.b); println!("{}", fuck); println!("{},{}", *hello.a, *hello.b); println!("{}", hell / 2); }
use crate::app::video::Palette; use rustzx_core::{ host::{FrameBuffer, FrameBufferSource}, zx::video::colors::{ZXBrightness, ZXColor}, }; const RGBA_PIXEL_SIZE: usize = 4; #[derive(Clone)] pub struct FrameBufferContext; pub struct RgbaFrameBuffer { buffer: Vec<u8>, palette: Palette, buffer_row_size: usize, } impl FrameBuffer for RgbaFrameBuffer { type Context = FrameBufferContext; fn new( width: usize, height: usize, _source: FrameBufferSource, _context: Self::Context, ) -> Self { Self { buffer: vec![0u8; width * height * RGBA_PIXEL_SIZE], palette: Palette::default(), buffer_row_size: width * RGBA_PIXEL_SIZE, } } fn set_color(&mut self, x: usize, y: usize, color: ZXColor, brightness: ZXBrightness) { let buffer_pos = y * self.buffer_row_size + x * RGBA_PIXEL_SIZE; self.palette .get_rgba(color, brightness) .iter() .copied() .zip(&mut self.buffer[buffer_pos..buffer_pos + RGBA_PIXEL_SIZE]) .for_each(|(source, dest)| *dest = source); } } impl RgbaFrameBuffer { pub fn rgba_data(&self) -> &[u8] { &self.buffer } }
use std::sync::Arc;

use crate::*;
use math::Rect;
use wgpu::util::DeviceExt;

/// Batch key for draw calls: consecutive draws batch together only while the
/// texture and shader stay the same.
#[derive(Clone)]
pub struct BufferedRenderArgs {
    pub(crate) texture: Texture,
    pub(crate) shader: Shader,
}

impl BufferedRenderArgs {
    // Snapshot the texture/shader handles of a renderable (handle clones).
    fn new<D: WgpuDevice>(desc: &Renderable<'_, Renderer<D>>) -> Self {
        Self {
            texture: desc.texture.clone(),
            shader: desc.shader.clone(),
        }
    }
}

impl PartialEq for BufferedRenderArgs {
    // Identity comparison on the underlying Arcs, not structural equality:
    // two args are "equal" only if they share the same GPU resources.
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.texture.internal, &other.texture.internal)
            && Arc::ptr_eq(&self.shader.internal, &other.shader.internal)
    }
}

impl Eq for BufferedRenderArgs {}

/// Render context that accumulates vertices/indices of consecutive draws
/// sharing the same `BufferedRenderArgs` and flushes them as a single
/// render pass.
pub struct BufferedRenderer<Device, R>
where
    Device: WgpuDevice,
    R: WgpuRenderTargetDesc<Device>,
{
    target_desc: R,
    // Batch key currently being accumulated; None when the batch is empty.
    current_args: Option<BufferedRenderArgs>,
    // Geometry accumulated for the current batch.
    verts: Vec<Vertex>,
    indices: Vec<u16>,
    // Deferred clear color, applied as the load op of the next flush (or by
    // clear_immediate at present() time if nothing was drawn after it).
    pending_clear_color: Option<[f32; 4]>,
    view_matrix: mint::ColumnMatrix4<f32>,
    encoder: wgpu::CommandEncoder,
    device_marker: std::marker::PhantomData<Device>,
}

impl<Device, R> BufferedRenderer<Device, R>
where
    Device: WgpuDevice,
    R: WgpuRenderTargetDesc<Device>,
{
    /// Begin rendering on `target_desc` and set up an empty batch with an
    /// identity view matrix.
    pub fn new(target_desc: R, encoder: wgpu::CommandEncoder) -> Result<Self> {
        target_desc.begin_render()?;
        let identity: mint::ColumnMatrix4<f32> = glam::Mat4::IDENTITY.into();
        Ok(Self {
            target_desc,
            current_args: None,
            verts: vec![],
            indices: vec![],
            pending_clear_color: None,
            view_matrix: identity,
            encoder,
            device_marker: Default::default(),
        })
    }

    /// Flush pending draws, then encode a standalone render pass whose only
    /// effect is clearing the target to `color`.
    fn clear_immediate(&mut self, color: Color<f32>) -> Result<()> {
        self.flush()?;
        let encoder = &mut self.encoder;
        self.target_desc.with_view(|view| {
            // The pass is dropped immediately; the Clear load op does the work.
            let _rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: color.r as f64,
                            g: color.g as f64,
                            b: color.b as f64,
                            a: color.a as f64,
                        }),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
                label: None,
            });
            Ok(())
        })
    }

    /// Encode the currently accumulated batch, if any.
    pub fn flush(&mut self) -> Result<()> {
        match &self.current_args {
            Some(args) => {
                // Clone the key so do_flush can borrow self mutably.
                let args = args.clone();
                self.do_flush(&args)
            }
            _ => Ok(()),
        }
    }

    // Encode one render pass drawing the accumulated geometry with the
    // batch's texture/shader, honoring any pending clear as the load op.
    fn do_flush(&mut self, args: &BufferedRenderArgs) -> Result<()> {
        let renderer = self.target_desc.renderer().clone();
        // Take the buffers out so the closure below can borrow self freely.
        let verts = std::mem::take(&mut self.verts);
        let indices = std::mem::take(&mut self.indices);
        let pending_clear = self.pending_clear_color.take();
        renderer.wgpu_device().with_device_info(|info| {
            let vertex_buf = info
                .device
                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: None,
                    contents: bytemuck::cast_slice(&verts),
                    usage: wgpu::BufferUsage::VERTEX,
                });
            let index_buf = info
                .device
                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: None,
                    contents: bytemuck::cast_slice(&indices),
                    usage: wgpu::BufferUsage::INDEX,
                });
            let bind_group = args.shader.internal.bind_params(
                info.device,
                self.target_desc.dimensions(),
                self.view_matrix,
                &args.texture,
            );
            // A pending clear becomes the pass's load op; otherwise keep
            // whatever is already in the target.
            let load_op = match pending_clear {
                Some(c) => wgpu::LoadOp::Clear(wgpu::Color {
                    r: c[0] as f64,
                    g: c[1] as f64,
                    b: c[2] as f64,
                    a: c[3] as f64,
                }),
                None => wgpu::LoadOp::Load,
            };
            // Scope render pass so that the render is encoded before the buffers are cleared.
            let encoder = &mut self.encoder;
            let indices_len = indices.len() as u32;
            self.target_desc.with_view(|view| {
                let mut rpass = args
                    .shader
                    .internal
                    .begin_render_pass(view, encoder, load_op);
                rpass.set_bind_group(0, &bind_group, &[]);
                rpass.set_vertex_buffer(0, vertex_buf.slice(..));
                rpass.set_index_buffer(index_buf.slice(..), wgpu::IndexFormat::Uint16);
                rpass.draw_indexed(0..indices_len, 0, 0..1);
                Ok(())
            })?;
            // NOTE(review): verts/indices were already taken above, so these
            // clears operate on empty vectors — harmless but redundant.
            self.verts.clear();
            self.indices.clear();
            self.current_args = None;
            Ok(())
        })
    }

    /// Append geometry to the current batch, flushing first when the batch
    /// key (texture/shader) changes. Index values are rebased onto the
    /// current vertex count.
    pub fn buffered_render(
        &mut self,
        args: &BufferedRenderArgs,
        verts: &[Vertex],
        indices: &[u16],
    ) -> Result<()> {
        if Some(args) != self.current_args.as_ref() {
            self.flush()?;
            self.current_args = Some(args.clone());
        }
        let indices: Vec<u16> = indices
            .iter()
            .map(|idx| idx + self.verts.len() as u16)
            .collect();
        self.verts.extend_from_slice(verts);
        self.indices.extend_from_slice(&indices[..]);
        Ok(())
    }
}

impl<Device, R> RenderContext<Renderer<Device>> for BufferedRenderer<Device, R>
where
    Device: WgpuDevice,
    R: WgpuRenderTargetDesc<Device>,
{
    /// Flush pending draws (they use the old matrix), then switch matrices.
    fn set_transform(&mut self, transform: mint::ColumnMatrix4<f32>) -> Result<()> {
        self.flush()?;
        self.view_matrix = transform;
        Ok(())
    }

    /// Set the clear color and mark the frame buffer for clearing. The actual clear operation
    /// will be performed when the next batched render happens, or when `present` is called,
    /// whichever comes first.
    fn clear(&mut self, color: Color<f32>) -> Result<()> {
        self.flush()?;
        self.pending_clear_color = Some(color.into());
        Ok(())
    }

    /// Draw a solid-colored axis-aligned rectangle using the standard white
    /// texture and default shader.
    fn fill_rect(&mut self, rect: &Rect<f32>, color: Color<f32>) -> Result<()> {
        let pos_topleft = glam::Vec2::from(rect.location);
        let pos_topright = pos_topleft + glam::vec2(rect.dimensions.x, 0.0);
        let pos_bottomleft = pos_topleft + glam::vec2(0.0, rect.dimensions.y);
        let pos_bottomright = pos_bottomleft + glam::vec2(rect.dimensions.x, 0.0);
        let color_arr: [f32; 4] = color.into();
        let vertex_data = [
            Vertex::ptc(pos_topleft, [0.0, 0.0], &color_arr),
            Vertex::ptc(pos_bottomleft, [0.0, 0.0], &color_arr),
            Vertex::ptc(pos_bottomright, [0.0, 0.0], &color_arr),
            Vertex::ptc(pos_topright, [0.0, 0.0], &color_arr),
        ];
        // NOTE(review): this index order (1,2,0 / 2,0,3) is unusual for a
        // two-triangle quad (typically 0,1,2 / 2,3,0) — confirm winding and
        // that both triangles cover the rect as intended.
        let index_data: &[u16] = &[1, 2, 0, 2, 0, 3];
        self.buffered_render(
            &BufferedRenderArgs {
                texture: self.target_desc.standard_resources().white_tex.clone(),
                shader: self.target_desc.standard_resources().default_shader.clone(),
            },
            &vertex_data[..],
            index_data,
        )
    }

    /// Flush remaining draws, apply a still-pending clear, submit the encoded
    /// commands to the queue, and end rendering on the target.
    fn present(mut self) -> Result<()> {
        self.flush()?;
        // A clear issued after the last draw has no flush to ride on; apply
        // it as its own pass here.
        if let Some(clear_color) = self.pending_clear_color {
            self.clear_immediate(clear_color.into())?;
        }
        let cmd = self.encoder.finish();
        self.target_desc
            .renderer()
            .wgpu_device()
            .with_device_info(|info| {
                info.queue.submit(Some(cmd));
                Ok(())
            })?;
        self.target_desc.end_render();
        Ok(())
    }

    /// Queue a renderable's geometry into the batch.
    fn draw(&mut self, renderable: &Renderable<'_, Renderer<Device>>) -> Result<()> {
        self.buffered_render(
            &BufferedRenderArgs::new(renderable),
            renderable.verts,
            renderable.indices,
        )
    }
}
//std use std::process; use std::sync::{Arc, Mutex}; use std::time::Duration; //Lazy static use lazy_static; // Log pub mod log; use log::Log; // UI pub mod ui; // Stream mod stream; use stream::Stream; // Tokio use tokio::sync::oneshot; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { let sample_for_ui: ui::SampleUiArcMutex = Arc::new(Mutex::new(Vec::new())); let sample_for_ui_clone = Arc::clone(&sample_for_ui); let (tx, mut rx) = oneshot::channel(); let stream_task = tokio::spawn(async move { let err_msg = |err| { Log::error(format!("error stream: {}", err)); process::exit(2); }; let stream = Stream::new(sample_for_ui).unwrap_or_else(|err| err_msg(err)); stream.play().unwrap_or_else(|err| { err_msg(err); }); loop { match rx.try_recv() { Ok(resp) => { if resp == true { break; } } Err(_) => std::thread::sleep(Duration::from_millis(150)), } } }); let drawing_task = tokio::spawn(async move { ui::draw_it(sample_for_ui_clone).await; tx.send(true).unwrap(); }); drawing_task.await?; stream_task.await?; Ok(()) }
use sdl2::event::Event;
use sdl2::image::{self, InitFlag, LoadTexture};
use sdl2::pixels::Color;
use specs::prelude::*;

use crate::ecs::animation::*;
use crate::ecs::collision::*;
use crate::ecs::components::*;
use crate::ecs::enemy::*;
use crate::ecs::player::*;
use crate::ecs::renderer;
use crate::ecs::resources::*;
use crate::ecs::systems::*;
use crate::ecs::weapon::*;
use crate::input;

/// Owns the window dimensions and drives the SDL2 + specs game loop.
pub struct Engine {
    window_width: u32,
    window_height: u32,
}

impl Engine {
    /// Create an engine for a window of the given size, in pixels.
    pub fn new(width: u32, height: u32) -> Self {
        Engine {
            window_width: width,
            window_height: height,
        }
    }

    /// Initialize SDL2 (video + image), load textures, build the ECS world
    /// and dispatcher, then run the main loop until a quit event arrives.
    pub fn run(&self) {
        let sdl_context = sdl2::init().unwrap();
        let video_subsystem = sdl_context
            .video()
            .expect("Could not initiate video sybsystem");
        let _image_context =
            image::init(InitFlag::PNG | InitFlag::JPG).expect("could not make image context");

        let window = video_subsystem
            .window("rusty-sdl", self.window_width, self.window_height)
            .position_centered()
            .build()
            .expect("could not make window");

        let mut canvas = window.into_canvas().build().expect("could not make canvas");
        let texture_creator = canvas.texture_creator();

        // Texture indices: 0 player, 1 boss, 2 bullet, 3 explosion
        // (order must match whatever the renderer expects).
        let textures = [
            texture_creator
                .load_texture(crate::assets::PLAYER_SPRITE_PATH)
                .expect("could not load texture"),
            texture_creator
                .load_texture(crate::assets::BOSS_SPRITE_PATH)
                .expect("could not load texture"),
            texture_creator
                .load_texture(crate::assets::BULLET_SPRITE_PATH)
                .expect("could not load texture"),
            texture_creator
                .load_texture(crate::assets::EXPLOSION_SPRITE_PATH)
                .expect("could not load texture"),
        ];

        canvas.set_draw_color(Color::RGB(0, 255, 255));
        canvas.clear();
        canvas.present();

        let mut input = input::Input::new();

        // Register systems
        let mut dispatcher = DispatcherBuilder::new()
            .with(InputSystem, "input", &[])
            .with(PositionUpdateSystem, "position updater", &[])
            .with(CollisionSystem::new(), "collision", &[])
            .with(WeaponSystem, "weapon system", &[])
            .with(LifetimeKiller, "lifetime", &[])
            .with(HealthSystem, "health", &[])
            .with(AnimationSystem, "animation", &[])
            .with(EnemySystem, "enemy", &[])
            .with(EnemySpawnerSystem::default(), "enemy spawner", &[])
            .with(PlayerRespawnSystem, "player spawner", &[])
            .build();

        // Register required components
        let mut world = World::new();
        world.register::<Weapon>();
        world.register::<Lifetime>();
        world.register::<Enemy>();
        world.register::<Player>();
        world.register::<Projectile>();
        world.register::<Damage>();

        dispatcher.setup(&mut world);
        renderer::SystemData::setup(&mut world);

        let mut last_frame_time = std::time::Instant::now();

        let mut event_pump = sdl_context.event_pump().unwrap();
        'running: loop {
            let start_time = std::time::Instant::now();
            canvas.set_draw_color(Color::RGB(0, 0, 0));
            canvas.clear();

            // Drain SDL events into the input state snapshot for this frame.
            for event in event_pump.poll_iter() {
                match event {
                    Event::Quit { .. } => {
                        break 'running;
                    }
                    Event::KeyDown { scancode, .. } => {
                        if let Some(scancode) = scancode {
                            input.key_state[scancode as usize].held = true;
                        }
                    }
                    Event::KeyUp { scancode, .. } => {
                        if let Some(scancode) = scancode {
                            input.key_state[scancode as usize].held = false;
                        }
                    }
                    Event::MouseMotion { x, y, .. } => {
                        input.mouse_pos = (x, y);
                    }
                    Event::MouseButtonDown { mouse_btn, .. } => {
                        input.mouse_state[mouse_btn as usize].held = true;
                    }
                    Event::MouseButtonUp { mouse_btn, .. } => {
                        input.mouse_state[mouse_btn as usize].held = false;
                    }
                    _ => {}
                }
            }

            {
                // Update resource state for elapsed time
                let elapsed_time = std::time::Instant::now() - last_frame_time;
                let elapsed_time = elapsed_time.as_secs_f32();
                let mut delta_time = world.write_resource::<DeltaTime>();
                *delta_time = DeltaTime(elapsed_time);
                last_frame_time = std::time::Instant::now();
            }

            {
                // Update resource state for input
                let mut input_resource = world.write_resource::<InputResource>();
                *input_resource = InputResource(input);
            }

            dispatcher.dispatch(&world);
            world.maintain();

            renderer::render(
                &mut canvas,
                Color::RGB(0, 0, 0),
                &textures,
                world.system_data(),
            )
            .expect("Render failed");

            canvas.present();

            // F12 held: print instantaneous FPS computed from this frame's
            // loop duration.
            let loop_duration = std::time::Instant::now() - start_time;
            if input.get_key(sdl2::keyboard::Scancode::F12).held {
                println!("FPS {}", 1.0 / loop_duration.as_secs_f64());
            }
        } // Loop
    }
}
#![feature(test)]

// Nightly-only benchmark suite comparing JSON parsers (serde_json, simd-json,
// gjson, ajson, json-rust, pikkr, tinyjson) on three access patterns:
// top-level property, fourth-level nested property, and full parse.
#[cfg(test)]
mod tests {
    extern crate test;
    use lazy_static::lazy_static;
    use std::{fs, path::Path};
    use test::Bencher;

    // Fixture files loaded once; benchmarks borrow (or clone) these strings.
    lazy_static! {
        static ref SMALL_JSON: String = fs::read_to_string(Path::new("json/small.json")).expect(
            "Error loading 'json/small.json'; make sure you are operating from the root directory"
        );
        static ref LARGE_JSON: String = fs::read_to_string(Path::new("json/large.json")).expect(
            "Error loading 'json/large.json'; make sure you are operating from the root directory"
        );
    }

    // Serde JSON
    #[bench]
    fn serde_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value = serde_json::from_str(&SMALL_JSON).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn serde_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value = serde_json::from_str(&LARGE_JSON).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn serde_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value = serde_json::from_str(&SMALL_JSON).unwrap();
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.14159
            );
        });
    }
    #[bench]
    fn serde_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value = serde_json::from_str(&LARGE_JSON).unwrap();
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.14159
            );
        });
    }
    #[bench]
    fn serde_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            serde_json::from_str::<serde_json::Value>(&SMALL_JSON).unwrap();
        });
    }
    #[bench]
    fn serde_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            serde_json::from_str::<serde_json::Value>(&LARGE_JSON).unwrap();
        });
    }

    // SIMD JSON
    // NOTE(review): simd-json mutates its input, hence the per-iteration
    // clone — the clone cost is included in these timings.
    #[bench]
    fn simd_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: simd_json::OwnedValue =
                simd_json::serde::from_str(&mut SMALL_JSON.clone()).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn simd_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: simd_json::OwnedValue =
                simd_json::serde::from_str(&mut LARGE_JSON.clone()).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn simd_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value =
                simd_json::serde::from_str(&mut SMALL_JSON.clone()).unwrap();
            // simd-json introduces floating-point precision problems, for some reason
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.1415900000000003
            );
        });
    }
    #[bench]
    fn simd_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: serde_json::Value =
                simd_json::serde::from_str(&mut LARGE_JSON.clone()).unwrap();
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.1415900000000003
            );
        });
    }
    #[bench]
    fn simd_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            simd_json::serde::from_str::<serde_json::Value>(&mut SMALL_JSON.clone()).unwrap();
        });
    }
    #[bench]
    fn simd_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            simd_json::serde::from_str::<serde_json::Value>(&mut LARGE_JSON.clone()).unwrap();
        });
    }

    // GJSON
    #[bench]
    fn gjson_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: gjson::Value = gjson::get(&SMALL_JSON, "topLevelProperty");
            assert_eq!(parsed.i32(), 1);
        });
    }
    #[bench]
    fn gjson_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: gjson::Value = gjson::get(&LARGE_JSON, "topLevelProperty");
            assert_eq!(parsed.i32(), 1);
        });
    }
    #[bench]
    fn gjson_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: gjson::Value = gjson::get(&SMALL_JSON, "property.subProperty.thirdLevel.pi");
            assert_eq!(parsed.f32(), 3.14159);
        });
    }
    #[bench]
    fn gjson_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: gjson::Value = gjson::get(&LARGE_JSON, "property.subProperty.thirdLevel.pi");
            assert_eq!(parsed.f32(), 3.14159);
        });
    }
    #[bench]
    #[ignore = "GJSON's parse() method delays all parsing until a property is accessed, so benchmarking is not feasible"]
    fn gjson_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            gjson::parse(&SMALL_JSON);
        });
    }
    #[bench]
    #[ignore = "GJSON's parse() method delays all parsing until a property is accessed, so benchmarking is not feasible"]
    fn gjson_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            gjson::parse(&LARGE_JSON);
        });
    }

    // A-JSON
    #[bench]
    fn ajson_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: ajson::Value = ajson::get(&SMALL_JSON, "topLevelProperty").unwrap();
            assert_eq!(parsed.to_i64(), 1);
        });
    }
    #[bench]
    fn ajson_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: ajson::Value = ajson::get(&LARGE_JSON, "topLevelProperty").unwrap();
            assert_eq!(parsed.to_i64(), 1);
        });
    }
    #[bench]
    fn ajson_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: ajson::Value =
                ajson::get(&SMALL_JSON, "property.subProperty.thirdLevel.pi").unwrap();
            assert_eq!(parsed.to_f64(), 3.14159);
        });
    }
    #[bench]
    fn ajson_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: ajson::Value =
                ajson::get(&LARGE_JSON, "property.subProperty.thirdLevel.pi").unwrap();
            assert_eq!(parsed.to_f64(), 3.14159);
        });
    }
    #[bench]
    #[ignore = "AJSON's parse() method delays about half of the time spent parsing until a property is accessed, so benchmarking is not feasible"]
    fn ajson_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            ajson::parse(&SMALL_JSON).unwrap();
        });
    }
    #[bench]
    #[ignore = "AJSON's parse() method delays about half of the time spent parsing until a property is accessed, so benchmarking is not feasible"]
    fn ajson_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            ajson::parse(&LARGE_JSON).unwrap();
        });
    }

    // json-rust
    #[bench]
    fn json_rust_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed = json::parse(&SMALL_JSON).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn json_rust_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed = json::parse(&LARGE_JSON).unwrap();
            assert_eq!(parsed["topLevelProperty"], 1);
        });
    }
    #[bench]
    fn json_rust_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed = json::parse(&SMALL_JSON).unwrap();
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.14159
            );
        });
    }
    #[bench]
    fn json_rust_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed = json::parse(&LARGE_JSON).unwrap();
            assert_eq!(
                parsed["property"]["subProperty"]["thirdLevel"]["pi"],
                3.14159
            );
        });
    }
    #[bench]
    fn json_rust_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            json::parse(&SMALL_JSON).unwrap();
        });
    }
    #[bench]
    fn json_rust_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            json::parse(&LARGE_JSON).unwrap();
        });
    }

    // Pikkr
    // "stateful" benches reuse one parser (its training amortizes across
    // iterations); "stateless" benches build a new parser every iteration.
    #[bench]
    fn pikkr_stateful_small_top_level(b: &mut Bencher) {
        let mut parser =
            pikkr_annika::Pikkr::new(&vec!["$.topLevelProperty".as_bytes()], 2).unwrap();
        b.iter(|| {
            // Pikkr has a rather low-level API
            // Maybe someday I'll write a wrapper for it...
            let parsed: u32 = String::from_utf8(
                parser
                    .parse(SMALL_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 1);
        });
    }
    #[bench]
    fn pikkr_stateful_large_top_level(b: &mut Bencher) {
        let mut parser =
            pikkr_annika::Pikkr::new(&vec!["$.topLevelProperty".as_bytes()], 2).unwrap();
        b.iter(|| {
            let parsed: u32 = String::from_utf8(
                parser
                    .parse(LARGE_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 1);
        });
    }
    #[bench]
    fn pikkr_stateless_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let mut parser =
                pikkr_annika::Pikkr::new(&vec!["$.topLevelProperty".as_bytes()], 2).unwrap();
            let parsed: u32 = String::from_utf8(
                parser
                    .parse(SMALL_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 1);
        });
    }
    #[bench]
    fn pikkr_stateless_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let mut parser =
                pikkr_annika::Pikkr::new(&vec!["$.topLevelProperty".as_bytes()], 2).unwrap();
            let parsed: u32 = String::from_utf8(
                parser
                    .parse(LARGE_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 1);
        });
    }
    #[bench]
    fn pikkr_stateful_small_fourth_level(b: &mut Bencher) {
        let mut parser =
            pikkr_annika::Pikkr::new(&vec!["$.property.subProperty.thirdLevel.pi".as_bytes()], 2)
                .unwrap();
        b.iter(|| {
            // Pikkr has a rather low-level API
            // Maybe someday I'll write a wrapper for it...
            let parsed: f32 = String::from_utf8(
                parser
                    .parse(SMALL_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 3.14159);
        });
    }
    #[bench]
    fn pikkr_stateful_large_fourth_level(b: &mut Bencher) {
        let mut parser =
            pikkr_annika::Pikkr::new(&vec!["$.property.subProperty.thirdLevel.pi".as_bytes()], 2)
                .unwrap();
        b.iter(|| {
            let parsed: f32 = String::from_utf8(
                parser
                    .parse(LARGE_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 3.14159);
        });
    }
    #[bench]
    fn pikkr_stateless_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let mut parser = pikkr_annika::Pikkr::new(
                &vec!["$.property.subProperty.thirdLevel.pi".as_bytes()],
                2,
            )
            .unwrap();
            let parsed: f32 = String::from_utf8(
                parser
                    .parse(SMALL_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap()
                    .to_vec(),
            )
            .unwrap()
            .parse()
            .unwrap();
            assert_eq!(parsed, 3.14159);
        });
    }
    #[bench]
    fn pikkr_stateless_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let mut parser = pikkr_annika::Pikkr::new(
                &vec!["$.property.subProperty.thirdLevel.pi".as_bytes()],
                2,
            )
            .unwrap();
            // NOTE(review): this bench uses from_utf8_lossy (borrowing, no
            // to_vec) while its siblings use from_utf8 + to_vec — the work
            // measured differs slightly; confirm that's intended.
            let parsed: f32 = String::from_utf8_lossy(
                parser
                    .parse(LARGE_JSON.as_bytes())
                    .unwrap()
                    .get(0)
                    .unwrap()
                    .unwrap(),
            )
            .parse()
            .unwrap();
            assert_eq!(parsed, 3.14159);
        });
    }
    #[bench]
    #[ignore = "Pikkr does not support parsing an entire JSON file at once"]
    fn pikkr_stateful_small_parse_all(_: &mut Bencher) {}
    #[bench]
    #[ignore = "Pikkr does not support parsing an entire JSON file at once"]
    fn pikkr_stateful_large_parse_all(_: &mut Bencher) {}
    #[bench]
    #[ignore = "Pikkr does not support parsing an entire JSON file at once"]
    fn pikkr_stateless_small_parse_all(_: &mut Bencher) {}
    #[bench]
    #[ignore = "Pikkr does not support parsing an entire JSON file at once"]
    fn pikkr_stateless_large_parse_all(_: &mut Bencher) {}

    // tinyjson
    #[bench]
    fn tinyjson_small_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: tinyjson::JsonValue = SMALL_JSON.parse().unwrap();
            assert_eq!(*parsed["topLevelProperty"].get::<f64>().unwrap(), 1.0);
        });
    }
    #[bench]
    fn tinyjson_large_top_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: tinyjson::JsonValue = LARGE_JSON.parse().unwrap();
            assert_eq!(*parsed["topLevelProperty"].get::<f64>().unwrap(), 1.0);
        });
    }
    #[bench]
    fn tinyjson_small_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: tinyjson::JsonValue = SMALL_JSON.parse().unwrap();
            assert_eq!(
                *parsed["property"]["subProperty"]["thirdLevel"]["pi"]
                    .get::<f64>()
                    .unwrap(),
                3.14159
            );
        });
    }
    #[bench]
    fn tinyjson_large_fourth_level(b: &mut Bencher) {
        b.iter(|| {
            let parsed: tinyjson::JsonValue = LARGE_JSON.parse().unwrap();
            assert_eq!(
                *parsed["property"]["subProperty"]["thirdLevel"]["pi"]
                    .get::<f64>()
                    .unwrap(),
                3.14159
            );
        });
    }
    #[bench]
    fn tinyjson_small_parse_all(b: &mut Bencher) {
        b.iter(|| {
            SMALL_JSON.parse::<tinyjson::JsonValue>().unwrap();
        });
    }
    #[bench]
    fn tinyjson_large_parse_all(b: &mut Bencher) {
        b.iter(|| {
            LARGE_JSON.parse::<tinyjson::JsonValue>().unwrap();
        });
    }
}
use super::version_number::VersionNumber; use crate::vtable::id::VTableId; use serde::{Deserialize, Serialize}; /// ID of VTable #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Serialize, Deserialize)] pub struct VersionId { pub(in crate::version) vtable_id: VTableId, pub(in crate::version) version_number: VersionNumber, } impl VersionId { pub fn new(vtable_id: &VTableId, version_number: &VersionNumber) -> Self { Self { vtable_id: vtable_id.clone(), version_number: version_number.clone(), } } pub fn vtable_id(&self) -> &VTableId { &self.vtable_id } pub fn version_number(&self) -> &VersionNumber { &self.version_number } }
pub mod id_table; pub mod matrix; pub mod tex_table; pub mod webgl;
extern crate simple_excel_writer; use simple_excel_writer as excel; use excel::*; #[test] fn creates_and_saves_an_excel_sheet() { let mut wb = excel::Workbook::create("test.xlsx"); let mut ws = wb.create_sheet("test_sheet"); wb.write_sheet(&mut ws, |sw| { sw.append_row(row!["Name", "Title", "Success"]) }).expect("Write excel error!"); wb.close().expect("Close excel error!"); }
use std::{borrow::Cow, sync::Arc}; use thiserror::Error; #[derive(Error, Debug, Clone)] pub enum Error { /// Error representing an **input** is at fault. #[error("violate domain invariance rule - {msg}")] BadInput { msg: Cow<'static, str> }, /// Authentication or Authorization is failed. #[error("auth failed")] AuthFailed, /// Some resources are not found. #[error("{resource} is not found")] NotFound { resource: &'static str }, /// Some resources are conflict. #[error("{resource} is conflict")] Conflict { resource: &'static str }, /// Error representing an **internal** is at fault. #[error(transparent)] Internal(#[from] Arc<anyhow::Error>), } impl Error { pub fn bad_input<S>(s: S) -> Self where Cow<'static, str>: From<S>, { Error::BadInput { msg: s.into() } } pub fn auth_failed() -> Self { Error::AuthFailed } pub fn not_found(resource: &'static str) -> Self { Error::NotFound { resource } } pub fn conflict(resource: &'static str) -> Self { Error::Conflict { resource } } pub fn internal<E>(e: E) -> Self where E: std::error::Error + Send + Sync + 'static, { Error::Internal(Arc::new(anyhow::Error::from(e))) } }
// NOTE(review): this file is machine-generated by AutoRust; manual edits will
// be lost on regeneration. Comments below are reviewer annotations only.
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
// Aggregate error for the whole generated client: one variant per operation,
// each transparently wrapping (via `#[from]`) the operation-specific error
// enum declared in the corresponding `pub mod <group>` further below.
// Variant names follow the generated `<Group>_<Operation>` convention, hence
// the `allow(non_camel_case_types)`.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)] Operations_List(#[from] operations::list::Error),
    #[error(transparent)] ResourcePools_Get(#[from] resource_pools::get::Error),
    #[error(transparent)] ResourcePools_Create(#[from] resource_pools::create::Error),
    #[error(transparent)] ResourcePools_Update(#[from] resource_pools::update::Error),
    #[error(transparent)] ResourcePools_Delete(#[from] resource_pools::delete::Error),
    #[error(transparent)] ResourcePools_List(#[from] resource_pools::list::Error),
    #[error(transparent)] ResourcePools_ListByResourceGroup(#[from] resource_pools::list_by_resource_group::Error),
    #[error(transparent)] Clusters_Get(#[from] clusters::get::Error),
    #[error(transparent)] Clusters_Create(#[from] clusters::create::Error),
    #[error(transparent)] Clusters_Update(#[from] clusters::update::Error),
    #[error(transparent)] Clusters_Delete(#[from] clusters::delete::Error),
    #[error(transparent)] Clusters_List(#[from] clusters::list::Error),
    #[error(transparent)] Clusters_ListByResourceGroup(#[from] clusters::list_by_resource_group::Error),
    #[error(transparent)] Hosts_Get(#[from] hosts::get::Error),
    #[error(transparent)] Hosts_Create(#[from] hosts::create::Error),
    #[error(transparent)] Hosts_Update(#[from] hosts::update::Error),
    #[error(transparent)] Hosts_Delete(#[from] hosts::delete::Error),
    #[error(transparent)] Hosts_List(#[from] hosts::list::Error),
    #[error(transparent)] Hosts_ListByResourceGroup(#[from] hosts::list_by_resource_group::Error),
    #[error(transparent)] Datastores_Get(#[from] datastores::get::Error),
    #[error(transparent)] Datastores_Create(#[from] datastores::create::Error),
    #[error(transparent)] Datastores_Update(#[from] datastores::update::Error),
    #[error(transparent)] Datastores_Delete(#[from]
    datastores::delete::Error),
    #[error(transparent)] Datastores_List(#[from] datastores::list::Error),
    #[error(transparent)] Datastores_ListByResourceGroup(#[from] datastores::list_by_resource_group::Error),
    #[error(transparent)] VCenters_Get(#[from] v_centers::get::Error),
    #[error(transparent)] VCenters_Create(#[from] v_centers::create::Error),
    #[error(transparent)] VCenters_Update(#[from] v_centers::update::Error),
    #[error(transparent)] VCenters_Delete(#[from] v_centers::delete::Error),
    #[error(transparent)] VCenters_List(#[from] v_centers::list::Error),
    #[error(transparent)] VCenters_ListByResourceGroup(#[from] v_centers::list_by_resource_group::Error),
    #[error(transparent)] VirtualMachines_Get(#[from] virtual_machines::get::Error),
    #[error(transparent)] VirtualMachines_Create(#[from] virtual_machines::create::Error),
    #[error(transparent)] VirtualMachines_Update(#[from] virtual_machines::update::Error),
    #[error(transparent)] VirtualMachines_Delete(#[from] virtual_machines::delete::Error),
    #[error(transparent)] VirtualMachines_Stop(#[from] virtual_machines::stop::Error),
    #[error(transparent)] VirtualMachines_Start(#[from] virtual_machines::start::Error),
    #[error(transparent)] VirtualMachines_Restart(#[from] virtual_machines::restart::Error),
    #[error(transparent)] VirtualMachines_List(#[from] virtual_machines::list::Error),
    #[error(transparent)] VirtualMachines_ListByResourceGroup(#[from] virtual_machines::list_by_resource_group::Error),
    #[error(transparent)] VirtualMachineTemplates_Get(#[from] virtual_machine_templates::get::Error),
    #[error(transparent)] VirtualMachineTemplates_Create(#[from] virtual_machine_templates::create::Error),
    #[error(transparent)] VirtualMachineTemplates_Update(#[from] virtual_machine_templates::update::Error),
    #[error(transparent)] VirtualMachineTemplates_Delete(#[from] virtual_machine_templates::delete::Error),
    #[error(transparent)] VirtualMachineTemplates_List(#[from] virtual_machine_templates::list::Error),
    #[error(transparent)]
    VirtualMachineTemplates_ListByResourceGroup(#[from] virtual_machine_templates::list_by_resource_group::Error),
    #[error(transparent)] VirtualNetworks_Get(#[from] virtual_networks::get::Error),
    #[error(transparent)] VirtualNetworks_Create(#[from] virtual_networks::create::Error),
    #[error(transparent)] VirtualNetworks_Update(#[from] virtual_networks::update::Error),
    #[error(transparent)] VirtualNetworks_Delete(#[from] virtual_networks::delete::Error),
    #[error(transparent)] VirtualNetworks_List(#[from] virtual_networks::list::Error),
    #[error(transparent)] VirtualNetworks_ListByResourceGroup(#[from] virtual_networks::list_by_resource_group::Error),
    #[error(transparent)] InventoryItems_Get(#[from] inventory_items::get::Error),
    #[error(transparent)] InventoryItems_Create(#[from] inventory_items::create::Error),
    #[error(transparent)] InventoryItems_Delete(#[from] inventory_items::delete::Error),
    #[error(transparent)] InventoryItems_ListByVCenter(#[from] inventory_items::list_by_v_center::Error),
    #[error(transparent)] HybridIdentityMetadata_Get(#[from] hybrid_identity_metadata::get::Error),
    #[error(transparent)] HybridIdentityMetadata_Create(#[from] hybrid_identity_metadata::create::Error),
    #[error(transparent)] HybridIdentityMetadata_Delete(#[from] hybrid_identity_metadata::delete::Error),
    #[error(transparent)] HybridIdentityMetadata_ListByVm(#[from] hybrid_identity_metadata::list_by_vm::Error),
    #[error(transparent)] MachineExtensions_Get(#[from] machine_extensions::get::Error),
    #[error(transparent)] MachineExtensions_CreateOrUpdate(#[from] machine_extensions::create_or_update::Error),
    #[error(transparent)] MachineExtensions_Update(#[from] machine_extensions::update::Error),
    #[error(transparent)] MachineExtensions_Delete(#[from] machine_extensions::delete::Error),
    #[error(transparent)] MachineExtensions_List(#[from] machine_extensions::list::Error),
    #[error(transparent)] GuestAgents_Get(#[from] guest_agents::get::Error),
    #[error(transparent)] GuestAgents_Create(#[from]
    guest_agents::create::Error),
    #[error(transparent)] GuestAgents_Delete(#[from] guest_agents::delete::Error),
    #[error(transparent)] GuestAgents_ListByVm(#[from] guest_agents::list_by_vm::Error),
}
// NOTE(review): generated code (AutoRust). Every operation below follows the
// same template: build the request URL from `operation_config.base_path()`
// plus path parameters, attach a Bearer token only when a credential is
// configured, append the `api-version` query pair, execute the request, and
// deserialize the success status into the typed model — any other status is
// deserialized into `models::ErrorResponse` and returned as
// `Error::DefaultResponse`.
pub mod operations {
    use super::{models, API_VERSION};
    // GET {base}/providers/Microsoft.ConnectedVMwarevSphere/operations
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationsList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/providers/Microsoft.ConnectedVMwarevSphere/operations",
            operation_config.base_path(),
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::OperationsList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    // Per-call error surface for `list`; the same shape is repeated for every
    // generated operation in this file.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
// Operation group: CRUD + listing for `resourcePools` resources.
pub mod resource_pools {
    use super::{models, API_VERSION};
    // GET one resource pool by name.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        resource_pool_name: &str,
    ) -> std::result::Result<models::ResourcePool, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/resourcePools/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            resource_pool_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp =
        http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourcePool =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    // Error surface for `get` (same template as `operations::list::Error`).
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PUT create/replace; body is optional — when absent an empty body is
    // sent. 200 maps to `Response::Ok200`, 201 to `Response::Created201`.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        resource_pool_name: &str,
        body: Option<&models::ResourcePool>,
    ) -> std::result::Result<create::Response, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/resourcePools/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            resource_pool_name
        );
        let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder =
        req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(create::Error::SerializeError)?
        } else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourcePool =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourcePool =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod create {
        use super::{models, API_VERSION};
        // Success responses distinguish 200 (updated) from 201 (created).
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::ResourcePool),
            Created201(models::ResourcePool),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value:
            models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PATCH with an optional `ResourcePatch` body.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        resource_pool_name: &str,
        body: Option<&models::ResourcePatch>,
    ) -> std::result::Result<models::ResourcePool, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/resourcePools/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            resource_pool_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(update::Error::SerializeError)?
        } else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourcePool =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // DELETE; the optional `force` flag is sent as an extra query parameter.
    // 200/202/204 map to the three success variants of `delete::Response`.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        resource_pool_name: &str,
        force: Option<bool>,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/resourcePools/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name, resource_pool_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        if let Some(force) = force {
            url.query_pairs_mut().append_pair("force", force.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // GET all resource pools in the subscription.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<models::ResourcePoolsList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/resourcePools",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourcePoolsList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ResourcePoolsList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod clusters { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, cluster_name: &str, ) -> std::result::Result<models::Cluster, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{}", 
operation_config.base_path(), subscription_id, resource_group_name, cluster_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Cluster = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), 
#[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, cluster_name: &str, body: Option<&models::Cluster>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{}", operation_config.base_path(), subscription_id, resource_group_name, cluster_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Cluster = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Cluster = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Cluster), Created201(models::Cluster), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, 
// (continuation of `clusters::update`, whose signature begins on the previous line)
resource_group_name: &str, cluster_name: &str, body: Option<&models::ResourcePatch>, ) -> std::result::Result<models::Cluster, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{}", operation_config.base_path(), subscription_id, resource_group_name, cluster_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Serialize the optional patch body; content-type is only set when a body is present.
let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
// 200 deserializes a Cluster; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Cluster = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error type for `clusters::update`.
pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// DELETE the cluster resource; `force` is an optional query parameter (continues on the next lines).
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, cluster_name: &str, force: Option<bool>, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters/{}", operation_config.base_path(), subscription_id, resource_group_name,
// (continuation of `clusters::delete`, whose signature begins on the previous line)
cluster_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// `force` is only appended to the query string when the caller supplied it.
if let Some(force) = force { url.query_pairs_mut().append_pair("force", force.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
// 200/202/204 map to unit-like Response variants; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Response and Error types for `clusters::delete`.
pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GET {base}/subscriptions/{sub}/providers/Microsoft.ConnectedVMwarevSphere/clusters — list
// every cluster in the subscription; 200 deserializes a ClustersList.
pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::ClustersList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ClustersList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse {
status_code, value: rsp_value, }) } } }
// Error type for `clusters::list`.
pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GET .../resourceGroups/{rg}/providers/Microsoft.ConnectedVMwarevSphere/clusters — list the
// clusters of one resource group; 200 deserializes a ClustersList.
pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::ClustersList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/clusters", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ClustersList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error type for `clusters::list_by_resource_group`.
pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// Operations on Microsoft.ConnectedVMwarevSphere `hosts` resources; same request/response
// template as the `clusters` module above.
pub mod hosts { use super::{models, API_VERSION};
// GET a single host resource; 200 deserializes a models::Host (continues on the next lines).
pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, host_name: &str, ) -> std::result::Result<models::Host, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts/{}", operation_config.base_path(), subscription_id, resource_group_name, host_name ); let mut url =
// (continuation of `hosts::get`, whose signature begins on the previous line)
url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
// 200 deserializes a Host; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Host = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error type for `hosts::get`.
pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// PUT .../hosts/{hostName}: create (or replace) a host resource. `body` is optional; 200 and
// 201 both deserialize a models::Host into the respective Response variant.
pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, host_name: &str, body: Option<&models::Host>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts/{}", operation_config.base_path(), subscription_id, resource_group_name, host_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Serialize the optional request body; content-type is only set when a body is present.
let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)?
// (continuation of `hosts::create`: fall back to an empty body when none was supplied)
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
// 200 and 201 both deserialize a Host; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Host = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Host = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Response and Error types for `hosts::create`.
pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Host), Created201(models::Host), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// PATCH .../hosts/{hostName} with an optional ResourcePatch; 200 deserializes a models::Host.
pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, host_name: &str, body: Option<&models::ResourcePatch>, ) -> std::result::Result<models::Host, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts/{}", operation_config.base_path(), subscription_id, resource_group_name, host_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Serialize the optional patch body; content-type is only set when a body is present.
let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)?
// (continuation of `hosts::update`: fall back to an empty body when none was supplied)
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
// 200 deserializes a Host; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Host = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error type for `hosts::update`.
pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// DELETE .../hosts/{hostName}; `force` is an optional query parameter. 200/202/204 map to
// unit-like Response variants (continues on the next lines).
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, host_name: &str, force: Option<bool>, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts/{}", operation_config.base_path(), subscription_id, resource_group_name, host_name ); let
mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// `force` is only appended to the query string when the caller supplied it.
if let Some(force) = force { url.query_pairs_mut().append_pair("force", force.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Response and Error types for `hosts::delete` (the Error enum continues on the next lines).
pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError),
// (remaining variants of the `hosts::delete` Error enum begun on the previous line)
#[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GET {base}/subscriptions/{sub}/providers/Microsoft.ConnectedVMwarevSphere/hosts — list every
// host in the subscription; 200 deserializes a HostsList.
pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::HostsList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when the operation config carries a credential.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
// 200 deserializes a HostsList; every other status is treated as an ErrorResponse.
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::HostsList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error type for `hosts::list`.
pub mod list {
use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GET .../resourceGroups/{rg}/providers/Microsoft.ConnectedVMwarevSphere/hosts — list the hosts
// of one resource group; 200 deserializes a HostsList (continues on the next lines).
pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::HostsList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/hosts", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::HostsList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod datastores { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, datastore_name: &str, ) -> std::result::Result<models::Datastore, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores/{}", operation_config.base_path(), subscription_id, resource_group_name, datastore_name ); let mut url = 
url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Datastore = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, 
bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, datastore_name: &str, body: Option<&models::Datastore>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores/{}", operation_config.base_path(), subscription_id, resource_group_name, datastore_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Datastore = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Datastore = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Datastore), Created201(models::Datastore), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, 
resource_group_name: &str, datastore_name: &str, body: Option<&models::ResourcePatch>, ) -> std::result::Result<models::Datastore, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores/{}", operation_config.base_path(), subscription_id, resource_group_name, datastore_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Datastore = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, datastore_name: &str, force: Option<bool>, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores/{}", operation_config.base_path(), subscription_id, resource_group_name, 
datastore_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(force) = force { url.query_pairs_mut().append_pair("force", force.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::DatastoresList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatastoresList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; 
Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::DatastoresList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/datastores", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatastoresList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod v_centers { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, vcenter_name: &str, ) -> std::result::Result<models::VCenter, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}", 
operation_config.base_path(), subscription_id, resource_group_name, vcenter_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VCenter = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), 
#[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, vcenter_name: &str, body: Option<&models::VCenter>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}", operation_config.base_path(), subscription_id, resource_group_name, vcenter_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VCenter = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::VCenter = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::VCenter), Created201(models::VCenter), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, 
resource_group_name: &str, vcenter_name: &str, body: Option<&models::ResourcePatch>, ) -> std::result::Result<models::VCenter, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}", operation_config.base_path(), subscription_id, resource_group_name, vcenter_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VCenter = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, vcenter_name: &str, force: Option<bool>, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}", operation_config.base_path(), subscription_id, resource_group_name, 
vcenter_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(force) = force { url.query_pairs_mut().append_pair("force", force.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::VCentersList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VCentersList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { 
status_code, value: rsp_value, }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::VCentersList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) 
.map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VCentersList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod virtual_machines { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, ) -> std::result::Result<models::VirtualMachine, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}", operation_config.base_path(), subscription_id, 
resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachine = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize 
response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, body: Option<&models::VirtualMachine>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachine = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachine = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::VirtualMachine), Created201(models::VirtualMachine), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, 
subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, body: Option<&models::VirtualMachineUpdate>, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachine = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachine = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::VirtualMachine), Created201(models::VirtualMachine), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, 
subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, force: Option<bool>, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(force) = force { url.query_pairs_mut().append_pair("force", force.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, 
NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn stop( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, body: Option<&models::StopVirtualMachineOptions>, ) -> std::result::Result<stop::Response, stop::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/stop", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(stop::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(stop::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(stop::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(stop::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(stop::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(stop::Response::Ok200), http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| stop::Error::DeserializeError(source, rsp_body.clone()))?; Err(stop::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod stop { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn start( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, ) -> std::result::Result<start::Response, start::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/start", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = 
url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(start::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(start::Response::Ok200), http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?; Err(start::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod start { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: 
{1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn restart( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, ) -> std::result::Result<restart::Response, restart::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/restart", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(restart::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(restart::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(restart::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(restart::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(restart::Response::Ok200), http::StatusCode::ACCEPTED => Ok(restart::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| restart::Error::DeserializeError(source, rsp_body.clone()))?; Err(restart::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod 
restart { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::VirtualMachinesList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = 
http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachinesList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::VirtualMachinesList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachinesList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod virtual_machine_templates { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_template_name: &str, ) -> std::result::Result<models::VirtualMachineTemplate, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_template_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachineTemplate = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, 
rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_template_name: &str, body: Option<&models::VirtualMachineTemplate>, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_template_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let 
Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachineTemplate = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::VirtualMachineTemplate = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::VirtualMachineTemplate), Created201(models::VirtualMachineTemplate), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PATCH a VirtualMachineTemplate (partial update via ResourcePatch).
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_template_name: &str,
        body: Option<&models::ResourcePatch>,
    ) -> std::result::Result<models::VirtualMachineTemplate, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_machine_template_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Bearer auth header is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional JSON patch body; empty body when `None`.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(update::Error::SerializeError)?
} else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            // 200: return the patched template.
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualMachineTemplate =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `update`; same per-stage shape as the sibling operations.
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // DELETE a VirtualMachineTemplate; `force` maps to the `force` query flag.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_template_name: &str,
        force: Option<bool>,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates/{}",
            operation_config.base_path(),
subscription_id,
            resource_group_name,
            virtual_machine_template_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional `force` query parameter, rendered as "true"/"false".
        if let Some(force) = force {
            url.query_pairs_mut().append_pair("force", force.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        // 200 done, 202 accepted (deletion in progress), 204 no content.
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Response/Error types for `delete`; success carries no body.
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // GET all VirtualMachineTemplates in the subscription.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<models::VirtualMachineTemplatesList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualMachineTemplatesList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source|
list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `list`.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // GET all VirtualMachineTemplates in a resource group.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<models::VirtualMachineTemplatesList, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachineTemplates",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body =
bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualMachineTemplatesList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `list_by_resource_group`.
    pub mod list_by_resource_group {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
// Client operations for the Microsoft.ConnectedVMwarevSphere `virtualNetworks`
// resource type: get/create/update/delete plus subscription- and
// resource-group-scoped listing.
pub mod virtual_networks {
    use super::{models, API_VERSION};
    // GET a single VirtualNetwork by name.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_network_name: &str,
    ) -> std::result::Result<models::VirtualNetwork, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth header is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetwork =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `get`; standard per-stage variants.
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PUT a VirtualNetwork (create or replace). `body` is optional; when
    // `None`, an empty request body is sent.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_network_name: &str,
        body: Option<&models::VirtualNetwork>,
    ) -> std::result::Result<create::Response, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Bearer auth header is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional JSON payload; empty body when `None`.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(create::Error::SerializeError)?
} else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
        // 200 => existing resource replaced; 201 => resource newly created.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetwork =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetwork =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Response/Error types for `create`: Response distinguishes 200 from 201.
    pub mod create {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::VirtualNetwork),
            Created201(models::VirtualNetwork),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PATCH a VirtualNetwork (partial update via ResourcePatch).
    pub async fn update(
        operation_config: &crate::OperationConfig,
subscription_id: &str,
        resource_group_name: &str,
        virtual_network_name: &str,
        body: Option<&models::ResourcePatch>,
    ) -> std::result::Result<models::VirtualNetwork, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Bearer auth header is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional JSON patch body; empty body when `None`.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(update::Error::SerializeError)?
} else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            // 200: return the patched network.
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetwork =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `update`.
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // DELETE a VirtualNetwork; `force` maps to the `force` query flag.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_network_name: &str,
        force: Option<bool>,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{}",
            operation_config.base_path(),
            subscription_id,
resource_group_name,
            virtual_network_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional `force` query parameter, rendered as "true"/"false".
        if let Some(force) = force {
            url.query_pairs_mut().append_pair("force", force.to_string().as_str());
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        // 200 done, 202 accepted (deletion in progress), 204 no content.
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Response/Error types for `delete`; success carries no body.
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // GET all VirtualNetworks in the subscription.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<models::VirtualNetworksList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetworksList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `list`.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // GET all VirtualNetworks in a resource group.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<models::VirtualNetworksList, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder =
req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VirtualNetworksList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `list_by_resource_group`.
    pub mod list_by_resource_group {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
// Client operations for vCenter `inventoryItems`, which live under a parent
// vcenter resource: get/create/delete plus listing by vCenter.
pub mod inventory_items {
    use super::{models, API_VERSION};
    // GET a single InventoryItem under the given vCenter.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        vcenter_name: &str,
        inventory_item_name: &str,
    ) -> std::result::Result<models::InventoryItem, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}/inventoryItems/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vcenter_name,
            inventory_item_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth header is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::InventoryItem =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `get`.
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // PUT an InventoryItem (create or replace); returns the resulting item.
    // Note: unlike the template/network `create` ops, only a 200 is modeled.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        vcenter_name: &str,
        inventory_item_name: &str,
        body: Option<&models::InventoryItem>,
    ) -> std::result::Result<models::InventoryItem, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}/inventoryItems/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vcenter_name,
            inventory_item_name
        );
        let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Optional JSON payload; empty body when `None`.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(create::Error::SerializeError)?
} else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
        match rsp.status() {
            // 200: return the created/updated inventory item.
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::InventoryItem =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error type for `create`.
    pub mod create {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    // DELETE an InventoryItem under the given vCenter (no `force` flag here).
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        vcenter_name: &str,
        inventory_item_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}/inventoryItems/{}",
            operation_config.base_path(),
            subscription_id,
resource_group_name, vcenter_name, inventory_item_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize 
response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_v_center( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, vcenter_name: &str, ) -> std::result::Result<models::InventoryItemsList, list_by_v_center::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/vcenters/{}/inventoryItems", operation_config.base_path(), subscription_id, resource_group_name, vcenter_name ); let mut url = url::Url::parse(url_str).map_err(list_by_v_center::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_v_center::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_v_center::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_v_center::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::InventoryItemsList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_v_center::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| 
list_by_v_center::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_v_center::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_v_center { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod hybrid_identity_metadata { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, metadata_name: &str, ) -> std::result::Result<models::HybridIdentityMetadata, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/hybridIdentityMetadata/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, metadata_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer 
{}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::HybridIdentityMetadata = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, metadata_name: &str, body: Option<&models::HybridIdentityMetadata>, ) -> std::result::Result<models::HybridIdentityMetadata, create::Error> { let http_client = operation_config.http_client(); let url_str = 
&format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/hybridIdentityMetadata/{}", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, metadata_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(create::Error::SerializeError)? 
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::HybridIdentityMetadata = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, metadata_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/hybridIdentityMetadata/{}", 
operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, metadata_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] 
SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_vm( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, virtual_machine_name: &str, ) -> std::result::Result<models::HybridIdentityMetadataList, list_by_vm::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/hybridIdentityMetadata", operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name ); let mut url = url::Url::parse(url_str).map_err(list_by_vm::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_vm::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_vm::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_vm::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::HybridIdentityMetadataList = serde_json::from_slice(rsp_body).map_err(|source| list_by_vm::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = 
serde_json::from_slice(rsp_body).map_err(|source| list_by_vm::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_vm::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_vm { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod machine_extensions { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, name: &str, extension_name: &str, ) -> std::result::Result<models::MachineExtension, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/extensions/{}", operation_config.base_path(), subscription_id, resource_group_name, name, extension_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MachineExtension = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, name: &str, extension_name: &str, extension_parameters: &models::MachineExtension, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/extensions/{}", operation_config.base_path(), subscription_id, resource_group_name, name, extension_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(extension_parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MachineExtension = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::MachineExtension = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, 
rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::MachineExtension), Created201(models::MachineExtension), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, name: &str, extension_name: &str, extension_parameters: &models::MachineExtensionUpdate, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/extensions/{}", operation_config.base_path(), subscription_id, resource_group_name, name, extension_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer 
{}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(extension_parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MachineExtension = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::MachineExtension = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::MachineExtension), Created201(models::MachineExtension), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: 
{0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, name: &str, extension_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/extensions/{}", operation_config.base_path(), subscription_id, resource_group_name, name, extension_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } 
pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, name: &str, expand: Option<&str>, ) -> std::result::Result<models::MachineExtensionsListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/extensions", operation_config.base_path(), subscription_id, resource_group_name, name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(expand) = expand { url.query_pairs_mut().append_pair("$expand", expand); } let req_body = 
bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        // 200 deserializes the expected model; every other status is mapped to DefaultResponse.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::MachineExtensionsListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    /// Error type for the `list` operation (generated; one variant per failure stage).
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Generated client operations for the `guestAgents` sub-resource of
/// `Microsoft.ConnectedVMwarevSphere/virtualMachines`.
pub mod guest_agents {
    use super::{models, API_VERSION};
    /// GET a single guest agent by name.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_name: &str,
        name: &str,
    ) -> std::result::Result<models::GuestAgent, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/guestAgents/{}",
            operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is only attached when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::GuestAgent =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    /// Error type for the `get` operation.
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// PUT (create or update) a guest agent. `body` is optional; when `None`
    /// an empty body is sent and no content-type header is set.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_name: &str,
        name: &str,
        body: Option<&models::GuestAgent>,
    ) -> std::result::Result<create::Response, create::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/guestAgents/{}",
            operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, name
        );
        let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(create::Error::SerializeError)?
        } else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
        // PUT distinguishes 200 (updated) from 201 (created) via the Response enum.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::GuestAgent =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: models::GuestAgent =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    /// Response and error types for the `create` operation.
    pub mod create {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::GuestAgent),
            Created201(models::GuestAgent),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// DELETE a guest agent. 200/202/204 are all success outcomes.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_name: &str,
        name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/guestAgents/{}",
            operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name, name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    /// Response and error types for the `delete` operation.
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// GET every guest agent attached to the given virtual machine.
    pub async fn list_by_vm(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        virtual_machine_name: &str,
    ) -> std::result::Result<models::GuestAgentList, list_by_vm::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{}/guestAgents",
            operation_config.base_path(), subscription_id, resource_group_name, virtual_machine_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_vm::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_vm::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_vm::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_vm::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::GuestAgentList =
                    serde_json::from_slice(rsp_body).map_err(|source| list_by_vm::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list_by_vm::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_vm::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    /// Error type for the `list_by_vm` operation.
    pub mod list_by_vm {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
use rusqlite::{params, OptionalExtension, Transaction};

/// This schema migration splits the global state table into
/// separate tables containing L1 and L2 data.
///
/// In addition, it also adds a refs table which only contains a single column.
/// This columns references the latest Starknet block for which the L1 and L2
/// states are the same.
pub(crate) fn migrate(transaction: &Transaction<'_>) -> anyhow::Result<()> {
    // Create the new L1 table.
    transaction.execute(
        r"CREATE TABLE l1_state ( starknet_block_number INTEGER PRIMARY KEY, starknet_global_root BLOB NOT NULL, ethereum_block_hash BLOB NOT NULL, ethereum_block_number INTEGER NOT NULL, ethereum_transaction_hash BLOB NOT NULL, ethereum_transaction_index INTEGER NOT NULL, ethereum_log_index INTEGER NOT NULL )",
        [],
    )?;
    // Create the new L2 table
    transaction.execute(
        r"CREATE TABLE starknet_blocks ( number INTEGER PRIMARY KEY, hash BLOB NOT NULL, root BLOB NOT NULL, timestamp INTEGER NOT NULL, transactions BLOB, transaction_receipts BLOB )",
        [],
    )?;
    // Add new L1 L2 state table. This will track the latest Starknet block
    // for which L1 and L2 agree.
    transaction.execute(
        "CREATE TABLE refs (idx INTEGER PRIMARY KEY, l1_l2_head BLOB)",
        [],
    )?;
    // Migrate existing L1 data. Denormalizes the old three-table layout
    // (global_state / ethereum_transactions / ethereum_blocks) into l1_state.
    transaction.execute(
        r"INSERT INTO l1_state ( starknet_block_number, starknet_global_root, ethereum_block_hash, ethereum_block_number, ethereum_transaction_hash, ethereum_transaction_index, ethereum_log_index) SELECT global_state.starknet_block_number, global_state.starknet_global_root, ethereum_blocks.hash, ethereum_blocks.number, ethereum_transactions.hash, ethereum_transactions.idx, global_state.ethereum_log_index FROM global_state JOIN ethereum_transactions ON global_state.ethereum_transaction_hash = ethereum_transactions.hash JOIN ethereum_blocks ON ethereum_transactions.block_hash = ethereum_blocks.hash",
        [],
    )?;
    // Migrate existing L2 data. Transactions are left empty, since we
    // did not store this data yet. This does not require re-downloading
    // as these migrations only affect developer data.
    transaction.execute(
        r"INSERT INTO starknet_blocks (number, hash, root, timestamp) SELECT old.starknet_block_number, old.starknet_block_hash, old.starknet_global_root, old.starknet_block_timestamp FROM global_state old",
        [],
    )?;
    // Get the latest starknet block number and set the L1-L2 head reference to it.
    // This will default to null if no such number exists at all.
    //
    // This latest block is the L1-L2 head because schema 2 tracked L1 and L2 in lock-step.
    let latest: Option<u64> = transaction
        .query_row(
            r"SELECT starknet_block_number FROM global_state ORDER BY starknet_block_number DESC LIMIT 1",
            [],
            |row| row.get(0),
        )
        .optional()?;
    transaction.execute(
        "INSERT INTO refs (idx, l1_l2_head) VALUES (?, ?)",
        params![1, latest],
    )?;
    // drop the old state table and ethereum tables.
    transaction.execute("DROP TABLE global_state", [])?;
    transaction.execute("DROP TABLE ethereum_transactions", [])?;
    transaction.execute("DROP TABLE ethereum_blocks", [])?;
    Ok(())
}
use std::collections::{HashMap, HashSet};

/// Advent of Code 2020, day 21 ("Allergen Assessment").
///
/// Reads `input/day21`, where each line is
/// `ingredient1 ingredient2 ... (contains allergen1, allergen2)`.
/// Part 1 counts ingredient occurrences that cannot carry any allergen;
/// part 2 prints the allergenic ingredients sorted by allergen name.
fn main() {
    let input = String::from_utf8(std::fs::read("input/day21").unwrap()).unwrap();

    // allergen name -> indices (into `food`) of the lines listing that allergen.
    let mut allergens = HashMap::<&str, Vec<usize>>::new();
    // One entry per input line: the set of ingredient names on that line.
    let mut food = Vec::<HashSet<&str>>::new();
    input.split_terminator('\n').for_each(|line| {
        let mut line = line.split(" (contains ");
        let possible_food: HashSet<_> = line.next().unwrap().split(' ').collect();
        let food_index = food.len();
        food.push(possible_food);
        line.next()
            .unwrap()
            .strip_suffix(')')
            .unwrap()
            .split(", ")
            .for_each(|allergen| {
                // idiom fix: `or_default()` replaces `or_insert_with(|| Vec::new())`
                allergens.entry(allergen).or_default().push(food_index);
            });
    });

    // ingredient -> allergen, resolved by repeated elimination: an allergen is
    // pinned once the intersection of its candidate foods (minus already-assigned
    // ingredients) narrows to exactly one ingredient.
    let mut allergenic_food = HashMap::<&str, &str>::new();
    let mut processed_allergens = HashSet::<&str>::new();
    loop {
        let old_len = processed_allergens.len();
        for (&allergen, possible_food) in allergens.iter() {
            if processed_allergens.contains(&allergen) {
                continue;
            }
            let mut possible_food = possible_food.iter().map(|&food_index| &food[food_index]);
            // Seed the intersection with the first candidate line, excluding
            // ingredients already assigned to some other allergen.
            let intersection: HashSet<_> = possible_food
                .next()
                .unwrap()
                .iter()
                .copied()
                .filter(|food| !allergenic_food.contains_key(food))
                .collect();
            let intersection = possible_food.fold(intersection, |intersection, possible_food| {
                intersection.intersection(possible_food).copied().collect()
            });
            if intersection.len() == 1 {
                allergenic_food.insert(intersection.into_iter().next().unwrap(), allergen);
                processed_allergens.insert(allergen);
            }
        }
        if processed_allergens.len() == allergens.len() {
            break;
        }
        // Guard against an unsolvable input: every pass must pin at least one allergen.
        assert_ne!(processed_allergens.len(), old_len);
    }

    // Part 1: count occurrences (with multiplicity) of non-allergenic ingredients.
    let part1 = food
        .iter()
        .flat_map(|food| food.iter().copied())
        .filter(|&food| !allergenic_food.contains_key(&food))
        .count();
    println!("{}", part1);

    // Part 2: "canonical dangerous ingredient list" — ingredients joined by
    // commas, ordered by their allergen name. Keys are unique, so an unstable
    // sort is safe and avoids the allocation of a stable sort.
    let mut part2: Vec<_> = allergenic_food.into_iter().collect();
    part2.sort_unstable_by_key(|&(_, allergen)| allergen);
    let part2 = part2
        .into_iter()
        .map(|(food, _)| food)
        .collect::<Vec<_>>()
        .join(",");
    println!("{}", part2);
}
// Sonic-style zk-SNARK prover: builds the proof polynomials r(X, Y) / t(X, y),
// commits to them, and produces openings at verifier challenges y and z.
use pairing::{Engine, Field};
use bellman::SynthesisError;
use rand::{Rand, Rng, thread_rng};
use merlin::Transcript;
use crate::cs::{SynthesisDriver, Circuit, Backend, Variable, Coeff};
use crate::srs::SRS;
use crate::transcript::ProvingTranscript;
use crate::polynomials::{Polynomial, poly_comm, poly_comm_opening, SxEval, add_polynomials, mul_polynomials};
use crate::utils::*;
use crate::traits::{Commitment, PolyEngine};

// Number of blinding coefficients c_{n+1}..c_{n+4} appended for zero-knowledge.
pub const NUM_BLINDINGS: usize = 4;

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Proof<E: Engine, PE: PolyEngine> {
    /// A commitment of `r(X, 1)`
    pub r_comm: PE::Commitment,
    /// A commitment of `t(X, y)`. `y` represents a random challenge from the verifier.
    pub t_comm: PE::Commitment,
    /// An evaluation `r(z, 1)`. `z` represents a random challenge from the verifier.
    pub r_z1: E::Fr,
    /// An evaluation `r(z, y)`. `y` and `z` represent a random challenge from the verifier.
    pub r_zy: E::Fr,
    /// An opening of `t(z, y)` and `r(z, 1)` which are evaluated at `X = z`.
    pub z_opening: E::G1Affine,
    /// An opening of `r(z, y)` which are evaluated at `X = yz`.
    pub yz_opening: E::G1Affine,
}

impl<E: Engine, PE: PolyEngine<Pairing = E>> Proof<E, PE> {
    /// Runs the full (non-interactive, Fiat–Shamir via `merlin::Transcript`)
    /// proving protocol for `circuit` against the structured reference string `srs`.
    pub fn create_proof<C: Circuit<E>, S: SynthesisDriver>(
        circuit: &C,
        srs: &SRS<E>
    ) -> Result<Self, SynthesisError> {
        let mut wires = Wires {
            a: vec![],
            b: vec![],
            c: vec![],
        };
        // Synthesize polynomial coefficients from circuit
        S::synthesize(&mut wires, circuit)?;
        let n = wires.a.len();
        // TODO: Make better entropy
        let rng = &mut thread_rng();
        let mut transcript = Transcript::new(&[]);
        // ------------------------------------------------------
        // zkP_1(info, a, b, c) -> R:
        // ------------------------------------------------------
        // c_{n+1}, c_{n+2}, c_{n+3}, c_{n+4} <- F_p
        let blindings: Vec<E::Fr> = (0..NUM_BLINDINGS)
            .into_iter()
            .map(|_| E::Fr::rand(rng))
            .collect();
        // A coefficients vector which can be used in common with polynomials r and r'
        // associated with powers for X.
        let mut r_x1 = wires.b;     // X^{-n}...X^{-1}
        r_x1.extend(wires.c);       // X^{-2n}...X^{-n-1}
        r_x1.extend(blindings);     // X^{-2n-4}...X^{-2n-1}
        r_x1.reverse();
        r_x1.push(E::Fr::zero());   // constant term (zero)
        r_x1.extend(wires.a);       // X^{1}...X^{n}
        let r_comm = Polynomial::from_slice(&mut r_x1[..]).commit::<PE>(
            n,
            2*n + NUM_BLINDINGS,
            n,
            &srs
        );
        // A prover commits polynomial
        transcript.commit_point::<PE>(&r_comm);
        // ------------------------------------------------------
        // zkV -> zkP: Send y <- F_p to prover
        // ------------------------------------------------------
        // A verifier sends a challenge scalar to the prover
        let y: E::Fr = transcript.challenge_scalar();
        let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?;
        // y^{-(2n+4)}: weight for the lowest (most negative) power of X
        let y_first_power = y_inv.pow(&[(2 * n + NUM_BLINDINGS) as u64]);
        // ------------------------------------------------------
        // zkP_2(y) -> T:
        // ------------------------------------------------------
        // powers: [-2n-4, n]
        let mut r_xy = r_x1.clone();
        // Evaluate the polynomial r(X, Y) at y
        eval_bivar_poly::<E>(
            &mut r_xy,
            y_first_power,
            y,
        );
        // negative powers [-1, -n], positive [1, 2n] of Polynomial s(X, y)
        let (mut s_neg_poly, s_pos_poly) = {
            let mut sx_poly = SxEval::new(y, n)?;
            S::synthesize(&mut sx_poly, circuit)?;
            sx_poly.neg_pos_poly()
        };
        // Evaluate the polynomial r'(X, Y) = r(X, Y) + s(X, Y) at y
        let mut r_xy_prime = r_xy.clone();
        {
            // extend to have powers [n+1, 2n] for w_i(Y)X^{i+n}
            r_xy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero());
            // negative powers: [-n, -1]
            s_neg_poly.reverse();
            let neg_poly_len = s_neg_poly.len();
            // Add negative powers [-n, -1]
            add_polynomials::<E>(&mut r_xy_prime[(neg_poly_len + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS)], &s_neg_poly[..]);
            s_neg_poly.reverse();
            // Add positive powers [1, 2n]
            add_polynomials::<E>(&mut r_xy_prime[(2 * n + 1 + NUM_BLINDINGS)..], &s_pos_poly[..]);
        }
        // Compute t(X, y) = r(X, 1) * r'(X, y)
        let mut t_xy = mul_polynomials::<E>(&r_x1[..], &r_xy_prime[..])?;
        // the constant term of t(X,Y) is zero
        t_xy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y)
        // commitment of t(X, y) — the constant-term slot is skipped
        let mut t_comm_vec = t_xy[..(4 * n + 2 * NUM_BLINDINGS)].iter()
            .chain_ext(t_xy[(4 * n + 2 * NUM_BLINDINGS + 1)..].iter())
            .map(|e| *e)
            .collect::<Vec<_>>();
        let t_comm = Polynomial::from_slice(&mut t_comm_vec[..])
            .commit::<PE>(
                srs.d,
                4 * n + 2 * NUM_BLINDINGS,
                3 * n,
                srs
            );
        transcript.commit_point::<PE>(&t_comm);
        // ------------------------------------------------------
        // zkV -> zkP: Send z <- F_p to prover
        // ------------------------------------------------------
        // A verifier sends a challenge scalar to the prover
        let z: E::Fr = transcript.challenge_scalar();
        let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
        let z_first_power = z_inv.pow(&[(2 * n + NUM_BLINDINGS) as u64]);
        // ------------------------------------------------------
        // zkP_3(z) -> (a, W_a, b, W_b, W_t, s, sc):
        // ------------------------------------------------------
        // r(X, 1) -> r(z, 1)
        let r_z1 = eval_univar_poly::<E>(&r_x1, z_first_power, z);
        transcript.commit_scalar(&r_z1);
        // Ensure: r(X, 1) -> r(yz, 1) = r(z, y)
        // let r_zy = evaluate_poly(&r_x1, z_first_power, z*y);
        // r(X, y) -> r(z, y)
        let r_zy = eval_univar_poly::<E>(&r_xy, z_first_power, z);
        transcript.commit_scalar(&r_zy);
        // batching randomness for combining the two openings at z
        let r1: E::Fr = transcript.challenge_scalar();
        // An opening of r(X, 1) at yz
        let yz_opening = {
            // r(X, 1) - r(z, y)
            // substract constant term from r(X, 1)
            r_x1[2 * n + NUM_BLINDINGS].sub_assign(&r_zy);
            let mut point = y;
            point.mul_assign(&z);
            poly_comm_opening(
                2 * n + NUM_BLINDINGS,
                n,
                srs,
                &r_x1,
                point
            )
        };
        assert_eq!(r_x1.len(), 3*n + NUM_BLINDINGS + 1);
        // An opening of t(X, y) and r(X, 1) at z
        let z_opening = {
            // Add constant term (undo the subtraction above)
            r_x1[(2 * n + NUM_BLINDINGS)].add_assign(&r_zy);
            let r_x1_len = r_x1.len();
            // Batching polynomial commitments t(X, y) and r(X, 1)
            // powers domain: [-2n-4, n]
            mul_add_poly::<E>(
                &mut t_xy[(2 * n + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS + r_x1_len)],
                &r_x1[..],
                r1
            );
            // Evaluate t(X, y) at z
            let t_zy = {
                let z4_first_power = z_inv.pow(&[(4 * n + 2 * NUM_BLINDINGS) as u64]);
                eval_univar_poly::<E>(&t_xy, z4_first_power, z)
            };
            // Sub constant term
            t_xy[(4 * n + 2 * NUM_BLINDINGS)].sub_assign(&t_zy);
            poly_comm_opening(
                4 * n + 2 * NUM_BLINDINGS,
                3 * n,
                srs,
                &t_xy,
                z
            )
        };
        Ok(
            Proof {
                r_comm,
                t_comm,
                r_z1,
                r_zy,
                z_opening,
                yz_opening,
            }
        )
    }
}

/// Three vectors representing the left inputs, right inputs, and outputs of
/// multiplication constraints respectively in sonic's constraint system.
/// Basically, these are value of a variable.
struct Wires<E: Engine> {
    a: Vec<E::Fr>,
    b: Vec<E::Fr>,
    c: Vec<E::Fr>
}

impl<'a, E: Engine> Backend<E> for &'a mut Wires<E> {
    // Each multiplication gate appends one slot to a, b and c.
    fn new_multiplication_gate(&mut self) {
        self.a.push(E::Fr::zero());
        self.b.push(E::Fr::zero());
        self.c.push(E::Fr::zero());
    }
    // Variable indices are 1-based, hence `index - 1`.
    fn get_var(&self, var: Variable) -> Option<E::Fr> {
        Some(match var {
            Variable::A(index) => {
                self.a[index - 1]
            },
            Variable::B(index) => {
                self.b[index - 1]
            },
            Variable::C(index) => {
                self.c[index - 1]
            }
        })
    }
    fn set_var<F>(&mut self, var: Variable, value: F) -> Result<(), SynthesisError>
        where F: FnOnce() -> Result<E::Fr, SynthesisError>
    {
        let value = value()?;
        match var {
            Variable::A(index) => {
                self.a[index - 1] = value;
            },
            Variable::B(index) => {
                self.b[index - 1] = value;
            },
            Variable::C(index) => {
                self.c[index - 1] = value;
            },
        }
        Ok(())
    }
}

/// Helper advice for the verifier: a commitment to s(X, y), its evaluation
/// at z, and the corresponding opening.
pub struct SxyAdvice<E: Engine, PE: PolyEngine> {
    pub s_comm: PE::Commitment,
    pub s_zy_opening: E::G1Affine, // TODO: W opening type
    pub s_zy: E::Fr, // s(z, y)
}

impl<E: Engine, PE: PolyEngine<Pairing = E>> SxyAdvice<E, PE> {
    /// Re-derives the challenges y and z from the proof's transcript, then
    /// commits to and opens s(X, y) at z.
    pub fn create_advice<C: Circuit<E>, S: SynthesisDriver> (
        circuit: &C,
        proof: &Proof<E, PE>,
        srs: &SRS<E>,
        n: usize,
    ) -> Result<Self, SynthesisError> {
        let y: E::Fr;
        let z: E::Fr;
        {
            // Replay the prover's transcript to recover the same challenges.
            let mut transcript = Transcript::new(&[]);
            transcript.commit_point::<PE>(&proof.r_comm);
            y = transcript.challenge_scalar();
            transcript.commit_point::<PE>(&proof.t_comm);
            z = transcript.challenge_scalar();
        }
        let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?;
        let (s_neg_poly, s_pos_poly) = {
            let mut sx_poly = SxEval::new(y, n)?;
            S::synthesize(&mut sx_poly, circuit)?;
            sx_poly.neg_pos_poly()
        };
        // a commitment to s(X, y)
        let s_comm = poly_comm::<_, _, PE>(
            srs.d,
            n,
            2 * n,
            srs,
            s_neg_poly.iter()
                .chain_ext(s_pos_poly.iter())
        );
        // Evaluate s(X, y) at z: negative powers use z^{-1}, positive powers use z
        let mut s_zy = E::Fr::zero();
        s_zy.add_assign(&eval_univar_poly::<E>(&s_neg_poly[..], z_inv, z_inv));
        s_zy.add_assign(&eval_univar_poly::<E>(&s_pos_poly[..], z, z));
        let s_zy_opening = {
            s_zy.negate();
            poly_comm_opening(
                n,
                2 * n,
                srs,
                s_neg_poly.iter().rev()
                    .chain_ext(Some(s_zy).iter()) // f(X) - f(z)
                    .chain_ext(s_pos_poly.iter()),
                z,
            )
        };
        Ok(SxyAdvice {
            s_comm,
            s_zy,
            s_zy_opening,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pairing::bls12_381::{Bls12, Fr};
    use pairing::{PrimeField, CurveAffine, CurveProjective};
    use crate::cs::{Basic, ConstraintSystem, LinearCombination};
    use super::super::verifier::MultiVerifier;
    use rand::{thread_rng};
    use crate::polynomials::{PolyComm, Polynomial};

    // Minimal circuit: one multiplication gate plus one linear constraint.
    struct SimpleCircuit;

    impl<E: Engine> Circuit<E> for SimpleCircuit {
        fn synthesize<CS: ConstraintSystem<E>>(&self, cs: &mut CS) -> Result<(), SynthesisError> {
            let (a, b, _) = cs.multiply(|| {
                Ok((
                    E::Fr::from_str("10").unwrap(),
                    E::Fr::from_str("20").unwrap(),
                    E::Fr::from_str("200").unwrap(),
                ))
            })?;
            cs.enforce_zero(LinearCombination::from(a) + a - b);
            Ok(())
        }
    }

    #[test]
    fn test_create_proof() {
        let rng = thread_rng();
        let srs = SRS::<Bls12>::new(
            20,
            Fr::from_str("22222").unwrap(),
            Fr::from_str("33333333").unwrap(),
        );
        let proof: Proof<Bls12, Polynomial<Bls12>> = Proof::create_proof::<_, Basic>(&SimpleCircuit, &srs).unwrap();
        let mut batch = MultiVerifier::<Bls12, _, Basic, _>::new(SimpleCircuit, &srs, rng).unwrap();
        for _ in 0..1 {
            batch.add_proof(&proof, &[], |_, _| None);
        }
        assert!(batch.check_all());
    }

    #[test]
    fn polynomial_commitment_test() {
        let srs = SRS::<Bls12>::new(
            20,
            Fr::from_str("22222").unwrap(),
            Fr::from_str("33333333").unwrap(),
        );
        // x^-4 + x^-3 + x^-2 + x^-1 + x + x^2
        let mut poly = vec![Fr::one(), Fr::one(), Fr::one(), Fr::one(), Fr::zero(), Fr::one(), Fr::one()];
        // make commitment to the poly
        let commitment = poly_comm::<Bls12, _, Polynomial<Bls12>>(2, 4, 2, &srs, poly.iter()).into_point();
        let point: Fr = Fr::one();
        let mut tmp = point.inverse().unwrap();
        tmp.square();
        let value = eval_univar_poly::<Bls12>(&poly, tmp, point); // evaluate f(z)
        poly[4] = value;
        poly[4].negate(); // f(x) - f(z)
        let opening = poly_comm_opening(4, 2, &srs, poly.iter(), point);
        // e(W , hα x )e(g^{v} * W{-z} , hα ) = e(F , h^{x^{−d +max}} )
        let alpha_x_precomp = srs.h_pos_x_alpha[1].prepare();
        let alpha_precomp = srs.h_pos_x_alpha[0].prepare();
        let mut neg_x_n_minus_d_precomp = srs.h_neg_x[srs.d - 2];
        neg_x_n_minus_d_precomp.negate();
        let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare();
        let w = opening.prepare();
        let mut gv = srs.g_pos_x[0].mul(value.into_repr());
        let mut z_neg = point;
        z_neg.negate();
        let w_minus_z = opening.mul(z_neg.into_repr());
        gv.add_assign(&w_minus_z);
        let gv = gv.into_affine().prepare();
        assert!(Bls12::final_exponentiation(&Bls12::miller_loop(&[
            (&w, &alpha_x_precomp),
            (&gv, &alpha_precomp),
            (&commitment.prepare(), &neg_x_n_minus_d_precomp),
        ])).unwrap() == <Bls12 as Engine>::Fqk::one());
    }
}
use actix::prelude::*;
use async_trait::async_trait;
use tracing::Span;

/// Message with span used for trace logging
pub struct SpanMessage<I> {
    pub msg: I,
    pub span: Span,
}

impl<M> SpanMessage<M> {
    /// Wraps `msg` together with the span that is current at the call site,
    /// so the receiving actor can continue the same trace.
    pub fn new(msg: M) -> Self {
        Self {
            msg,
            span: Span::current(),
        }
    }
}

// The wrapper forwards the inner message's Result type unchanged.
impl<M, R: 'static> Message for SpanMessage<M>
where
    M: Message<Result = R>,
{
    type Result = R;
}

/// Async handler trait for actors that process span-wrapped messages.
/// Note: `handle` is an associated async fn — it takes no `self`.
#[async_trait]
pub trait AsyncSpanHandler<M>
where
    Self: Actor,
    M: Message,
{
    async fn handle(msg: M) -> <M as Message>::Result;
}

/// Generates an `actix::Handler<SpanMessage<$message_type>>` impl for `$actor`
/// that enters the carried span and delegates to `AsyncSpanHandler::handle`.
#[macro_export]
macro_rules! span_message_async_impl {
    ($message_type:ident, $actor:ident) => {
        impl actix::Handler<crate::span::SpanMessage<$message_type>> for $actor {
            type Result = actix::ResponseActFuture<Self, <$message_type as actix::Message>::Result>;

            fn handle(
                &mut self,
                msg: crate::span::SpanMessage<$message_type>,
                _ctx: &mut actix::Context<Self>,
            ) -> Self::Result {
                use actix_interop::FutureInterop;
                use tracing_futures::Instrument;
                let crate::span::SpanMessage { span, msg } = msg;
                // Enter the sender's span so the async handler future is
                // instrumented with it via `in_current_span`.
                let _enter = span.enter();
                <Self as AsyncSpanHandler<$message_type>>::handle(msg)
                    .in_current_span()
                    .interop_actor_boxed(self)
            }
        }
    };
}

/// Convenience macro: expands to both the generated `Handler` impl (above)
/// and the user-supplied `AsyncSpanHandler` impl body `$t`.
#[macro_export]
macro_rules ! async_message_handler_with_span {
    ({impl AsyncSpanHandler<$M:ident> for $A:ident $t:tt}) => {
        crate::span_message_async_impl!($M, $A);
        #[async_trait::async_trait]
        impl AsyncSpanHandler<$M> for $A $t
    }
}
use futures_util::future::{self, FutureExt};
use twilight_cache::{
    entity::{
        channel::{
            attachment::{AttachmentEntity, AttachmentRepository},
            category_channel::{CategoryChannelEntity, CategoryChannelRepository},
            group::{GroupEntity, GroupRepository},
            message::{MessageEntity, MessageRepository},
            private_channel::{PrivateChannelEntity, PrivateChannelRepository},
            text_channel::{TextChannelEntity, TextChannelRepository},
            voice_channel::{VoiceChannelEntity, VoiceChannelRepository},
        },
        gateway::presence::{PresenceEntity, PresenceRepository},
        guild::{
            emoji::{EmojiEntity, EmojiRepository},
            member::{MemberEntity, MemberRepository},
            role::{RoleEntity, RoleRepository},
            GuildEntity, GuildRepository,
        },
        user::{
            current_user::{CurrentUserEntity, CurrentUserRepository},
            UserEntity, UserRepository,
        },
        voice::{VoiceStateEntity, VoiceStateRepository},
        Entity,
    },
    repository::{
        GetEntityFuture, ListEntitiesFuture, RemoveEntityFuture, SingleEntityRepository,
        UpsertEntityFuture,
    },
    Backend, Cache, Repository,
};
use serde::{de::DeserializeOwned, Serialize};
use std::{marker::PhantomData, sync::Arc};
use twilight_model::id::{AttachmentId, ChannelId, EmojiId, GuildId, MessageId, RoleId, UserId};
use unqlite::{Error, UnQLite, KV};

/// A `twilight_cache::Cache` specialized to the UnQLite backend.
pub type UnqliteCache = Cache<UnqliteBackend>;

/// Entities stored under an id-derived key (one record per id).
pub trait UnqliteEntity: Entity {
    fn key(id: Self::Id) -> Vec<u8>;
}

/// Entities stored under a single well-known key (at most one record).
pub trait UnqliteSingleEntity: Entity {
    fn key() -> &'static [u8];
}

// Key prefixes below ("at:", "cc:", ...) namespace the entity kinds inside
// the single UnQLite key/value store.
impl UnqliteEntity for AttachmentEntity {
    fn key(id: AttachmentId) -> Vec<u8> {
        format!("at:{}", id).into_bytes()
    }
}

impl UnqliteEntity for CategoryChannelEntity {
    fn key(id: ChannelId) -> Vec<u8> {
        format!("cc:{}", id).into_bytes()
    }
}

impl UnqliteSingleEntity for CurrentUserEntity {
    // Literal key "uc" — there is only one current user.
    fn key() -> &'static [u8] {
        &[b'u', b'c']
    }
}

impl UnqliteEntity for EmojiEntity {
    fn key(id: EmojiId) -> Vec<u8> {
        format!("em:{}", id).into_bytes()
    }
}

impl UnqliteEntity for GroupEntity {
    fn key(id: ChannelId) -> Vec<u8> {
        format!("gr:{}", id).into_bytes()
    }
}

impl UnqliteEntity for GuildEntity {
    fn key(id: GuildId) -> Vec<u8> {
        format!("g:{}", id).into_bytes()
    }
}

impl UnqliteEntity for MemberEntity {
    // Composite key: members are unique per (guild, user).
    fn key((guild_id, user_id): (GuildId, UserId)) -> Vec<u8> {
        format!("m:{}:{}", guild_id, user_id).into_bytes()
    }
}

impl UnqliteEntity for MessageEntity {
    fn key(id: MessageId) -> Vec<u8> {
        format!("ms:{}", id).into_bytes()
    }
}

impl UnqliteEntity for PresenceEntity {
    fn key((guild_id, user_id): (GuildId, UserId)) -> Vec<u8> {
        format!("pr:{}:{}", guild_id, user_id).into_bytes()
    }
}

impl UnqliteEntity for PrivateChannelEntity {
    fn key(id: ChannelId) -> Vec<u8> {
        format!("cp:{}", id).into_bytes()
    }
}

impl UnqliteEntity for RoleEntity {
    fn key(id: RoleId) -> Vec<u8> {
        format!("r:{}", id).into_bytes()
    }
}

impl UnqliteEntity for TextChannelEntity {
    fn key(id: ChannelId) -> Vec<u8> {
        format!("ct:{}", id).into_bytes()
    }
}

impl UnqliteEntity for UserEntity {
    fn key(id: UserId) -> Vec<u8> {
        format!("u:{}", id).into_bytes()
    }
}

impl UnqliteEntity for VoiceChannelEntity {
    fn key(id: ChannelId) -> Vec<u8> {
        format!("cv:{}", id).into_bytes()
    }
}

impl UnqliteEntity for VoiceStateEntity {
    fn key((guild_id, user_id): (GuildId, UserId)) -> Vec<u8> {
        format!("v:{}:{}", guild_id, user_id).into_bytes()
    }
}

/// Generic repository over one entity type; holds a cloneable handle to the backend.
pub struct UnqliteRepository<T>(UnqliteBackend, PhantomData<T>);

impl<T> UnqliteRepository<T> {
    fn new(backend: UnqliteBackend) -> Self {
        Self(backend, PhantomData)
    }
}

impl<T: DeserializeOwned + Serialize + UnqliteEntity> Repository<T, UnqliteBackend> for UnqliteRepository<T> {
    fn backend(&self) -> UnqliteBackend {
        self.0.clone()
    }

    // Entities are serialized with CBOR (serde_cbor) into the KV store.
    fn get(&self, entity_id: T::Id) -> GetEntityFuture<'_, T, Error> {
        // NOTE(review): `kv_fetch(...).unwrap()` panics when the key is absent
        // or on any store error, instead of resolving to Ok(None)/Err — confirm
        // whether callers guarantee presence.
        let bytes: Vec<u8> = (self.0).0.kv_fetch(T::key(entity_id)).unwrap();

        future::ok(Some(serde_cbor::from_slice::<T>(&bytes).unwrap())).boxed()
    }

    fn list(&self) -> ListEntitiesFuture<'_, T, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn remove(&self, entity_id: T::Id) -> RemoveEntityFuture<'_, Error> {
        future::ready((self.0).0.kv_delete(T::key(entity_id))).boxed()
    }

    fn upsert(&self, entity: T) -> UpsertEntityFuture<'_, Error> {
        let bytes = serde_cbor::to_vec(&entity).unwrap();

        future::ready((self.0).0.kv_store(T::key(entity.id()), bytes)).boxed()
    }
}

impl<T: DeserializeOwned + Serialize + UnqliteSingleEntity> SingleEntityRepository<T, UnqliteBackend> for UnqliteRepository<T> {
    fn backend(&self) -> UnqliteBackend {
        self.0.clone()
    }

    fn get(&self) -> GetEntityFuture<'_, T, Error> {
        // NOTE(review): same panic-on-missing-key concern as Repository::get above.
        let bytes: Vec<u8> = (self.0).0.kv_fetch(T::key()).unwrap();

        future::ok(Some(serde_cbor::from_slice::<T>(&bytes).unwrap())).boxed()
    }

    fn remove(&self) -> RemoveEntityFuture<'_, Error> {
        future::ready((self.0).0.kv_delete(T::key())).boxed()
    }

    fn upsert(&self, entity: T) -> UpsertEntityFuture<'_, Error> {
        let bytes = serde_cbor::to_vec(&entity).unwrap();

        future::ready((self.0).0.kv_store(T::key(), bytes)).boxed()
    }
}

// Marker impls: these repositories only use the blanket Repository methods.
impl AttachmentRepository<UnqliteBackend> for UnqliteRepository<AttachmentEntity> {}

impl CategoryChannelRepository<UnqliteBackend> for UnqliteRepository<CategoryChannelEntity> {}

impl CurrentUserRepository<UnqliteBackend> for UnqliteRepository<CurrentUserEntity> {
    fn guild_ids(&self) -> twilight_cache::repository::ListEntityIdsFuture<'_, GuildId, Error> {
        unimplemented!("not implemented by this backend");
    }
}

impl EmojiRepository<UnqliteBackend> for UnqliteRepository<EmojiEntity> {}

impl GroupRepository<UnqliteBackend> for UnqliteRepository<GroupEntity> {}

// Relation queries require secondary indexes, which this KV backend does not keep.
impl GuildRepository<UnqliteBackend> for UnqliteRepository<GuildEntity> {
    fn channel_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, ChannelId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn channels(
        &self,
        _: GuildId,
    ) -> ListEntitiesFuture<'_, twilight_cache::entity::channel::GuildChannelEntity, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn emoji_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, EmojiId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn member_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, UserId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn members(&self, _: GuildId) -> ListEntitiesFuture<'_, MemberEntity, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn presence_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, UserId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn presences(&self, _: GuildId) -> ListEntitiesFuture<'_, PresenceEntity, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn role_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, RoleId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn voice_state_ids(
        &self,
        _: GuildId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, UserId, Error> {
        unimplemented!("not implemented by this backend");
    }

    fn voice_states(&self, _: GuildId) -> ListEntitiesFuture<'_, VoiceStateEntity, Error> {
        unimplemented!("not implemented by this backend");
    }
}

impl MemberRepository<UnqliteBackend> for UnqliteRepository<MemberEntity> {}

impl MessageRepository<UnqliteBackend> for UnqliteRepository<MessageEntity> {}

impl PresenceRepository<UnqliteBackend> for UnqliteRepository<PresenceEntity> {}

impl PrivateChannelRepository<UnqliteBackend> for UnqliteRepository<PrivateChannelEntity> {}

impl RoleRepository<UnqliteBackend> for UnqliteRepository<RoleEntity> {}

impl TextChannelRepository<UnqliteBackend> for UnqliteRepository<TextChannelEntity> {}

impl VoiceChannelRepository<UnqliteBackend> for UnqliteRepository<VoiceChannelEntity> {}

impl VoiceStateRepository<UnqliteBackend> for UnqliteRepository<VoiceStateEntity> {}

impl UserRepository<UnqliteBackend> for UnqliteRepository<UserEntity> {
    fn guild_ids(
        &self,
        _: UserId,
    ) -> twilight_cache::repository::ListEntityIdsFuture<'_, GuildId, Error> {
        unimplemented!("not implemented by this backend")
    }
}

/// `twilight-cache` backend for the [UnQLite] database.
/// /// [UnQLite]: https://docs.rs/unqlite #[derive(Clone)] pub struct UnqliteBackend(Arc<UnQLite>); impl UnqliteBackend { /// Create a new `twilight-cache` UnQLite backend with a provided instance. pub fn new(unqlite: UnQLite) -> Self { Self(Arc::new(unqlite)) } /// Shortcut for `UnQLite::create` and [`new`]. /// /// [`new`]: #method.new pub fn create(filename: impl AsRef<str>) -> UnQLite { UnQLite::create(filename) } /// Shortcut for `UnQLite::create_in_memory` and [`new`]. /// /// [`new`]: #method.new pub fn create_in_memory() -> UnQLite { UnQLite::create_in_memory() } /// Shortcut for `UnQLite::create_temp` and [`new`]. /// /// [`new`]: #method.new pub fn create_temp() -> UnQLite { UnQLite::create_temp() } /// Shortcut for `UnQLite::open_mmap` and [`new`]. /// /// [`new`]: #method.new pub fn open_mmap(filename: impl AsRef<str>) -> UnQLite { UnQLite::open_mmap(filename) } /// Shortcut for `UnQLite::open_readonly` and [`new`]. /// /// [`new`]: #method.new pub fn open_readonly(filename: impl AsRef<str>) -> UnQLite { UnQLite::open_readonly(filename) } fn repo<T>(&self) -> UnqliteRepository<T> { UnqliteRepository::new(self.clone()) } } impl Backend for UnqliteBackend { type Error = Error; type AttachmentRepository = UnqliteRepository<AttachmentEntity>; type CategoryChannelRepository = UnqliteRepository<CategoryChannelEntity>; type CurrentUserRepository = UnqliteRepository<CurrentUserEntity>; type EmojiRepository = UnqliteRepository<EmojiEntity>; type GroupRepository = UnqliteRepository<GroupEntity>; type GuildRepository = UnqliteRepository<GuildEntity>; type MemberRepository = UnqliteRepository<MemberEntity>; type MessageRepository = UnqliteRepository<MessageEntity>; type PresenceRepository = UnqliteRepository<PresenceEntity>; type PrivateChannelRepository = UnqliteRepository<PrivateChannelEntity>; type RoleRepository = UnqliteRepository<RoleEntity>; type TextChannelRepository = UnqliteRepository<TextChannelEntity>; type UserRepository = 
UnqliteRepository<UserEntity>; type VoiceChannelRepository = UnqliteRepository<VoiceChannelEntity>; type VoiceStateRepository = UnqliteRepository<VoiceStateEntity>; fn attachments(&self) -> Self::AttachmentRepository { self.repo() } fn category_channels(&self) -> Self::CategoryChannelRepository { self.repo() } fn current_user(&self) -> Self::CurrentUserRepository { self.repo() } fn emojis(&self) -> Self::EmojiRepository { self.repo() } fn groups(&self) -> Self::GroupRepository { self.repo() } fn guilds(&self) -> Self::GuildRepository { self.repo() } fn members(&self) -> Self::MemberRepository { self.repo() } fn messages(&self) -> Self::MessageRepository { self.repo() } fn presences(&self) -> Self::PresenceRepository { self.repo() } fn private_channels(&self) -> Self::PrivateChannelRepository { self.repo() } fn roles(&self) -> Self::RoleRepository { self.repo() } fn text_channels(&self) -> Self::TextChannelRepository { self.repo() } fn users(&self) -> Self::UserRepository { self.repo() } fn voice_channels(&self) -> Self::VoiceChannelRepository { self.repo() } fn voice_states(&self) -> Self::VoiceStateRepository { self.repo() } }
// Reader API for the hardware divider control/status register (DIV_CSR).
// Exposes the two status bits: DIRTY (bit 1) and READY (bit 0).
// NOTE(review): this looks like svd2rust-generated output — presumably edits
// belong in the SVD, not here; TODO confirm the generation pipeline.
#[doc = "Reader of register DIV_CSR"]
pub type R = crate::R<u32, super::DIV_CSR>;
#[doc = "Reader of field `DIRTY`"]
pub type DIRTY_R = crate::R<bool, bool>;
#[doc = "Reader of field `READY`"]
pub type READY_R = crate::R<bool, bool>;
impl R {
    #[doc = "Bit 1 - Changes to 1 when any register is written, and back to 0 when QUOTIENT is read.\\n Software can use this flag to make save/restore more efficient (skip if not DIRTY).\\n If the flag is used in this way, it's recommended to either read QUOTIENT only,\\n or REMAINDER and then QUOTIENT, to prevent data loss on context switch."]
    #[inline(always)]
    pub fn dirty(&self) -> DIRTY_R {
        // DIRTY lives in bit 1 of the register word.
        DIRTY_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 0 - Reads as 0 when a calculation is in progress, 1 otherwise.\\n Writing an operand (xDIVIDEND, xDIVISOR) will immediately start a new calculation, no\\n matter if one is already in progress.\\n Writing to a result register will immediately terminate any in-progress calculation\\n and set the READY and DIRTY flags."]
    #[inline(always)]
    pub fn ready(&self) -> READY_R {
        // READY lives in bit 0 of the register word.
        READY_R::new((self.bits & 0x01) != 0)
    }
}
// Reader/writer API for the PWR DBPCR register, which holds the single DBP
// bit (bit 0) controlling Backup-domain write protection.
// NOTE(review): svd2rust-generated style — presumably regenerated from the
// SVD; TODO confirm before hand-editing.
#[doc = "Register `DBPCR` reader"]
pub type R = crate::R<DBPCR_SPEC>;
#[doc = "Register `DBPCR` writer"]
pub type W = crate::W<DBPCR_SPEC>;
#[doc = "Field `DBP` reader - Disable Backup domain write protection In reset state, all registers and SRAM in Backup domain are protected against parasitic write access. This bit must be set to enable write access to these registers."]
pub type DBP_R = crate::BitReader;
#[doc = "Field `DBP` writer - Disable Backup domain write protection In reset state, all registers and SRAM in Backup domain are protected against parasitic write access. This bit must be set to enable write access to these registers."]
pub type DBP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 0 - Disable Backup domain write protection In reset state, all registers and SRAM in Backup domain are protected against parasitic write access. This bit must be set to enable write access to these registers."]
    #[inline(always)]
    pub fn dbp(&self) -> DBP_R {
        // DBP is bit 0.
        DBP_R::new((self.bits & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Disable Backup domain write protection In reset state, all registers and SRAM in Backup domain are protected against parasitic write access. This bit must be set to enable write access to these registers."]
    #[inline(always)]
    #[must_use]
    pub fn dbp(&mut self) -> DBP_W<DBPCR_SPEC, 0> {
        DBP_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Raw write escape hatch: bypasses the field-level writers.
        self.bits = bits;
        self
    }
}
#[doc = "PWR disable backup protection control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dbpcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dbpcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register.
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DBPCR_SPEC;
impl crate::RegisterSpec for DBPCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`dbpcr::R`](R) reader structure"]
impl crate::Readable for DBPCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dbpcr::W`](W) writer structure"]
impl crate::Writable for DBPCR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DBPCR to value 0"]
impl crate::Resettable for DBPCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use std::time::Instant;

use bevy::{
    app::AppExit,
    core::CorePlugin,
    prelude::*,
    render::pass::ClearColor,
    sprite::collide_aabb::{collide, Collision},
    type_registry::TypeRegistryPlugin,
};
#[cfg(not(headless))]
use bevy::winit::WinitConfig;
use bevy_benchmark_games::{metrics::IterationMetrics, metrics::Metrics, random::FakeRand};
use rand::Rng;

// Frame/iteration budgets differ per build: the headless build runs many more
// iterations of fewer frames; the windowed build runs a couple of long ones.
#[cfg(headless)]
const RUN_FOR_FRAMES: usize = 300;
#[cfg(not(headless))]
const RUN_FOR_FRAMES: usize = 400;
#[cfg(headless)]
const ITERATIONS: usize = 200;
#[cfg(not(headless))]
const ITERATIONS: usize = 2;

/// An implementation of the classic game "Breakout"
///
/// Benchmark harness: builds the game app, runs it for a fixed number of
/// frames per iteration, and measures wall time plus hardware CPU
/// cycle/instruction counters (via `perf_event`) around each iteration.
/// Emits the collected metrics as JSON on stdout.
fn main() {
    // Create CPU cycle and instruction counters
    let mut counters = perf_event::Group::new().unwrap();
    let cycles = perf_event::Builder::new()
        .group(&mut counters)
        .kind(perf_event::events::Hardware::REF_CPU_CYCLES)
        .build()
        .unwrap();
    let instructions = perf_event::Builder::new()
        .group(&mut counters)
        .kind(perf_event::events::Hardware::INSTRUCTIONS)
        .build()
        .unwrap();

    // Constructs a fresh app per iteration so each run starts from scratch.
    // Headless builds register only the minimal plugins needed to update.
    fn build_app() -> App {
        let mut builder = App::build();
        #[cfg(not(headless))]
        builder.add_default_plugins().add_resource(WinitConfig {
            return_from_run: true,
        });
        #[cfg(headless)]
        builder
            .add_plugin(TypeRegistryPlugin::default())
            .add_plugin(CorePlugin::default())
            .add_plugin(TransformPlugin::default());
        builder
            .add_resource(Scoreboard { score: 0 })
            .add_resource(ClearColor(Color::rgb(0.7, 0.7, 0.7)))
            .add_startup_system(setup.system())
            .add_system(paddle_movement_system.system())
            .add_system(ball_collision_system.system())
            .add_system(ball_movement_system.system())
            .add_system(scoreboard_system.system())
            .add_system(exit_game.system());
        builder.app
    }

    let mut metrics = Metrics {
        iterations: Vec::with_capacity(ITERATIONS),
    };

    for _ in 0..ITERATIONS {
        #[allow(unused_mut)]
        let mut app = build_app();

        // Get current instant
        let instant = Instant::now();
        // Enable CPU counters
        counters.enable().unwrap();

        // Windowed build runs the winit loop to completion (exit_game ends it);
        // headless build steps the schedule manually for a fixed frame count.
        #[cfg(not(headless))]
        app.run();
        #[cfg(headless)]
        for _ in 0..RUN_FOR_FRAMES {
            app.update();
        }

        // Disable CPU counters
        counters.disable().unwrap();
        // Get time
        let elapsed = instant.elapsed();

        // Record CPU metrics
        let counts = counters.read().unwrap();
        metrics.iterations.push(IterationMetrics {
            cpu_cycles: counts[&cycles],
            cpu_instructions: counts[&instructions],
            avg_frame_time_us: elapsed.as_micros() as f64 / RUN_FOR_FRAMES as f64,
        });

        // Reset CPU counters
        counters.reset().unwrap();
    }

    // Output metrics to be consumed by benchmarking harness
    println!("{}", serde_json::to_string(&metrics).unwrap());
}

// Player-controlled paddle; `speed` is horizontal movement in units/second.
struct Paddle {
    speed: f32,
}

// Ball component; `velocity` is units/second in world space.
struct Ball {
    velocity: Vec3,
}

// Global score resource, incremented when bricks are destroyed.
struct Scoreboard {
    score: usize,
}

// Collision behavior tag: walls/paddle are Solid, bricks are Scorable
// (despawned on hit and worth a point).
enum Collider {
    Solid,
    Scorable,
}

/// Startup system: spawns cameras, the paddle, the ball, the scoreboard text
/// (windowed build only), the four walls, and the brick grid.
/// Material/asset parameters only exist in the windowed build.
fn setup(
    mut commands: Commands,
    #[cfg(not(headless))] mut materials: ResMut<Assets<ColorMaterial>>,
    #[cfg(not(headless))] asset_server: Res<AssetServer>,
) {
    // Add the game's entities to our world
    commands
        // cameras
        .spawn(Camera2dComponents::default())
        .spawn(UiCameraComponents::default())
        // paddle
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: materials.add(Color::rgb(0.2, 0.2, 0.8).into()),
            transform: Transform::from_translation(Vec3::new(0.0, -215.0, 0.0)),
            sprite: Sprite::new(Vec2::new(120.0, 30.0)),
            ..Default::default()
        })
        .with(Paddle { speed: 500.0 })
        .with(Collider::Solid)
        // ball
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: materials.add(Color::rgb(0.8, 0.2, 0.2).into()),
            transform: Transform::from_translation(Vec3::new(0.0, -50.0, 1.0)),
            sprite: Sprite::new(Vec2::new(30.0, 30.0)),
            ..Default::default()
        })
        .with(Ball {
            velocity: 400.0 * Vec3::new(0.5, -0.5, 0.0).normalize(),
        });

    #[cfg(not(headless))]
    commands
        // scoreboard
        .spawn(TextComponents {
            text: Text {
                font: asset_server.load("assets/fonts/FiraSans-Bold.ttf").unwrap(),
                value: "Score:".to_string(),
                style: TextStyle {
                    color: Color::rgb(0.2, 0.2, 0.8),
                    font_size: 40.0,
                },
            },
            style: Style {
                position_type: PositionType::Absolute,
                position: Rect {
                    top: Val::Px(5.0),
                    left: Val::Px(5.0),
                    ..Default::default()
                },
                ..Default::default()
            },
            ..Default::default()
        });

    // Add walls
    #[cfg(not(headless))]
    let wall_material = materials.add(Color::rgb(0.5, 0.5, 0.5).into());
    let wall_thickness = 10.0;
    let bounds = Vec2::new(900.0, 600.0);

    commands
        // left
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: wall_material,
            transform: Transform::from_translation(Vec3::new(-bounds.x() / 2.0, 0.0, 0.0)),
            sprite: Sprite::new(Vec2::new(wall_thickness, bounds.y() + wall_thickness)),
            ..Default::default()
        })
        .with(Collider::Solid)
        // right
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: wall_material,
            transform: Transform::from_translation(Vec3::new(bounds.x() / 2.0, 0.0, 0.0)),
            sprite: Sprite::new(Vec2::new(wall_thickness, bounds.y() + wall_thickness)),
            ..Default::default()
        })
        .with(Collider::Solid)
        // bottom
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: wall_material,
            transform: Transform::from_translation(Vec3::new(0.0, -bounds.y() / 2.0, 0.0)),
            sprite: Sprite::new(Vec2::new(bounds.x() + wall_thickness, wall_thickness)),
            ..Default::default()
        })
        .with(Collider::Solid)
        // top
        .spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: wall_material,
            transform: Transform::from_translation(Vec3::new(0.0, bounds.y() / 2.0, 0.0)),
            sprite: Sprite::new(Vec2::new(bounds.x() + wall_thickness, wall_thickness)),
            ..Default::default()
        })
        .with(Collider::Solid);

    // Add bricks
    let brick_rows = 4;
    let brick_columns = 5;
    let brick_spacing = 20.0;
    let brick_size = Vec2::new(150.0, 30.0);
    let bricks_width = brick_columns as f32 * (brick_size.x() + brick_spacing) - brick_spacing;
    // center the bricks and move them up a bit
    let bricks_offset = Vec3::new(-(bricks_width - brick_size.x()) / 2.0, 100.0, 0.0);

    for row in 0..brick_rows {
        let y_position = row as f32 * (brick_size.y() + brick_spacing);
        for column in 0..brick_columns {
            let brick_position = Vec3::new(
                column as f32 * (brick_size.x() + brick_spacing),
                y_position,
                0.0,
            ) + bricks_offset;
            commands
                // brick
                .spawn(SpriteComponents {
                    #[cfg(not(headless))]
                    material: materials.add(Color::rgb(0.2, 0.2, 0.8).into()),
                    sprite: Sprite::new(brick_size),
                    transform: Transform::from_translation(brick_position),
                    ..Default::default()
                })
                .with(Collider::Scorable);
        }
    }
}

// Per-system local frame counter used by `exit_game`.
#[derive(Default)]
struct FrameCount(usize);

/// Ends the run after RUN_FOR_FRAMES frames by emitting an AppExit event
/// (only effective in the windowed build, where `app.run()` owns the loop).
fn exit_game(mut state: Local<FrameCount>, mut exit_events: ResMut<Events<AppExit>>) {
    state.0 += 1;
    if state.0 > RUN_FOR_FRAMES {
        exit_events.send(AppExit);
    }
}

// Deterministic RNG state so paddle input is reproducible across runs.
#[derive(Default)]
struct RngState {
    rng: FakeRand,
}

/// Moves the paddle left or right each frame, chosen by the deterministic
/// fake RNG (stand-in for player input), clamped to the play field.
fn paddle_movement_system(
    mut state: Local<RngState>,
    time: Res<Time>,
    mut query: Query<(&Paddle, &mut Transform)>,
) {
    for (paddle, mut transform) in &mut query.iter() {
        let mut direction = 0.0;
        if state.rng.gen::<bool>() {
            direction -= 1.0;
        } else {
            direction += 1.0;
        }

        let translation = transform.translation_mut();
        // move the paddle horizontally
        *translation.x_mut() += time.delta_seconds * direction * paddle.speed;
        // bound the paddle within the walls
        *translation.x_mut() = translation.x().min(380.0).max(-380.0);
    }
}

/// Integrates ball position by its velocity each frame.
fn ball_movement_system(time: Res<Time>, mut ball_query: Query<(&Ball, &mut Transform)>) {
    // clamp the timestep to stop the ball from escaping when the game starts
    let delta_seconds = f32::min(0.2, time.delta_seconds);

    for (ball, mut transform) in &mut ball_query.iter() {
        transform.translate(ball.velocity * delta_seconds);
    }
}

/// Mirrors the score resource into every Text component (the scoreboard).
fn scoreboard_system(scoreboard: Res<Scoreboard>, mut query: Query<&mut Text>) {
    for mut text in &mut query.iter() {
        text.value = format!("Score: {}", scoreboard.score);
    }
}

/// AABB collision between the ball and every Collider: despawns scorable
/// bricks (incrementing the score) and reflects the ball's velocity on the
/// axis of impact. Stops after the first collision per frame.
fn ball_collision_system(
    mut commands: Commands,
    mut scoreboard: ResMut<Scoreboard>,
    mut ball_query: Query<(&mut Ball, &Transform, &Sprite)>,
    mut collider_query: Query<(Entity, &Collider, &Transform, &Sprite)>,
) {
    for (mut ball, ball_transform, sprite) in &mut ball_query.iter() {
        let ball_size = sprite.size;
        let velocity = &mut ball.velocity;

        // check collision with walls
        for (collider_entity, collider, transform, sprite) in &mut collider_query.iter() {
            let collision = collide(
                ball_transform.translation(),
                ball_size,
                transform.translation(),
                sprite.size,
            );
            if let Some(collision) = collision {
                // scorable colliders should be despawned and increment the scoreboard on collision
                if let Collider::Scorable = *collider {
                    scoreboard.score += 1;
                    commands.despawn(collider_entity);
                }

                // reflect the ball when it collides
                let mut reflect_x = false;
                let mut reflect_y = false;

                // only reflect if the ball's velocity is going in the opposite direction of the collision
                match collision {
                    Collision::Left => reflect_x = velocity.x() > 0.0,
                    Collision::Right => reflect_x = velocity.x() < 0.0,
                    Collision::Top => reflect_y = velocity.y() < 0.0,
                    Collision::Bottom => reflect_y = velocity.y() > 0.0,
                }

                // reflect velocity on the x-axis if we hit something on the x-axis
                if reflect_x {
                    *velocity.x_mut() = -velocity.x();
                }

                // reflect velocity on the y-axis if we hit something on the y-axis
                if reflect_y {
                    *velocity.y_mut() = -velocity.y();
                }

                break;
            }
        }
    }
}
use std::io::{self, BufRead};

/// Returns `(has_double, has_triple)`: whether `s` contains any character
/// repeated exactly twice and/or exactly three times (the Advent of Code
/// 2018 day 2 checksum predicate). Operates on bytes, which is sufficient
/// for the ASCII puzzle input.
fn repeated_char(s: String) -> (bool, bool) {
    // Sort the bytes so equal characters form contiguous runs.
    let mut bytes = s.into_bytes();
    bytes.sort_unstable();

    let mut has_2 = false;
    let mut has_3 = false;

    // Scan runs by index. The original peekable-iterator version compared
    // `iter.peek() == Some(&&ch)` where `ch` is already a `&u8`, producing a
    // triple reference on the right-hand side; plain indices avoid the
    // reference-depth pitfall entirely.
    let mut i = 0;
    while i < bytes.len() {
        // Find the end of the run starting at `i`.
        let mut j = i + 1;
        while j < bytes.len() && bytes[j] == bytes[i] {
            j += 1;
        }
        match j - i {
            2 => has_2 = true,
            3 => has_3 = true,
            _ => {}
        }
        i = j;
    }

    (has_2, has_3)
}

/// Reads box IDs from stdin (one per line) and prints the checksum:
/// (#lines containing a double) * (#lines containing a triple).
fn main() {
    let stdin = io::stdin();
    let (has_2s, has_3s): (Vec<_>, Vec<_>) = stdin
        .lock()
        .lines()
        .map(|line| repeated_char(line.unwrap()))
        .unzip();
    let checksum =
        has_2s.iter().filter(|&&x| x).count() * has_3s.iter().filter(|&&x| x).count();
    println!("{}", checksum);
}
use std::marker::PhantomData;

use super::{Pusherator, PusheratorBuild};

/// Push-based `filter_map`: applies `func` to each item pushed in and
/// forwards only the `Some` results to the downstream `next` pusherator.
pub struct FilterMap<Next, Func, In> {
    next: Next,
    func: Func,
    // Ties the otherwise-unused `In` parameter to the struct without
    // owning a value of that type.
    _marker: PhantomData<fn(In)>,
}
impl<Next, Func, In> Pusherator for FilterMap<Next, Func, In>
where
    Next: Pusherator,
    Func: FnMut(In) -> Option<Next::Item>,
{
    type Item = In;
    fn give(&mut self, item: Self::Item) {
        // Drop items the closure maps to `None`; push the rest downstream.
        if let Some(item) = (self.func)(item) {
            self.next.give(item);
        }
    }
}
impl<Next, Func, In> FilterMap<Next, Func, In>
where
    Next: Pusherator,
    Func: FnMut(In) -> Option<Next::Item>,
{
    /// Creates a `FilterMap` applying `func` before forwarding to `next`.
    pub fn new(func: Func, next: Next) -> Self {
        Self {
            next,
            func,
            _marker: PhantomData,
        }
    }
}

/// Builder-side counterpart: records `func` so the `FilterMap` stage can be
/// instantiated once the downstream pusherator is supplied via `push_to`.
pub struct FilterMapBuild<Prev, Func>
where
    Prev: PusheratorBuild,
{
    prev: Prev,
    func: Func,
}
impl<Prev, Func, Out> FilterMapBuild<Prev, Func>
where
    Prev: PusheratorBuild,
    Func: FnMut(Prev::ItemOut) -> Option<Out>,
{
    /// Wraps the upstream builder `prev` with a filter-map stage.
    pub fn new(prev: Prev, func: Func) -> Self {
        Self { prev, func }
    }
}
impl<Prev, Func, Out> PusheratorBuild for FilterMapBuild<Prev, Func>
where
    Prev: PusheratorBuild,
    Func: FnMut(Prev::ItemOut) -> Option<Out>,
{
    type ItemOut = Out;
    type Output<O: Pusherator<Item = Self::ItemOut>> =
        Prev::Output<FilterMap<O, Func, Prev::ItemOut>>;
    fn push_to<O>(self, input: O) -> Self::Output<O>
    where
        O: Pusherator<Item = Self::ItemOut>,
    {
        // Interpose the FilterMap stage between the upstream chain and `input`.
        self.prev.push_to(FilterMap::new(self.func, input))
    }
}
use crate::dasm::{DasmError, InstructionData};
use crate::spec::mmu::{Error as MmuError, MMU};
use crate::spec::mnemonic::Mnemonic;
use crate::spec::opcode::Instruction;

use crate::debug_logger::{cpu_logger::CPU_LOGGER, DebugLogger};
use crate::spec::register::{RegisterError, Registers, TRegister};
use std::convert::TryFrom;
use std::num::Wrapping;

/// A steppable CPU: one `tick` fetches, decodes, and executes a single
/// instruction against the given MMU, returning the cycle count.
pub trait TCPU {
    type E;
    fn tick(&mut self, mmu: &mut MMU) -> Result<u8, Self::E>;
}

/// Stack push/pop operations through SP against the MMU.
pub trait TStackable {
    fn push_stack_byte(&mut self, value: u8, mmu: &mut MMU) -> Result<(), Error>;
    fn push_stack_word(&mut self, value: u16, mmu: &mut MMU) -> Result<(), Error>;
    fn pop_stack_byte(&mut self, mmu: &mut MMU) -> Result<u8, Error>;
    fn pop_stack_word(&mut self, mmu: &mut MMU) -> Result<u16, Error>;
}

/// GameBoy CPU state: the register file plus a halt flag.
pub struct CPU {
    pub(crate) registers: Registers,
    pub(crate) halt: bool,
}

/// Errors surfaced by the CPU: decode failures, MMU/register errors, and
/// unexpected opcode states encountered during execution.
#[derive(Debug)]
pub enum Error {
    Default(String),
    InitializationError,
    UnexpectedOpcode(String),
    UnsupportedOpcode(Instruction),
    MmuError(MmuError),
    DecodeError(DasmError),
    RegisterError(RegisterError),
    UnexpectedOpcodeState(InstructionData, u16),
}

impl From<RegisterError> for Error {
    fn from(reg_error: RegisterError) -> Self {
        Error::RegisterError(reg_error)
    }
}

impl From<MmuError> for Error {
    fn from(mmu_error: MmuError) -> Self {
        Error::MmuError(mmu_error)
    }
}

impl TCPU for CPU {
    type E = Error;

    /// Executes one instruction: fetch at PC, read the two operand bytes
    /// following the opcode, advance PC by the instruction size, then
    /// dispatch. Returns the instruction's cycle count.
    fn tick(&mut self, mmu: &mut MMU) -> Result<u8, Error> {
        self.gameboy_doc_debug(mmu);
        let last_pc = *self.registers.pc.get_value();
        let opcode = self.fetch(mmu)?;
        // Operand bytes are read at PC and PC+1 (after fetch advanced PC past
        // the opcode byte); Wrapping guards the PC+1 read at 0xFFFF.
        let data = [
            mmu.read_byte(*self.registers.pc.get_value())
                .map_err(Error::MmuError)?,
            mmu.read_byte((Wrapping(*self.registers.pc.get_value()) + Wrapping(1)).0)
                .map_err(Error::MmuError)?,
        ];

        CPU_LOGGER.log("PC", || {
            println!("[PC: {:#X}] Op: {}, Dat: [{:X?}]", last_pc, opcode, data)
        });

        // Advance PC past the operand bytes before executing, so branch
        // instructions see the address of the next instruction.
        self.registers
            .pc
            .set_value(*self.registers.pc.get_value() + opcode.size as u16);

        let cycles = self.execute(&opcode, &data, mmu)?;

        CPU_LOGGER.log("REG", || println!("\t{}", self.registers));

        Ok(cycles)
    }
}

impl TStackable for CPU {
    // NOTE(review): this writes the byte at the current SP and then
    // decrements, whereas `push_stack_word` decrements SP first and then
    // writes at the new SP — the two orderings are inconsistent; confirm
    // which matches the intended stack convention.
    fn push_stack_byte(&mut self, value: u8, mmu: &mut MMU) -> Result<(), Error> {
        self.registers
            .sp
            .update_value_checked(|sp| {
                mmu.write_byte(*sp, value)?;
                Ok(sp.checked_sub(1))
            })
            .map_err(Error::RegisterError)
    }

    /// Pushes a 16-bit value: SP -= 2, then the word is written at the new SP.
    fn push_stack_word(&mut self, value: u16, mmu: &mut MMU) -> Result<(), Error> {
        self.registers
            .sp
            .update_value_checked(|sp| Ok(sp.checked_sub(2)))?;
        mmu.write_word(*self.registers.sp.get_value(), value)?;
        Ok(())
    }

    fn pop_stack_byte(&mut self, _mmu: &mut MMU) -> Result<u8, Error> {
        // Not needed by any caller yet.
        unimplemented!()
    }

    /// Pops a 16-bit value: the word is read at SP, then SP += 2.
    fn pop_stack_word(&mut self, mmu: &mut MMU) -> Result<u16, Error> {
        let stack_val = mmu.read_word(*self.registers.sp.get_value())?;
        self.registers
            .sp
            .update_value_checked(|sp| Ok(sp.checked_add(2)))?;
        Ok(stack_val)
    }
}

impl CPU {
    /// Returns the current PC and then increments it by one (overflow-checked).
    fn increment_pc(&mut self) -> Result<u16, Error> {
        let next = *self.registers.pc.get_value();
        self.registers
            .pc
            .update_value_checked(|pc| Ok(pc.checked_add(1)))
            .map_err(Error::RegisterError)?;
        Ok(next)
    }

    /// Reads the opcode byte at PC (consuming it); 0xCB-prefixed opcodes also
    /// read the following byte. Decodes into `InstructionData`.
    fn fetch(&mut self, mmu: &MMU) -> Result<InstructionData, Error> {
        let pc = self.increment_pc()?;
        let op = mmu.read_byte(pc).map_err(Error::MmuError)?;
        let cb_byte = match op {
            0xCB => Some(mmu.read_byte(pc + 1).map_err(Error::MmuError)?),
            _ => None,
        };

        InstructionData::try_from((op, cb_byte)).map_err(Error::DecodeError)
    }

    /// Dispatches an instruction to the evaluator for its mnemonic family
    /// (loads, stack ops, ALU, bitwise, control, branches). Returns cycles.
    fn execute(
        &mut self,
        instruction_data: &InstructionData,
        opcode_data: &[u8; 2],
        mmu: &mut MMU,
    ) -> Result<u8, Error> {
        let result = match instruction_data.mnemonic {
            Mnemonic::LD | Mnemonic::LDHL => self.evaluate_ld(instruction_data, opcode_data, mmu),
            Mnemonic::PUSH | Mnemonic::POP => {
                self.evaluate_stack_op(instruction_data, opcode_data, mmu)
            }
            Mnemonic::ADD
            | Mnemonic::ADC
            | Mnemonic::SUB
            | Mnemonic::SBC
            | Mnemonic::AND
            | Mnemonic::XOR
            | Mnemonic::OR
            | Mnemonic::CP
            | Mnemonic::INC
            | Mnemonic::DEC
            | Mnemonic::DAA
            | Mnemonic::CPL => self.evaluate_alu(instruction_data, opcode_data, mmu),
            Mnemonic::RLCA
            | Mnemonic::RLA
            | Mnemonic::RRCA
            | Mnemonic::RRA
            | Mnemonic::RLC
            | Mnemonic::RL
            | Mnemonic::RRC
            | Mnemonic::RR
            | Mnemonic::SLA
            | Mnemonic::SWAP
            | Mnemonic::SRA
            | Mnemonic::SET
            | Mnemonic::BIT
            | Mnemonic::RES
            | Mnemonic::SRL => self.evaluate_bitwise(instruction_data, opcode_data, mmu),
            Mnemonic::CCF
            | Mnemonic::SCF
            | Mnemonic::NOP
            | Mnemonic::HALT
            | Mnemonic::STOP
            | Mnemonic::DI
            | Mnemonic::EI => self.evaluate_control(instruction_data, opcode_data, mmu),
            Mnemonic::JP
            | Mnemonic::JR
            | Mnemonic::CALL
            | Mnemonic::RET
            | Mnemonic::RETI
            | Mnemonic::RST
            | Mnemonic::DB
            | Mnemonic::DW => self.evaluate_branch(instruction_data, opcode_data, mmu),
            Mnemonic::UNIMPLEMENTED => Ok(0),
        }?;

        Ok(result)
    }

    /// If an enabled interrupt is pending: pushes PC, jumps to the interrupt's
    /// ISR, clears the master-enable and the interrupt's pending bit, and
    /// returns 5 cycles. Returns 0 when no interrupt is serviced.
    pub fn handle_interrupts(&mut self, mmu: &mut MMU) -> Result<u8, Error> {
        if let Some(interrupt) = mmu.interrupts_enabled()? {
            CPU_LOGGER.log("INTS", || println!("Handling Interrupt: {:?}", interrupt));
            let isr = interrupt.get_isr_location();

            CPU_LOGGER.log("INTS", || println!("Jumping to {:X}", isr));

            self.push_stack_word(*self.registers.pc.get_value(), mmu)?;
            self.registers.pc.set_value(isr);
            mmu.write_interrupt_enable_reg(false);
            mmu.set_interrupt_bit(interrupt, false)?;

            return Ok(5);
        }

        Ok(0)
    }

    /// Emits one register/memory trace line in the "Gameboy Doctor" log
    /// format (A..L, SP, PC and the 4 bytes at PC), gated on the GB_DOC
    /// log channel.
    pub fn gameboy_doc_debug(&self, mmu: &MMU) {
        CPU_LOGGER.log("GB_DOC", || {
            let pc_mem = [
                mmu.read_byte(*self.registers.pc.get_value())
                    .map_err(Error::MmuError).unwrap(),
                mmu.read_byte((Wrapping(*self.registers.pc.get_value()) + Wrapping(1)).0)
                    .map_err(Error::MmuError).unwrap(),
                mmu.read_byte((Wrapping(*self.registers.pc.get_value()) + Wrapping(2)).0)
                    .map_err(Error::MmuError).unwrap(),
                mmu.read_byte((Wrapping(*self.registers.pc.get_value()) + Wrapping(3)).0)
                    .map_err(Error::MmuError).unwrap(),
            ];
            println!(
                "A:{:02X} F:{:02X} B:{:02X} C:{:02X} D:{:02X} E:{:02X} H:{:02X} L:{:02X} SP:{:04X} PC:{:04X} PCMEM:{:02X},{:02X},{:02X},{:02X}",
                self.registers.a.get_value(),
                self.registers.f.get_value(),
                self.registers.b.get_value(),
                self.registers.c.get_value(),
                self.registers.d.get_value(),
                self.registers.e.get_value(),
                self.registers.h.get_value(),
                self.registers.l.get_value(),
                self.registers.sp.get_value(),
                self.registers.pc.get_value(),
                pc_mem[0],
                pc_mem[1],
                pc_mem[2],
                pc_mem[3]
            );
        });
    }

    /// Creates a CPU with freshly-initialized registers and `halt` cleared.
    pub fn new() -> Result<CPU, Error> {
        Ok(CPU {
            registers: Registers::new(),
            halt: false,
        })
    }
}
// Reader/writer API for the RCC C1_AHB1LPENR register (per-peripheral clock
// enables during CSleep mode). All fields are single enable bits sharing the
// DMA1LPEN reader/writer types via `pub use` aliases.
// NOTE(review): svd2rust-generated style — presumably regenerated from the
// SVD; TODO confirm before hand-editing.
#[doc = "Register `C1_AHB1LPENR` reader"]
pub type R = crate::R<C1_AHB1LPENR_SPEC>;
#[doc = "Register `C1_AHB1LPENR` writer"]
pub type W = crate::W<C1_AHB1LPENR_SPEC>;
#[doc = "Field `DMA1LPEN` reader - DMA1 Clock Enable During CSleep Mode"]
pub type DMA1LPEN_R = crate::BitReader<DMA1LPEN_A>;
#[doc = "DMA1 Clock Enable During CSleep Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DMA1LPEN_A {
    #[doc = "0: The selected clock is disabled during csleep mode"]
    Disabled = 0,
    #[doc = "1: The selected clock is enabled during csleep mode"]
    Enabled = 1,
}
impl From<DMA1LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DMA1LPEN_A) -> Self {
        variant as u8 != 0
    }
}
impl DMA1LPEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DMA1LPEN_A {
        match self.bits {
            false => DMA1LPEN_A::Disabled,
            true => DMA1LPEN_A::Enabled,
        }
    }
    #[doc = "The selected clock is disabled during csleep mode"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == DMA1LPEN_A::Disabled
    }
    #[doc = "The selected clock is enabled during csleep mode"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == DMA1LPEN_A::Enabled
    }
}
#[doc = "Field `DMA1LPEN` writer - DMA1 Clock Enable During CSleep Mode"]
pub type DMA1LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DMA1LPEN_A>;
impl<'a, REG, const O: u8> DMA1LPEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "The selected clock is disabled during csleep mode"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMA1LPEN_A::Disabled)
    }
    #[doc = "The selected clock is enabled during csleep mode"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMA1LPEN_A::Enabled)
    }
}
// All remaining fields reuse the DMA1LPEN reader/writer types under
// field-specific names.
#[doc = "Field `DMA2LPEN` reader - DMA2 Clock Enable During CSleep Mode"]
pub use DMA1LPEN_R as DMA2LPEN_R;
#[doc = "Field `ADC12LPEN` reader - ADC1/2 Peripheral Clocks Enable During CSleep Mode"]
pub use DMA1LPEN_R as ADC12LPEN_R;
#[doc = "Field `ETH1MACLPEN` reader - Ethernet MAC bus interface Clock Enable During CSleep Mode"]
pub use DMA1LPEN_R as ETH1MACLPEN_R;
#[doc = "Field `ETH1TXLPEN` reader - Ethernet Transmission Clock Enable During CSleep Mode"]
pub use DMA1LPEN_R as ETH1TXLPEN_R;
#[doc = "Field `ETH1RXLPEN` reader - Ethernet Reception Clock Enable During CSleep Mode"]
pub use DMA1LPEN_R as ETH1RXLPEN_R;
#[doc = "Field `USB1OTGLPEN` reader - USB1OTG peripheral clock enable during CSleep mode"]
pub use DMA1LPEN_R as USB1OTGLPEN_R;
#[doc = "Field `USB1ULPILPEN` reader - USB_PHY1 clock enable during CSleep mode"]
pub use DMA1LPEN_R as USB1ULPILPEN_R;
#[doc = "Field `DMA2LPEN` writer - DMA2 Clock Enable During CSleep Mode"]
pub use DMA1LPEN_W as DMA2LPEN_W;
#[doc = "Field `ADC12LPEN` writer - ADC1/2 Peripheral Clocks Enable During CSleep Mode"]
pub use DMA1LPEN_W as ADC12LPEN_W;
#[doc = "Field `ETH1MACLPEN` writer - Ethernet MAC bus interface Clock Enable During CSleep Mode"]
pub use DMA1LPEN_W as ETH1MACLPEN_W;
#[doc = "Field `ETH1TXLPEN` writer - Ethernet Transmission Clock Enable During CSleep Mode"]
pub use DMA1LPEN_W as ETH1TXLPEN_W;
#[doc = "Field `ETH1RXLPEN` writer - Ethernet Reception Clock Enable During CSleep Mode"]
pub use DMA1LPEN_W as ETH1RXLPEN_W;
#[doc = "Field `USB1OTGLPEN` writer - USB1OTG peripheral clock enable during CSleep mode"]
pub use DMA1LPEN_W as USB1OTGLPEN_W;
#[doc = "Field `USB1ULPILPEN` writer - USB_PHY1 clock enable during CSleep mode"]
pub use DMA1LPEN_W as USB1ULPILPEN_W;
impl R {
    #[doc = "Bit 0 - DMA1 Clock Enable During CSleep Mode"]
    #[inline(always)]
    pub fn dma1lpen(&self) -> DMA1LPEN_R {
        DMA1LPEN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - DMA2 Clock Enable During CSleep Mode"]
    #[inline(always)]
    pub fn dma2lpen(&self) -> DMA2LPEN_R {
        DMA2LPEN_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 5 - ADC1/2 Peripheral Clocks Enable During CSleep Mode"]
    #[inline(always)]
    pub fn adc12lpen(&self) -> ADC12LPEN_R {
        ADC12LPEN_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 15 - Ethernet MAC bus interface Clock Enable During CSleep Mode"]
    #[inline(always)]
    pub fn eth1maclpen(&self) -> ETH1MACLPEN_R {
        ETH1MACLPEN_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 16 - Ethernet Transmission Clock Enable During CSleep Mode"]
    #[inline(always)]
    pub fn eth1txlpen(&self) -> ETH1TXLPEN_R {
        ETH1TXLPEN_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - Ethernet Reception Clock Enable During CSleep Mode"]
    #[inline(always)]
    pub fn eth1rxlpen(&self) -> ETH1RXLPEN_R {
        ETH1RXLPEN_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 25 - USB1OTG peripheral clock enable during CSleep mode"]
    #[inline(always)]
    pub fn usb1otglpen(&self) -> USB1OTGLPEN_R {
        USB1OTGLPEN_R::new(((self.bits >> 25) & 1) != 0)
    }
    #[doc = "Bit 26 - USB_PHY1 clock enable during CSleep mode"]
    #[inline(always)]
    pub fn usb1ulpilpen(&self) -> USB1ULPILPEN_R {
        USB1ULPILPEN_R::new(((self.bits >> 26) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - DMA1 Clock Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn dma1lpen(&mut self) -> DMA1LPEN_W<C1_AHB1LPENR_SPEC, 0> {
        DMA1LPEN_W::new(self)
    }
    #[doc = "Bit 1 - DMA2 Clock Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn dma2lpen(&mut self) -> DMA2LPEN_W<C1_AHB1LPENR_SPEC, 1> {
        DMA2LPEN_W::new(self)
    }
    #[doc = "Bit 5 - ADC1/2 Peripheral Clocks Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn adc12lpen(&mut self) -> ADC12LPEN_W<C1_AHB1LPENR_SPEC, 5> {
        ADC12LPEN_W::new(self)
    }
    #[doc = "Bit 15 - Ethernet MAC bus interface Clock Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn eth1maclpen(&mut self) -> ETH1MACLPEN_W<C1_AHB1LPENR_SPEC, 15> {
        ETH1MACLPEN_W::new(self)
    }
    #[doc = "Bit 16 - Ethernet Transmission Clock Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn eth1txlpen(&mut self) -> ETH1TXLPEN_W<C1_AHB1LPENR_SPEC, 16> {
        ETH1TXLPEN_W::new(self)
    }
    #[doc = "Bit 17 - Ethernet Reception Clock Enable During CSleep Mode"]
    #[inline(always)]
    #[must_use]
    pub fn eth1rxlpen(&mut self) -> ETH1RXLPEN_W<C1_AHB1LPENR_SPEC, 17> {
        ETH1RXLPEN_W::new(self)
    }
    #[doc = "Bit 25 - USB1OTG peripheral clock enable during CSleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn usb1otglpen(&mut self) -> USB1OTGLPEN_W<C1_AHB1LPENR_SPEC, 25> {
        USB1OTGLPEN_W::new(self)
    }
    #[doc = "Bit 26 - USB_PHY1 clock enable during CSleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn usb1ulpilpen(&mut self) -> USB1ULPILPEN_W<C1_AHB1LPENR_SPEC, 26> {
        USB1ULPILPEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Raw write escape hatch: bypasses the field-level writers.
        self.bits = bits;
        self
    }
}
#[doc = "RCC AHB1 Sleep Clock Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`c1_ahb1lpenr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`c1_ahb1lpenr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register.
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct C1_AHB1LPENR_SPEC;
impl crate::RegisterSpec for C1_AHB1LPENR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`c1_ahb1lpenr::R`](R) reader structure"]
impl crate::Readable for C1_AHB1LPENR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`c1_ahb1lpenr::W`](W) writer structure"]
impl crate::Writable for C1_AHB1LPENR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets C1_AHB1LPENR to value 0"]
impl crate::Resettable for C1_AHB1LPENR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
/// Project Euler #6: prints the difference between the square of the sum
/// and the sum of the squares of the first 100 natural numbers.
fn main() {
    let sum = sum_n(100);
    let square_of_sum = sum * sum;
    let sum_of_squares = sum_n_sqr(100);
    println!("diff is {}", square_of_sum - sum_of_squares);
}

/// Sum of the first `n` natural numbers: n(n+1)/2.
/// n(n+1) is always even, so the division is exact.
fn sum_n(n: u32) -> u32 {
    n * (n + 1) / 2
}

/// Sum of the squares of the first `n` natural numbers: n(n+1)(2n+1)/6.
/// Computed as sum_n(n) * (2n+1) / 3, which is still exact
/// (n(n+1)(2n+1)/2 is divisible by 3) and overflows later than
/// multiplying all three factors before dividing.
fn sum_n_sqr(n: u32) -> u32 {
    sum_n(n) * (2 * n + 1) / 3
}
/* * Datadog API V1 Collection * * Collection of all Datadog Public endpoints. * * The version of the OpenAPI document: 1.0 * Contact: support@datadoghq.com * Generated by: https://openapi-generator.tech */ /// AccessRole : The access role of the user. Options are **st** (standard user), **adm** (admin user), or **ro** (read-only user). /// The access role of the user. Options are **st** (standard user), **adm** (admin user), or **ro** (read-only user). #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum AccessRole { #[serde(rename = "st")] STANDARD, #[serde(rename = "adm")] ADMIN, #[serde(rename = "ro")] READ_ONLY, #[serde(rename = "ERROR")] ERROR, } impl ToString for AccessRole { fn to_string(&self) -> String { match self { Self::STANDARD => String::from("st"), Self::ADMIN => String::from("adm"), Self::READ_ONLY => String::from("ro"), Self::ERROR => String::from("ERROR"), } } }
use serde_json::{Value}; use serde_json::json; use crate::ofn_2_ldtab::util as util; pub fn translate(v : &Value) -> Value { match v[0].as_str() { Some("ObjectInverseOf") => translate_inverse_of(v), Some(_) => panic!(), //None => owl::OWL::Named(String::from(v.as_str().unwrap())), None => translate_named_entity(&v), } } pub fn translate_named_entity(v: &Value) -> Value { let o: String = String::from(v.as_str().unwrap()); json!(o) } pub fn translate_inverse_of(v : &Value) -> Value { let argument: Value = translate(&v[1]); let argument_o : Value = json!({"object" : argument, "datatype" : String::from(util::translate_datatype(&v[1]).as_str().unwrap())}); json!({"owl:inverseOf" : vec![argument_o]}) } pub fn get_object(v : &Value) -> Value { let o: Value = translate(&v); let d: String = String::from(util::translate_datatype(&v).as_str().unwrap()); json!({"object" : o, "datatype" : d}) } pub fn translate_list(v : &[Value]) -> Value { //TODO: refactor common parts if v.len() == 1 { let first_o : Value = get_object(&v[0]); let rest_o : Value = get_object(&json!("rdf:nil")); json!({"rdf:first" : vec![first_o], "rdf:rest" : vec![rest_o]}) } else { //let first: Value = translate(&v[0]); let rest: Value = translate_list(&v[1..]);//datatype is necessarily _JSON? let first_o : Value = get_object(&v[0]); let rest_o : Value = json!({"object" : rest, "datatype" : String::from("_JSON")}); //let rest_o : Value = get_object(rest); // json!({"rdf:first" : vec![first_o], "rdf:rest" : vec![rest_o]}) } }
#![allow(non_snake_case)]
#[macro_use]
extern crate lazy_static;
extern crate serde_json;
extern crate vmtests;

use serde_json::Value;
use std::collections::HashMap;
use vmtests::{load_tests, run_test};

lazy_static! {
    static ref TESTS: HashMap<String, Value> = load_tests("tests/vmEnvironmentalInfo/");
}

/// Expands one `#[test]` function per fixture: each test looks up its JSON
/// fixture by key in `TESTS` and asserts that `run_test` reports success.
macro_rules! vm_env_tests {
    ($($name:ident => $key:expr,)*) => {
        $(
            #[test]
            fn $name() {
                assert!(run_test(&TESTS[$key]));
            }
        )*
    };
}

vm_env_tests! {
    test_address0 => "address0",
    test_address1 => "address1",
    test_calldatacopy0 => "calldatacopy0",
    test_calldatacopy0_return => "calldatacopy0_return",
    test_calldatacopy1 => "calldatacopy1",
    test_calldatacopy1_return => "calldatacopy1_return",
    test_calldatacopy2 => "calldatacopy2",
    test_calldatacopy2_return => "calldatacopy2_return",
    test_calldatacopyUnderFlow => "calldatacopyUnderFlow",
    test_calldatacopyZeroMemExpansion => "calldatacopyZeroMemExpansion",
    test_calldatacopyZeroMemExpansion_return => "calldatacopyZeroMemExpansion_return",
    test_calldatacopy_DataIndexTooHigh => "calldatacopy_DataIndexTooHigh",
    test_calldatacopy_DataIndexTooHigh2 => "calldatacopy_DataIndexTooHigh2",
    test_calldatacopy_DataIndexTooHigh2_return => "calldatacopy_DataIndexTooHigh2_return",
    test_calldatacopy_DataIndexTooHigh_return => "calldatacopy_DataIndexTooHigh_return",
    test_calldatacopy_sec => "calldatacopy_sec",
    test_calldataload0 => "calldataload0",
    test_calldataload1 => "calldataload1",
    test_calldataload2 => "calldataload2",
    test_calldataloadSizeTooHigh => "calldataloadSizeTooHigh",
    test_calldataloadSizeTooHighPartial => "calldataloadSizeTooHighPartial",
    test_calldataload_BigOffset => "calldataload_BigOffset",
    test_calldatasize0 => "calldatasize0",
    test_calldatasize1 => "calldatasize1",
    test_calldatasize2 => "calldatasize2",
    test_caller => "caller",
    test_callvalue => "callvalue",
    test_codecopy0 => "codecopy0",
    test_codecopyZeroMemExpansion => "codecopyZeroMemExpansion",
    test_codecopy_DataIndexTooHigh => "codecopy_DataIndexTooHigh",
    test_codesize => "codesize",
    test_gasprice => "gasprice",
    test_origin => "origin",
}
//! Crash-safe commit log: fixed-size, CRC16-protected `Commit` records are
//! appended to numbered log files which are rotated at `MAX_LOG_SIZE` and
//! pruned down to the two most recent sequences.

use super::super::failpoints::failpoint;
#[cfg(test)]
use super::super::failpoints::Failpoints;
use super::error::Error;
use super::file_system::{FileKind, OpenMode, SeriesDir};
use super::io_utils::{ReadBytes, WriteBytes};
use crc::crc16;
use std::collections::VecDeque;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufWriter};
use std::sync::{Arc, RwLock};

// On-disk size of one record: u32 + u32 + i64 + u16 checksum.
const COMMIT_SIZE: usize = 4 + 4 + 8 + 2;

#[cfg(not(test))]
const MAX_LOG_SIZE: usize = 2 * 1024 * 1024;

// Tiny limit in tests so rotation is exercised after a handful of commits.
#[cfg(test)]
const MAX_LOG_SIZE: usize = 80;

/// One durable checkpoint: offsets into the data/index files plus the highest
/// timestamp covered so far.
#[derive(Debug, PartialEq, Clone)]
pub struct Commit {
    pub data_offset: u32,
    pub index_offset: u32,
    pub highest_ts: i64,
}

impl Commit {
    /// CRC16 (USB polynomial) over the three fields in big-endian encoding —
    /// must mirror the field order used by `read`/`write`.
    fn checksum(&self) -> u16 {
        let table = &crc16::USB_TABLE;
        let mut checksum = 0u16;
        checksum = crc16::update(checksum, table, &self.data_offset.to_be_bytes());
        checksum = crc16::update(checksum, table, &self.index_offset.to_be_bytes());
        checksum = crc16::update(checksum, table, &self.highest_ts.to_be_bytes());
        checksum
    }

    /// Reads one record and validates its trailing checksum; a mismatch
    /// (torn/corrupt write) is reported as `Error::Crc16Mismatch`.
    fn read<R: Read>(read: &mut R) -> Result<Commit, Error> {
        let commit = Commit {
            data_offset: read.read_u32()?,
            index_offset: read.read_u32()?,
            highest_ts: read.read_i64()?,
        };
        let checksum = read.read_u16()?;
        if checksum != commit.checksum() {
            return Err(Error::Crc16Mismatch);
        }
        Ok(commit)
    }

    /// Writes one record followed by its checksum. In test builds the
    /// `commit::write` failpoint can inject an I/O error mid-record to
    /// simulate a torn write.
    fn write<W: Write>(
        &self,
        write: &mut W,
        #[cfg(test)] fp: Arc<Failpoints>,
    ) -> Result<(), Error> {
        write.write_u32(&self.data_offset)?;
        write.write_u32(&self.index_offset)?;
        failpoint!(
            fp,
            "commit::write",
            Err(Error::Io(io::Error::new(io::ErrorKind::WriteZero, "fp")))
        );
        write.write_i64(&self.highest_ts)?;
        write.write_u16(&self.checksum())?;
        Ok(())
    }
}

#[cfg(test)]
mod test_commit {
    use super::*;

    // Round-trips a record, then corrupts the checksum bytes and expects
    // Crc16Mismatch.
    #[test]
    fn test_read_write() -> Result<(), Error> {
        let commit = Commit {
            data_offset: 123,
            index_offset: 321,
            highest_ts: 110,
        };
        let mut buf = Vec::new();
        commit.write(&mut buf, Arc::new(Failpoints::create()))?;

        assert_eq!(commit, Commit::read(&mut &buf[..])?);

        buf[COMMIT_SIZE - 2] = 23;
        buf[COMMIT_SIZE - 1] = 21;

        assert!(match Commit::read(&mut &buf[..]) {
            Err(Error::Crc16Mismatch) => true,
            _ => false,
        });
        Ok(())
    }
}

// Sentinel used before any commit has ever been persisted.
const FIRST: Commit = Commit {
    data_offset: 0,
    index_offset: 0,
    highest_ts: i64::MIN,
};

/// Mutable core of the commit log; wrapped in `RwLock` by `CommitLog`.
struct Interior {
    current: Arc<Commit>,
    dir: Arc<SeriesDir>,
    // Log sequence numbers, newest at the front.
    seqs: VecDeque<u64>,
    current_seq: u64,
    // Bytes written to the current log file so far (drives rotation).
    current_size: usize,
    // Set after a failed write; forces a fresh log file before the next commit.
    failure: bool,
    writer: BufWriter<File>,
    #[cfg(test)]
    #[allow(dead_code)]
    fp: Arc<Failpoints>,
}

impl Interior {
    /// Recovers the latest valid commit by scanning existing logs (newest
    /// first, stopping at the first log that yields any valid record), then
    /// starts a fresh log seeded with that commit.
    fn open(dir: Arc<SeriesDir>, #[cfg(test)] fp: Arc<Failpoints>) -> Result<Interior, Error> {
        let mut seqs: VecDeque<u64> = dir.read_log_sequences()?.into();

        let mut current: Option<Commit> = None;

        for seq in seqs.iter() {
            let mut file = dir.open(FileKind::Log(*seq), OpenMode::Write)?;
            loop {
                match Commit::read(&mut file) {
                    // A corrupt tail record ends the scan of this file; the
                    // last successfully read record (if any) wins.
                    Err(Error::Crc16Mismatch) => {
                        log::warn!("crc16 mismatch in log {:?}", &file);
                        break;
                    }
                    Err(Error::Io(error)) => match error.kind() {
                        io::ErrorKind::UnexpectedEof => break,
                        _ => return Err(Error::Io(error)),
                    },
                    Err(error) => return Err(error),
                    Ok(entry) => current = Some(entry),
                }
            }

            if let Some(_) = current {
                break;
            }
        }

        let current = current.unwrap_or(FIRST);
        let current_seq = seqs.front().map(|seq| seq + 1).unwrap_or(0);

        seqs.push_front(current_seq);

        let mut commit_log = Interior {
            current: Arc::new(current.clone()),
            dir: dir.clone(),
            current_seq: current_seq,
            current_size: 0,
            seqs: seqs,
            failure: false,
            writer: BufWriter::new(dir.open(FileKind::Log(current_seq), OpenMode::Write)?),
            #[cfg(test)]
            fp: fp,
        };

        // Re-persist the recovered commit as the first record of the new log.
        commit_log.commit(current)?;

        Ok(commit_log)
    }
}

impl Interior {
    /// Deletes all but the two most recent log files.
    fn cleanup(&mut self) -> Result<(), Error> {
        while self.seqs.len() > 2 {
            if let Some(seq) = self.seqs.back() {
                self.dir.remove_log(*seq)?;
                self.seqs.pop_back();
            }
        }
        Ok(())
    }

    /// Flushes the current log and switches writing to a brand-new sequence.
    fn start_next_seq(&mut self) -> Result<(), Error> {
        let next_seq = self.current_seq + 1;

        self.writer.flush()?;
        self.writer = BufWriter::new(self.dir.open(FileKind::Log(next_seq), OpenMode::Write)?);
        self.current_seq = next_seq;
        self.current_size = 0;

        self.seqs.push_front(next_seq);

        log::debug!("write rotated {:?}", self.writer.get_ref());

        Ok(())
    }

    /// After a failed write the current log may hold a torn record; abandon
    /// it and continue in a fresh file.
    fn recover_if_failed(&mut self) -> Result<(), Error> {
        if self.failure {
            self.start_next_seq()?;
            self.failure = false;
        }
        Ok(())
    }

    /// Rotates (and prunes old logs) once the current file reaches
    /// `MAX_LOG_SIZE`.
    fn rotate_if_needed(&mut self) -> Result<(), Error> {
        if self.current_size < MAX_LOG_SIZE {
            return Ok(());
        }
        self.start_next_seq()?;
        self.cleanup()?;
        Ok(())
    }

    /// Appends `commit` durably; on any write/flush failure marks the log
    /// failed (so the next commit starts a clean file) and propagates the
    /// error. `self.current` is only updated after a successful flush.
    fn commit(&mut self, commit: Commit) -> Result<(), Error> {
        self.recover_if_failed()?;
        self.rotate_if_needed()?;

        match commit.write(
            &mut self.writer,
            #[cfg(test)]
            self.fp.clone(),
        ) {
            Err(error) => {
                log::debug!("commit write failed: {:?} {:?}", error, &commit);
                self.failure = true;
                return Err(error);
            }
            _ => {}
        };

        match self.writer.flush() {
            Err(error) => {
                log::debug!("commit sync failed: {:?}", error);
                self.failure = true;
                return Err(error.into());
            }
            _ => {}
        };

        self.current = Arc::new(commit);
        self.current_size += COMMIT_SIZE;

        Ok(())
    }

    /// Latest durably committed checkpoint.
    fn current(&self) -> Arc<Commit> {
        self.current.clone()
    }
}

#[cfg(test)]
mod test {
    use super::super::file_system;
    use super::*;
    use std::io::{Seek, SeekFrom};

    // Shorthand: a commit whose three fields all equal `i`.
    fn commit(i: usize) -> Commit {
        Commit {
            data_offset: i as u32,
            index_offset: i as u32,
            highest_ts: i as i64,
        }
    }

    #[test]
    fn test_basic() -> Result<(), Error> {
        let fs = file_system::test::open()?;
        let fp = Arc::new(Failpoints::create());
        let dir = fs.series("series1")?;
        {
            let mut log = Interior::open(dir.clone(), fp.clone())?;
            assert_eq!(Arc::new(FIRST), log.current());

            log.commit(commit(1))?;
            log.commit(commit(2))?;
            log.commit(commit(3))?;

            assert_eq!(Arc::new(commit(3)), log.current());

            log.commit(commit(4))?;

            assert_eq!(Arc::new(commit(4)), log.current());
        }
        {
            let mut log = Interior::open(dir.clone(), fp.clone())?;
            assert_eq!(Arc::new(commit(4)), log.current());

            log.commit(commit(5))?;
            log.commit(commit(6))?;
        }

        assert_eq!(vec![1u64, 0u64], dir.read_log_sequences()?);

        // Corrupt bytes past the first record of log 1; recovery should fall
        // back to the last valid commit (4).
        {
            let mut file = dir.open(FileKind::Log(1), OpenMode::Write)?;
            file.seek(SeekFrom::Start(COMMIT_SIZE as u64 + 1))?;
            file.write(&[1, 2, 3])?;
        }

        {
            let log = Interior::open(dir.clone(), fp.clone())?;
            assert_eq!(Arc::new(commit(4)), log.current());
        }
        Ok(())
    }

    #[test]
    fn test_rotate() -> Result<(), Error> {
        let fs = file_system::test::open()?;
        let fp = Arc::new(Failpoints::create());
        let dir = fs.series("series1")?;
        {
            let mut log = Interior::open(dir.clone(), fp.clone())?;
            for i in 0..19 {
                log.commit(commit(i))?;
            }
            // Only the two newest sequences survive cleanup.
            assert_eq!(vec![3u64, 2u64], dir.read_log_sequences()?);
        }
        {
            let log = Interior::open(dir.clone(), fp.clone())?;
            assert_eq!(Arc::new(commit(18)), log.current());
        }
        Ok(())
    }

    #[test]
    fn test_recover() -> Result<(), Error> {
        let fp = Arc::new(Failpoints::create());
        let fs = file_system::test::open()?;
        let dir = fs.series("series1")?;
        {
            let mut log = Interior::open(dir.clone(), fp.clone())?;
            log.commit(commit(0))?;
            log.commit(commit(1))?;

            // Inject a mid-record write failure, then verify the next commit
            // succeeds in a fresh log file.
            fp.on("commit::write");
            log.commit(commit(2)).unwrap_err();
            fp.off("commit::write");

            log.commit(commit(2))?;
        }
        {
            let log = Interior::open(dir.clone(), fp.clone())?;
            assert_eq!(Arc::new(commit(2)), log.current());
        }
        Ok(())
    }
}

/// Thread-safe public handle over `Interior`.
pub struct CommitLog {
    inter: Arc<RwLock<Interior>>,
}

impl CommitLog {
    /// Opens (and recovers) the commit log for `dir`.
    pub fn open(dir: Arc<SeriesDir>, #[cfg(test)] fp: Arc<Failpoints>) -> Result<CommitLog, Error> {
        Ok(CommitLog {
            inter: Arc::new(RwLock::new(Interior::open(
                dir,
                #[cfg(test)]
                fp,
            )?)),
        })
    }

    /// Durably appends `commit` (exclusive lock).
    pub fn commit(&self, commit: Commit) -> Result<(), Error> {
        let mut inter = self.inter.write().unwrap();
        inter.commit(commit)
    }

    /// Latest committed checkpoint (shared lock).
    pub fn current(&self) -> Arc<Commit> {
        let inter = self.inter.read().unwrap();
        inter.current()
    }
}
//! ICE candidate-pair selectors (RFC 8445): a controlling-role and a
//! controlled-role strategy, both implemented on `AgentInternal`, with
//! role-dispatching wrapper methods.

use crate::agent::agent_internal::*;
use crate::candidate::*;
use crate::control::*;
use crate::priority::*;
use crate::use_candidate::*;
use stun::{agent::*, attributes::*, fingerprint::*, integrity::*, message::*, textattrs::*};

use async_trait::async_trait;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use tokio::time::Instant;

/// Behavior of the controlling agent: it nominates candidate pairs.
#[async_trait]
trait ControllingSelector {
    fn start(&mut self);
    async fn contact_candidates(&mut self);
    async fn ping_candidate(
        &mut self,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    );
    async fn handle_success_response(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
        remote_addr: SocketAddr,
    );
    async fn handle_binding_request(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    );
}

/// Behavior of the controlled agent: it accepts nominations from the peer.
#[async_trait]
trait ControlledSelector {
    fn start(&mut self);
    async fn contact_candidates(&mut self);
    async fn ping_candidate(
        &mut self,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    );
    async fn handle_success_response(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
        remote_addr: SocketAddr,
    );
    async fn handle_binding_request(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    );
}

impl AgentInternal {
    /// Whether candidate `c` has waited long enough since agent start (per
    /// its candidate type's configured acceptance-min-wait) to be nominated.
    async fn is_nominatable(&self, c: &Arc<dyn Candidate + Send + Sync>) -> bool {
        match c.candidate_type() {
            CandidateType::Host => {
                Instant::now().duration_since(self.start_time).as_nanos()
                    > self.host_acceptance_min_wait.as_nanos()
            }
            CandidateType::ServerReflexive => {
                Instant::now().duration_since(self.start_time).as_nanos()
                    > self.srflx_acceptance_min_wait.as_nanos()
            }
            CandidateType::PeerReflexive => {
                Instant::now().duration_since(self.start_time).as_nanos()
                    > self.prflx_acceptance_min_wait.as_nanos()
            }
            CandidateType::Relay => {
                Instant::now().duration_since(self.start_time).as_nanos()
                    > self.relay_acceptance_min_wait.as_nanos()
            }
            CandidateType::Unspecified => {
                log::error!(
                    "is_nominatable invalid candidate type {}",
                    c.candidate_type()
                );
                false
            }
        }
    }

    /// Sends a Binding request carrying USE-CANDIDATE for the currently
    /// selected `nominated_pair` (controlling role only). No-op when no pair
    /// has been chosen for nomination.
    async fn nominate_pair(&mut self) {
        if let Some(pair) = &self.nominated_pair {
            // The controlling agent MUST include the USE-CANDIDATE attribute in
            // order to nominate a candidate pair (Section 8.1.1). The controlled
            // agent MUST NOT include the USE-CANDIDATE attribute in a Binding
            // request.
            let (msg, result) = {
                let username = self.remote_ufrag.clone() + ":" + self.local_ufrag.as_str();
                let mut msg = Message::new();
                let result = msg.build(&[
                    Box::new(BINDING_REQUEST),
                    Box::new(TransactionId::new()),
                    Box::new(Username::new(ATTR_USERNAME, username)),
                    Box::new(UseCandidateAttr::default()),
                    Box::new(AttrControlling(self.tie_breaker)),
                    Box::new(PriorityAttr(pair.local.priority())),
                    Box::new(MessageIntegrity::new_short_term_integrity(
                        self.remote_pwd.clone(),
                    )),
                    Box::new(FINGERPRINT),
                ]);
                (msg, result)
            };

            if let Err(err) = result {
                log::error!("{}", err);
            } else {
                log::trace!(
                    "ping STUN (nominate candidate pair from {} to {}",
                    pair.local,
                    pair.remote
                );
                // Clone out of `pair` so `self` can be mutably borrowed below.
                let local = pair.local.clone();
                let remote = pair.remote.clone();
                self.send_binding_request(&msg, &local, &remote).await;
            }
        }
    }

    // The following pub(crate) methods dispatch to the controlling or
    // controlled trait implementation based on `is_controlling`.

    pub(crate) fn start(&mut self) {
        if self.is_controlling {
            ControllingSelector::start(self);
        } else {
            ControlledSelector::start(self);
        }
    }

    pub(crate) async fn contact_candidates(&mut self) {
        if self.is_controlling {
            ControllingSelector::contact_candidates(self).await;
        } else {
            ControlledSelector::contact_candidates(self).await;
        }
    }

    pub(crate) async fn ping_candidate(
        &mut self,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        if self.is_controlling {
            ControllingSelector::ping_candidate(self, local, remote).await;
        } else {
            ControlledSelector::ping_candidate(self, local, remote).await;
        }
    }

    pub(crate) async fn handle_success_response(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
        remote_addr: SocketAddr,
    ) {
        if self.is_controlling {
            ControllingSelector::handle_success_response(self, m, local, remote, remote_addr).await;
        } else {
            ControlledSelector::handle_success_response(self, m, local, remote, remote_addr).await;
        }
    }

    pub(crate) async fn handle_binding_request(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        if self.is_controlling {
            ControllingSelector::handle_binding_request(self, m, local, remote).await;
        } else {
            ControlledSelector::handle_binding_request(self, m, local, remote).await;
        }
    }
}

#[async_trait]
impl ControllingSelector for AgentInternal {
    fn start(&mut self) {
        self.start_time = Instant::now();
        self.nominated_pair = None;
    }

    async fn contact_candidates(&mut self) {
        // A lite selector should not contact candidates
        if self.lite {
            // This only happens if both peers are lite. See RFC 8445 S6.1.1 and S6.2
            log::trace!("now falling back to full agent");
        }

        if self.agent_conn.get_selected_pair().await.is_some() {
            if self.validate_selected_pair().await {
                log::trace!("checking keepalive");
                self.check_keepalive().await;
            }
        } else if self.nominated_pair.is_some() {
            self.nominate_pair().await;
        } else {
            // No selected or nominated pair yet: nominate the best valid pair
            // once both of its candidates have aged past their min-wait.
            let has_nominated_pair =
                if let Some(p) = self.agent_conn.get_best_valid_candidate_pair().await {
                    self.is_nominatable(&p.local).await && self.is_nominatable(&p.remote).await
                } else {
                    false
                };

            if has_nominated_pair {
                if let Some(p) = self.agent_conn.get_best_valid_candidate_pair().await {
                    log::trace!(
                        "Nominatable pair found, nominating ({}, {})",
                        p.local.to_string(),
                        p.remote.to_string()
                    );
                    p.nominated.store(true, Ordering::SeqCst);
                    self.nominated_pair = Some(p);
                }

                self.nominate_pair().await;
            } else {
                self.ping_all_candidates().await;
            }
        }
    }

    /// Sends a connectivity-check Binding request (ICE-CONTROLLING, no
    /// USE-CANDIDATE) from `local` to `remote`.
    async fn ping_candidate(
        &mut self,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        let (msg, result) = {
            let username = self.remote_ufrag.clone() + ":" + self.local_ufrag.as_str();
            let mut msg = Message::new();
            let result = msg.build(&[
                Box::new(BINDING_REQUEST),
                Box::new(TransactionId::new()),
                Box::new(Username::new(ATTR_USERNAME, username)),
                Box::new(AttrControlling(self.tie_breaker)),
                Box::new(PriorityAttr(local.priority())),
                Box::new(MessageIntegrity::new_short_term_integrity(
                    self.remote_pwd.clone(),
                )),
                Box::new(FINGERPRINT),
            ]);
            (msg, result)
        };

        if let Err(err) = result {
            log::error!("{}", err);
        } else {
            self.send_binding_request(&msg, local, remote).await;
        }
    }

    async fn handle_success_response(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
        remote_addr: SocketAddr,
    ) {
        if let Some(pending_request) = self.handle_inbound_binding_success(m.transaction_id) {
            let transaction_addr = pending_request.destination;

            // Assert that NAT is not symmetric
            // https://tools.ietf.org/html/rfc8445#section-7.2.5.2.1
            if transaction_addr != remote_addr {
                log::debug!("discard message: transaction source and destination does not match expected({}), actual({})", transaction_addr, remote);
                return;
            }

            log::trace!(
                "inbound STUN (SuccessResponse) from {} to {}",
                remote,
                local
            );
            let selected_pair_is_none = self.agent_conn.get_selected_pair().await.is_none();

            if let Some(p) = self.find_pair(local, remote).await {
                p.state
                    .store(CandidatePairState::Succeeded as u8, Ordering::SeqCst);
                log::trace!(
                    "Found valid candidate pair: {}, p.state: {}, isUseCandidate: {}, {}",
                    p,
                    p.state.load(Ordering::SeqCst),
                    pending_request.is_use_candidate,
                    selected_pair_is_none
                );
                // A success to a USE-CANDIDATE check selects the pair (first
                // selection wins).
                if pending_request.is_use_candidate && selected_pair_is_none {
                    self.set_selected_pair(Some(Arc::clone(&p))).await;
                }
            } else {
                // This shouldn't happen
                log::error!("Success response from invalid candidate pair");
            }
        } else {
            log::warn!(
                "discard message from ({}), unknown TransactionID 0x{:?}",
                remote,
                m.transaction_id
            );
        }
    }

    async fn handle_binding_request(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        self.send_binding_success(m, local, remote).await;
        log::trace!("controllingSelector: sendBindingSuccess");

        if let Some(p) = self.find_pair(local, remote).await {
            log::trace!(
                "controllingSelector: after findPair {}, p.state: {}, {}, {}",
                p,
                p.state.load(Ordering::SeqCst),
                self.nominated_pair.is_none(),
                self.agent_conn.get_selected_pair().await.is_none()
            );
            // A succeeded pair may be promoted to nominated if nothing has
            // been nominated/selected yet and it is the best available pair.
            if p.state.load(Ordering::SeqCst) == CandidatePairState::Succeeded as u8
                && self.nominated_pair.is_none()
                && self.agent_conn.get_selected_pair().await.is_none()
            {
                if let Some(best_pair) = self.agent_conn.get_best_available_candidate_pair().await {
                    log::trace!(
                        "controllingSelector: getBestAvailableCandidatePair {}",
                        best_pair
                    );
                    if best_pair == p
                        && self.is_nominatable(&p.local).await
                        && self.is_nominatable(&p.remote).await
                    {
                        log::trace!("The candidate ({}, {}) is the best candidate available, marking it as nominated",
                            p.local, p.remote);
                        self.nominated_pair = Some(p);
                        self.nominate_pair().await;
                    }
                } else {
                    log::trace!("No best pair available");
                }
            }
        } else {
            log::trace!("controllingSelector: addPair");
            self.add_pair(local.clone(), remote.clone()).await;
        }
    }
}

#[async_trait]
impl ControlledSelector for AgentInternal {
    fn start(&mut self) {}

    async fn contact_candidates(&mut self) {
        // A lite selector should not contact candidates
        if self.lite {
            self.validate_selected_pair().await;
        } else if self.agent_conn.get_selected_pair().await.is_some() {
            if self.validate_selected_pair().await {
                log::trace!("checking keepalive");
                self.check_keepalive().await;
            }
        } else {
            self.ping_all_candidates().await;
        }
    }

    /// Sends a connectivity-check Binding request (ICE-CONTROLLED, never
    /// USE-CANDIDATE) from `local` to `remote`.
    async fn ping_candidate(
        &mut self,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        let (msg, result) = {
            let username = self.remote_ufrag.clone() + ":" + self.local_ufrag.as_str();
            let mut msg = Message::new();
            let result = msg.build(&[
                Box::new(BINDING_REQUEST),
                Box::new(TransactionId::new()),
                Box::new(Username::new(ATTR_USERNAME, username)),
                Box::new(AttrControlled(self.tie_breaker)),
                Box::new(PriorityAttr(local.priority())),
                Box::new(MessageIntegrity::new_short_term_integrity(
                    self.remote_pwd.clone(),
                )),
                Box::new(FINGERPRINT),
            ]);
            (msg, result)
        };

        if let Err(err) = result {
            log::error!("{}", err);
        } else {
            self.send_binding_request(&msg, local, remote).await;
        }
    }

    async fn handle_success_response(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
        remote_addr: SocketAddr,
    ) {
        // https://tools.ietf.org/html/rfc8445#section-7.3.1.5
        // If the controlled agent does not accept the request from the
        // controlling agent, the controlled agent MUST reject the nomination
        // request with an appropriate error code response (e.g., 400)
        // [RFC5389].
        if let Some(pending_request) = self.handle_inbound_binding_success(m.transaction_id) {
            let transaction_addr = pending_request.destination;

            // Assert that NAT is not symmetric
            // https://tools.ietf.org/html/rfc8445#section-7.2.5.2.1
            if transaction_addr != remote_addr {
                log::debug!("discard message: transaction source and destination does not match expected({}), actual({})", transaction_addr, remote);
                return;
            }

            log::trace!(
                "inbound STUN (SuccessResponse) from {} to {}",
                remote,
                local
            );

            if let Some(p) = self.find_pair(local, remote).await {
                p.state
                    .store(CandidatePairState::Succeeded as u8, Ordering::SeqCst);
                log::trace!("Found valid candidate pair: {}", p);
            } else {
                // This shouldn't happen
                log::error!("Success response from invalid candidate pair");
            }
        } else {
            log::warn!(
                "discard message from ({}), unknown TransactionID 0x{:?}",
                remote,
                m.transaction_id
            );
        }
    }

    async fn handle_binding_request(
        &mut self,
        m: &Message,
        local: &Arc<dyn Candidate + Send + Sync>,
        remote: &Arc<dyn Candidate + Send + Sync>,
    ) {
        if self.find_pair(local, remote).await.is_none() {
            self.add_pair(local.clone(), remote.clone()).await;
        }

        if let Some(p) = self.find_pair(local, remote).await {
            let use_candidate = m.contains(ATTR_USE_CANDIDATE);
            if use_candidate {
                // https://tools.ietf.org/html/rfc8445#section-7.3.1.5

                if p.state.load(Ordering::SeqCst) == CandidatePairState::Succeeded as u8 {
                    // If the state of this pair is Succeeded, it means that the check
                    // previously sent by this pair produced a successful response and
                    // generated a valid pair (Section 7.2.5.3.2). The agent sets the
                    // nominated flag value of the valid pair to true.
                    if self.agent_conn.get_selected_pair().await.is_none() {
                        self.set_selected_pair(Some(Arc::clone(&p))).await;
                    }
                    self.send_binding_success(m, local, remote).await;
                } else {
                    // If the received Binding request triggered a new check to be
                    // enqueued in the triggered-check queue (Section 7.3.1.4), once the
                    // check is sent and if it generates a successful response, and
                    // generates a valid pair, the agent sets the nominated flag of the
                    // pair to true. If the request fails (Section 7.2.5.2), the agent
                    // MUST remove the candidate pair from the valid list, set the
                    // candidate pair state to Failed, and set the checklist state to
                    // Failed.
                    self.ping_candidate(local, remote).await;
                }
            } else {
                self.send_binding_success(m, local, remote).await;
                self.ping_candidate(local, remote).await;
            }
        }
    }
}
//! Integration tests for the `generate` subcommand: each test seeds an empty
//! project config, runs `short generate …`, and checks both the CLI output
//! and the files it writes.
//!
//! NOTE(review): the multi-line YAML fixture/expected strings below follow
//! the standard serde_yaml 2-space layout — confirm against the repository
//! originals, since the exact line breaks matter to the assertions.

use predicates::prelude::Predicate;
use predicates::str::contains;
use test_utils::init;

use crate::test_utils::{HOME_CFG_FILE, PROJECT_CFG_FILE, PROJECT_DIR};
use short::BIN_NAME;

mod test_utils;

// Generates setup `test_setup_1` from template `test` into the project root.
#[test]
fn generate_template() {
    let mut e = init("generate_template");
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups: {}
        "#,
    );
    e.setup();
    let mut command = e.command(BIN_NAME).unwrap();
    let r = command
        .env("RUST_LOG", "debug")
        .arg("generate")
        .arg("test_setup_1")
        .args(&["-t", "test"])
        .assert()
        .to_string();
    assert!(contains("generate setup `test_setup_1`:`dev`").eval(&r));

    // The template must have materialized an env dir and a run script.
    let env_dev = e.path().unwrap().join(PROJECT_DIR).join("env/.dev");
    assert!(env_dev.exists());
    let run_sh = e.path().unwrap().join(PROJECT_DIR).join("run.sh");
    assert!(run_sh.exists());

    // Project config must now describe the generated setup.
    let r = e.read_file(PROJECT_CFG_FILE);
    assert_eq!(
        r#"---
setups:
  test_setup_1:
    public_env_dir: "./env/"
    file: run.sh
    array_vars:
      all: ".*"
    vars:
      - SETUP_NAME"#,
        &r
    );

    // Home config records the active setup/env exactly once.
    let r = e.read_file(HOME_CFG_FILE);
    assert!(contains("setup: test_setup_1").count(1).eval(&r));
    assert!(contains("env: dev").count(1).eval(&r));
    assert!(contains("test_setup_1").count(2).eval(&r));
}

// `-d <dir>` prefixes the generated paths with an explicit target directory.
#[test]
fn generate_template_with_target_directory() {
    let mut e = init("generate_template");
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups: {}
        "#,
    );
    e.setup();
    let mut command = e.command(BIN_NAME).unwrap();
    let r = command
        .env("RUST_LOG", "debug")
        .arg("generate")
        .arg("test_setup_1")
        .args(&["-t", "test"])
        .args(&["-d", "target_directory"])
        .assert()
        .to_string();
    assert!(contains("generate setup `test_setup_1`:`dev`").eval(&r));

    let r = e.read_file(PROJECT_CFG_FILE);
    assert_eq!(
        r#"---
setups:
  test_setup_1:
    public_env_dir: target_directory/./env/
    file: target_directory/run.sh
    array_vars:
      all: ".*"
    vars:
      - SETUP_NAME"#,
        &r
    );
}

// Bare `-d` derives the target directory from the setup name.
#[test]
fn generate_template_with_auto_target_directory() {
    let mut e = init("generate_template");
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups: {}
        "#,
    );
    e.setup();
    let mut command = e.command(BIN_NAME).unwrap();
    let r = command
        .env("RUST_LOG", "debug")
        .arg("generate")
        .arg("test_setup_1")
        .args(&["-t", "test"])
        .args(&["-d"])
        .assert()
        .to_string();
    assert!(contains("generate setup `test_setup_1`:`dev`").eval(&r));

    let r = e.read_file(PROJECT_CFG_FILE);
    assert_eq!(
        r#"---
setups:
  test_setup_1:
    public_env_dir: test_setup_1/./env/
    file: test_setup_1/run.sh
    array_vars:
      all: ".*"
    vars:
      - SETUP_NAME"#,
        &r
    );
}

// Bare `-t` derives the template from the setup name (`test`).
#[test]
fn generate_template_with_auto_template() {
    let mut e = init("generate_template");
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups: {}
        "#,
    );
    e.setup();
    let mut command = e.command(BIN_NAME).unwrap();
    let r = command
        .env("RUST_LOG", "debug")
        .arg("generate")
        .arg("test")
        .args(&["-t"])
        .assert()
        .to_string();
    assert!(contains("generate setup `test`:`dev`").eval(&r));

    let r = e.read_file(PROJECT_CFG_FILE);
    assert_eq!(
        r#"---
setups:
  test:
    public_env_dir: "./env/"
    file: run.sh
    array_vars:
      all: ".*"
    vars:
      - SETUP_NAME"#,
        &r
    );
}
//! rustc --edition 2018 \ //! -C lto=yes \ //! -C codegen-units=1 \ //! -C opt-level=3 \ //! -C overflow-checks=no \ //! -C panic=abort \ //! -C target-cpu=native \ //! solution1.rs include!("common.rs"); mod basics { use std::convert::{From, TryInto}; use std::cmp::{Ord, PartialOrd, Eq, PartialEq, Ordering, min}; use std::fmt::{Debug, Formatter, Error}; #[derive(Copy, Clone, Debug)] struct Wheel(u8); const WHEELS: [Wheel; 4] = [ Wheel(0), Wheel(1), Wheel(2), Wheel(3), ]; #[derive(Copy, Clone, Default, Eq, PartialEq, Hash)] pub struct State (u16); const BITS: u8 = 4; // number of bits for one wheel const MASK: u16 = (1 << BITS) - 1; impl State { #[inline(always)] fn rotate_up(mut self, wheel: Wheel) -> Self { let bits = wheel.0 * BITS; let val = (self.0 >> bits) & MASK; if val == 9 { let mask = !(MASK << bits); self.0 &= mask; } else { self.0 += 1 << bits; } self } #[inline(always)] fn rotate_down(mut self, wheel: Wheel) -> Self { let bits = wheel.0 * BITS; let val = (self.0 >> bits) & MASK; if val == 0 { self.0 |= 9 << bits; } else { self.0 -= 1 << bits; } self } #[inline(always)] fn to_array(self) -> [u8; 4] { let mut arr = [0_u8; 4]; for (bits, entry) in (0..16).step_by(BITS as usize).zip(&mut arr) { *entry = ((self.0 >> bits) & MASK) as u8 } arr } #[inline(always)] fn to_decimal(self) -> u16 { self.to_array() .iter() .zip(&[1_u16, 10_u16, 100_u16, 1000_u16]) .map(|(entry, ratio)| (*entry as u16) * (*ratio as u16)) .sum() } } impl Debug for State { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { f.write_fmt(format_args!("{:?}", self.to_array())) } } impl From<String> for State { #[inline(always)] fn from(s: String) -> Self { let mut state = 0; let bytes = s.into_bytes(); // Make sure the array is of length 4 AOT so that the compiler can perform // loop unrolling and vectorization. 
let bytes: &[u8; 4] = bytes.as_slice().try_into().unwrap(); for (bits, byte) in (0..16).step_by(BITS as usize).zip(bytes) { let val = (byte - b'0') as u16; state |= val << bits; } Self(state) } } #[inline(always)] fn distance(x: State, y: State) -> u8 { let x = x.to_array(); let y = y.to_array(); let substracted = x.iter() .zip(&y) .map(|(x, y)| (*x as i8) - (*y as i8)); let diff1 = substracted.clone() .map(|result| { if result < 0 { (0 - result) as u8 } else { result as u8 } }); let diff2 = substracted.clone() .map(|result| (10 + result) as u8); let diff3 = substracted .map(|result| (10 - result) as u8); let min_tup = |(v1, v2)| min(v1, v2); diff1.zip(diff2) .map(min_tup) .zip(diff3) .map(min_tup) .sum() } const MAX_SIZE: usize = 10 * 10 * 10 * 10; const BITSET_SIZE: usize = MAX_SIZE / 8 + (MAX_SIZE % 8) as usize; /// * `SIZE` - number of bytes required for your array struct Bitset { array: [u8; BITSET_SIZE], } impl Default for Bitset { fn default() -> Self { Self { array: [0; BITSET_SIZE], } } } impl Bitset { #[inline(always)] fn set(&mut self, index: usize) { let byte = &mut self.array[index / 8]; *byte |= 1 << (index % 8); } #[inline(always)] fn get(&self, index: usize) -> bool { let bit = self.array[index / 8] >> (index % 8); (bit & 1) != 0 } } #[derive(Default)] pub struct Expander { /// the deadends and the expanded excluded: Bitset, } impl Expander { pub fn new(v: Vec<String>) -> Self { let mut expander: Self = Default::default(); let deadends = &mut expander.excluded; for s in v { let state: State = s.into(); deadends.set(state.to_decimal() as usize); } expander } /// Return (# num of states returned, states, as a fixed-size array) #[inline(always)] fn do_expand(&self, state: State) -> (usize, [State; 8]) { let mut cnt: usize = 0; let mut states = [<State as Default>::default(); 8]; // rotate upwards and downwards separately in different loops to help // vectorization and loop unroll let it1 = WHEELS.iter().map(|wheel| state.rotate_up(*wheel)); let it2 = 
WHEELS.iter().map(|wheel| state.rotate_down(*wheel)); it1.chain(it2) .map(|new_state| (new_state, new_state.to_decimal())) .for_each(|(new_state, index)| { if !self.excluded.get(index as usize) { states[cnt] = new_state; cnt += 1; } }); (cnt, states) } /// Return (# num of states returned, states, as a fixed-size array) #[inline(always)] pub fn expand(&mut self, state: State) -> (usize, [State; 8]) { let index = state.to_decimal() as usize; if self.excluded.get(index) { return Default::default(); } let ret = self.do_expand(state); self.excluded.set(index); ret } /// This function should be called on the states returned by expand #[inline(always)] pub fn heuristic(&self, state: State, target: State) -> u8 { distance(state, target) } } /// First field represents step_cnt, second represents heuristic /// Since there are at most 10 * 10 * 10 * 10 possible states, the maximum /// step_cnt must also be 10000. /// u16 is more than enough for 10000. #[derive(Copy, Clone, Debug)] pub struct HeapEntry(pub u16, pub u8, pub State); impl HeapEntry { #[inline(always)] fn get_cost(self) -> u16 { self.0 + self.1 as u16 } } impl PartialEq for HeapEntry { #[inline(always)] fn eq(&self, other: &Self) -> bool { self.get_cost() == other.get_cost() } } impl Eq for HeapEntry {} impl PartialOrd for HeapEntry { #[inline(always)] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for HeapEntry { #[inline(always)] fn cmp(&self, other: &Self) -> Ordering { self.get_cost().cmp(&other.get_cost()) } } } // end of mod basics impl Solution { /// Implemented using A* algorithm. 
pub fn open_lock(deadends: Vec<String>, target: String) -> i32 { use basics::*; use std::collections::binary_heap::BinaryHeap; // Since BinaryHeap by default returns the biggest, Reverse is required use std::cmp::Reverse; let target: State = target.into(); let mut state: State = Default::default(); let mut expander = Expander::new(deadends); let mut heap = BinaryHeap::with_capacity(100); let mut step_cnt: u16 = 0; loop { if state == target { break step_cnt as i32; } step_cnt += 1; let (sz, arr) = expander.expand(state); let it = (&arr[0..sz]).iter() .map(|child_state| { let child_state = *child_state; let h = expander.heuristic(child_state, target); Reverse(HeapEntry(step_cnt, h, child_state)) }); heap.extend(it); match heap.pop() { Some(entry) => { let entry = entry.0; step_cnt = entry.0; state = entry.2; }, None => break -1, }; } } } // end of impl Solution
use crate::types::CmdResult; pub fn do_what(word: &str) -> CmdResult { CmdResult::new(false, format!("What do you want to {}?", word)) } pub fn dont_have(name: &str) -> CmdResult { CmdResult::new(false, format!("You do not have the \"{}\".", name)) }
/* * Datadog API V1 Collection * * Collection of all Datadog Public endpoints. * * The version of the OpenAPI document: 1.0 * Contact: support@datadoghq.com * Generated by: https://openapi-generator.tech */ /// DashboardListListResponse : Information on your dashboard lists. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DashboardListListResponse { /// List of all your dashboard lists. #[serde(rename = "dashboard_lists", skip_serializing_if = "Option::is_none")] pub dashboard_lists: Option<Vec<crate::models::DashboardList>>, } impl DashboardListListResponse { /// Information on your dashboard lists. pub fn new() -> DashboardListListResponse { DashboardListListResponse { dashboard_lists: None, } } }
const MAX_TRIES: u64 = 1; // Attempt to stress tokio::process::Command::spawn // to reproduce a `WouldBlock` error. #[test] fn main() { env_logger::init(); tokio_test::block_on(async { // A little time to launch dtruss tokio::time::delay_for(std::time::Duration::new(3, 0)).await; let mut cmd = tokio::process::Command::new("/bin/ls"); cmd.kill_on_drop(true) // Cleanup at the end of the test .stdout(std::process::Stdio::piped()); // let mut processes = vec![]; for i in 0..10000 { eprintln!("Process {}", i); let mut error = None; for i in 0..MAX_TRIES { match cmd.spawn() { Ok(mut process) => { let _ = process.kill(); error = None; break; } Err(err) => { if err.kind() == std::io::ErrorKind::WouldBlock { error = Some(err); if i + 1 < MAX_TRIES { tokio::time::delay_for(std::time::Duration::new( (i + 1) * (i + 1), 0, )) .await; } continue; } panic!("Could not spawn process: {:?}", err); } } } if let Some(err) = error { panic!( "Could not spawn process after {} retries: {:?}", MAX_TRIES, err ); } } }); }
use std::error::Error;
use std::net::TcpListener;
use std::path::PathBuf;
use std::thread;

use clap;

use output::Output;

pub use server::handler::handle_client;

use zbackup::repository::*;

use misc::*;
use misc::args::ClapSubCommandRzbackupArgs;

/// Open the repository named in `arguments`, run the listener loop until it
/// returns, then close the repository.
///
/// Returns `Ok (true)` on a normal shutdown; any failure is wrapped in a
/// descriptive `Err (String)`.
pub fn run_server (
	output: & Output,
	arguments: & ServerArguments,
) -> Result <bool, String> {

	// Open the repository first so a bad path or password fails fast,
	// before we start listening.
	let repository =
		string_result_with_prefix (
			|| format! (
				"Error opening repository: "),
			Repository::open (
				& output,
				arguments.repository_config.clone (),
				& arguments.repository_path,
				arguments.password_file_path.as_ref (),
			),
		) ?;

	output.message (
		"RZBackup startup complete");

	string_result_with_prefix (
		|| format! (
			"RZBackup server encountered error: "),
		run_server_listener (
			repository.clone (),
			& arguments.listen_address,
		),
	) ?;

	// clean up and return

	repository.close (
		output);

	output.message (
		"RZBackup server terminating normally");

	Ok (true)

}

/// Accept TCP connections forever, handling each client on its own thread.
///
/// Only returns `Err` when binding the listen address fails; per-client
/// accept errors are printed and otherwise ignored.
pub fn run_server_listener (
	repository: Repository,
	bind_address: & str,
) -> Result <(), String> {

	let listener =
		io_result (
			TcpListener::bind (
				bind_address),
		) ?;

	for stream in listener.incoming () {

		match stream {

			Ok (stream) => {

				// each client thread gets its own repository handle
				let repository_copy =
					repository.clone ();

				thread::spawn (
					move || {

					handle_client (
						& repository_copy,
						stream)

				} );

			},

			Err (error) => {

				println! (
					"Connection failed: {}",
					error.description ());

			},

		}

	};

	Ok (())

}

// Declares the `server` subcommand: its clap argument definitions, the
// parsing of matches into `ServerArguments`, and the action that invokes
// `run_server`.
command! (

	name = server,
	export = server_command,

	arguments = ServerArguments {
		repository_path: PathBuf,
		password_file_path: Option <PathBuf>,
		repository_config: RepositoryConfig,
		listen_address: String,
	},

	clap_subcommand = {

		clap::SubCommand::with_name ("server")
			.about ("Server component")

			.arg (
				clap::Arg::with_name ("repository")

				.long ("repository")
				.alias ("repository-path")
				.value_name ("REPOSITORY")
				.required (true)
				.help ("Path to the repository, used to obtain encryption key")

			)

			.arg (
				clap::Arg::with_name ("password-file")

				.long ("password-file")
				.alias ("password-file-path")
				.value_name ("PASSWORD-FILE")
				.required (false)
				.help ("Path to the password file")

			)

			.arg (
				clap::Arg::with_name ("listen-address")

				.long ("listen-address")
				.value_name ("ADDRESS:PORT")
				.default_value ("localhost:4152")
				.help ("Address to listen on, in host:port or ip:port format.")

			)

			.repository_config_args ()

			// Deprecated flags kept (hidden) for command-line compatibility.
			.arg (
				clap::Arg::with_name ("work-jobs-total")

				.long ("work-jobs-total")
				.value_name ("JOBS")
				.default_value ("0")
				.hidden (true)
				.help ("Deprecated and ignored")

			)

			.arg (
				clap::Arg::with_name ("work-jobs-batch")

				.long ("work-jobs-batch")
				.value_name ("JOBS")
				.default_value ("0")
				.hidden (true)
				.help ("Deprecated and ignored")

			)

	},

	clap_arguments_parse = |clap_matches| {

		ServerArguments {

			repository_path:
				args::path_required (
					clap_matches,
					"repository"),

			password_file_path:
				args::path_optional (
					clap_matches,
					"password-file"),

			repository_config:
				args::repository_config (
					clap_matches),

			listen_address:
				args::string_required (
					clap_matches,
					"listen-address"),

		}

	},

	action = |output, arguments| {
		run_server (output, arguments)
	},

);

// ex: noet ts=4 filetype=rust
use std::io::prelude::*; use std::fs::File; use std::io::{BufReader, Error}; use utils; fn read_numbers_as_digit_array() -> Result<Vec<Vec<u32>>, Error> { let f = try!(File::open("../data/problem_013_input.txt")); let reader = BufReader::new(f); let mut nums = vec![vec![]]; for line in reader.lines() { let line = try!(line); let row: Vec<u32> = line.chars().map(|d| d.to_string().parse::<u32>().unwrap()).collect(); nums.push(row); } Ok(nums) } pub fn problem_013() -> u64 { let nums = read_numbers_as_digit_array().unwrap(); let initial_sum = vec![0]; let sum_digit_array = nums.iter().fold(initial_sum,|acc,ref b| utils::add_digit_array(&acc, b)); let first_10_digits: Vec<u32> = sum_digit_array[0..10].to_vec(); utils::from_digit_array(first_10_digits) } #[cfg(test)] mod test { use super::*; use test::Bencher; #[test] fn test_problem_013() { let ans: u64 = problem_013(); println!("Answer to Problem 13: {}", ans); assert!(ans == 5537376230) } #[bench] fn bench_problem_013(b: &mut Bencher) { b.iter(|| problem_013()); } }
use std::env; use std::fs::File; use std::io::Write; use std::path::PathBuf; use git2::Repository; #[macro_use] extern crate quote; fn git_data(repo_src: PathBuf) -> Result<(String, String), Box<dyn std::error::Error>> { let repo = Repository::open(repo_src)?; let head = repo.head()?; let oid = head.target().expect("a valid oid").to_string(); let shortname = head.shorthand().expect("a valid shortname").to_string(); Ok((oid, shortname)) } fn main() -> Result<(), Box<dyn std::error::Error>> { let repo_src = PathBuf::from(&env::var("SYNTH_SRC").unwrap_or("./.".to_string())); let (oid, shortname) = git_data(repo_src).unwrap_or(("unknown".to_string(), "unknown".to_string())); let os = env::var("CARGO_CFG_TARGET_OS").unwrap(); let arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap(); let mut f = File::create(format!("{}/meta.rs", env::var("OUT_DIR").unwrap()))?; write!( &mut f, "{}", quote! { const META_OID: &'static str = #oid; const META_SHORTNAME: &'static str = #shortname; const META_OS: &'static str = #os; const META_ARCH: &'static str = #arch; } )?; Ok(()) }
use super::OpIterator;
use crate::StorageManager;
use common::ids::Permissions;
use common::ids::{ContainerId, TransactionId};
use common::storage_trait::StorageTrait;
use common::table::*;
use common::{Attribute, CrustyError, TableSchema, Tuple};
use std::sync::{Arc, RwLock};

/// Sequential scan operator
pub struct SeqScan {
    // Iterator over the raw tuple bytes stored in the table's container.
    file_iter: <StorageManager as StorageTrait>::ValIterator,
    // Output schema with attribute names prefixed by the table alias.
    schema: TableSchema,
    // Whether open() has been called; next()/rewind() panic otherwise.
    open: bool,
    // Kept so rewind() can request a fresh iterator.
    storage_manager: Arc<StorageManager>,
    container_id: ContainerId,
    transaction_id: TransactionId,
}

impl SeqScan {
    /// Constructor for the sequential scan operator.
    ///
    /// # Arguments
    ///
    /// * `table` - Table to scan over.
    /// * `table_alias` - Table alias given by the user.
    /// * `tid` - Transaction used to read the table.
    pub fn new(
        storage_manager: Arc<StorageManager>,
        table: Arc<RwLock<Table>>,
        table_alias: &str,
        tid: TransactionId,
    ) -> Self {
        let table_ref = table.read().unwrap();
        let schema = table_ref.schema.clone();
        // NOTE(review): the table id is narrowed to u16 here and used as the
        // container id; assumes ids fit 16 bits — confirm against how table
        // ids are assigned.
        let table_id_downcast = table_ref.id as u16;
        storage_manager.create_container(table_id_downcast).unwrap();
        let file_iter = storage_manager.get_iterator(table_id_downcast, tid, Permissions::ReadOnly);
        let container_id = table_id_downcast as ContainerId;
        Self {
            file_iter,
            schema: Self::schema(&schema, table_alias),
            open: false,
            storage_manager,
            container_id,
            transaction_id: tid,
        }
    }

    /// Returns the schema of the table with aliases.
    ///
    /// Each attribute is renamed to `"alias.name"`.
    ///
    /// # Arguments
    ///
    /// * `src_schema` - Schema of the source.
    /// * `alias` - Alias of the table.
    fn schema(src_schema: &TableSchema, alias: &str) -> TableSchema {
        let mut attrs = Vec::new();
        for a in src_schema.attributes() {
            let new_name = format!("{}.{}", alias, a.name());
            attrs.push(Attribute::new(new_name, a.dtype().clone()));
        }
        TableSchema::new(attrs)
    }
}

impl OpIterator for SeqScan {
    fn open(&mut self) -> Result<(), CrustyError> {
        self.open = true;
        Ok(())
    }

    /// Returns the next tuple, or `None` when the scan is exhausted.
    ///
    /// # Panics
    ///
    /// Panics if the operator has not been opened.
    fn next(&mut self) -> Result<Option<Tuple>, CrustyError> {
        if !self.open {
            panic!("Operator has not been opened")
        }
        match self.file_iter.next() {
            Some(bytes) => Ok(Some(Tuple::from_bytes(&bytes))),
            None => Ok(None),
        }
    }

    fn close(&mut self) -> Result<(), CrustyError> {
        self.open = false;
        Ok(())
    }

    /// Restarts the scan from the beginning by requesting a new iterator
    /// from the storage manager.
    ///
    /// # Panics
    ///
    /// Panics if the operator has not been opened.
    fn rewind(&mut self) -> Result<(), CrustyError> {
        if !self.open {
            panic!("Operator has not been opened")
        }
        self.file_iter = self.storage_manager.get_iterator(
            self.container_id,
            self.transaction_id,
            Permissions::ReadOnly,
        );
        Ok(())
    }

    fn get_schema(&self) -> &TableSchema {
        &self.schema
    }
}

#[cfg(test)]
#[allow(unused_must_use)]
mod test {
    use super::*;
    use crate::opiterator::testutil::sum_int_fields;
    use common::ids::TransactionId;
    use common::testutil::{get_int_table_schema};
    use common::testutil::*;

    // Three tuples of [1, 2, 3]: all int fields sum to 18.
    const CHECKSUM: i32 = 18;
    const WIDTH: usize = 3;
    const TABLE: &str = "SeqScan";

    /// Builds a SeqScan over a fresh test table containing three tuples.
    fn get_scan() -> Result<SeqScan, CrustyError> {
        // Create test table
        let schema = get_int_table_schema(WIDTH);
        let table = Arc::new(RwLock::new(Table::new(TABLE.to_string(), schema)));

        // Create test SM with a container
        let sm = Arc::new(StorageManager::new_test_sm());
        let table_ref = table.read().unwrap();
        let table_id_downcast = table_ref.id as u16;
        sm.create_container(table_id_downcast).unwrap();

        // Create test data
        let tuple = int_vec_to_tuple(vec![1, 2, 3]);
        let tuple2 = int_vec_to_tuple(vec![1, 2, 3]);
        let tuple3 = int_vec_to_tuple(vec![1, 2, 3]);
        let tuple_bytes = serde_cbor::to_vec(&tuple).unwrap();
        let tuple_bytes2 = serde_cbor::to_vec(&tuple2).unwrap();
        let tuple_bytes3 = serde_cbor::to_vec(&tuple3).unwrap();
        let tid = TransactionId::new();
        let _rid = sm.insert_value(table_id_downcast, tuple_bytes.clone(), tid);
        let _rid2 = sm.insert_value(table_id_downcast, tuple_bytes2.clone(), tid);
        let _rid3 = sm.insert_value(table_id_downcast, tuple_bytes3.clone(), tid);
        Ok(SeqScan::new(sm.clone(), table.clone(), TABLE, tid))
    }

    #[test]
    fn test_open() -> Result<(), CrustyError> {
        let mut scan = get_scan()?;
        assert_ne!(scan.open, true);
        scan.open()?;
        assert!(scan.open);
        Ok(())
    }

    #[test]
    fn test_next() -> Result<(), CrustyError> {
        let mut scan = get_scan()?;
        scan.open()?;
        assert_eq!(sum_int_fields(&mut scan)?, CHECKSUM);
        Ok(())
    }

    // next() before open() must panic.
    #[test]
    #[should_panic]
    fn test_next_not_open() {
        let mut scan = get_scan().unwrap();
        scan.next();
    }

    #[test]
    fn test_close() -> Result<(), CrustyError> {
        let mut scan = get_scan()?;
        scan.open()?;
        assert!(scan.open);
        scan.close()?;
        assert_ne!(scan.open, true);
        Ok(())
    }

    // rewind() before open() must panic.
    #[test]
    #[should_panic]
    fn test_rewind_not_open() {
        let mut scan = get_scan().unwrap();
        scan.rewind();
    }

    // A rewound scan must produce the same data again.
    #[test]
    fn test_rewind() -> Result<(), CrustyError> {
        let mut scan = get_scan()?;
        scan.open()?;
        let sum_before = sum_int_fields(&mut scan)?;
        scan.rewind()?;
        let sum_after = sum_int_fields(&mut scan)?;
        assert_eq!(sum_before, sum_after);
        Ok(())
    }

    // Every attribute name must be prefixed with the table alias.
    #[test]
    fn test_get_schema() {
        let scan = get_scan().unwrap();
        let original = get_int_table_schema(WIDTH);
        let prefixed = scan.get_schema();
        assert_eq!(original.size(), scan.get_schema().size());
        for (orig_attr, prefixed_attr) in original.attributes().zip(prefixed.attributes()) {
            assert_eq!(
                format!("{}.{}", TABLE, orig_attr.name()),
                prefixed_attr.name()
            );
        }
    }
}
mod exponential_backoff; pub use exponential_backoff::{ExponentialBackoff, ExponentialBackoffBuilder};
//! Evaluate use input and set InputCommands and triggers InputEvents. use crate::event; use crate::resource; use bevy::prelude::*; /// Evaluate use input and set InputCommands and triggers InputEvents. pub fn user_input( mut input_commands: ResMut<resource::InputCommands>, mut input_events: EventWriter<event::InputEvent>, key_codes: Res<Input<KeyCode>>, ) { if key_codes.just_pressed(KeyCode::Right) { input_commands.move_camera_right = true; } if key_codes.just_pressed(KeyCode::Left) { input_commands.move_camera_left = true; } if key_codes.just_pressed(KeyCode::Down) { input_commands.move_camera_bottom = true; } if key_codes.just_pressed(KeyCode::Up) { input_commands.move_camera_up = true; } if key_codes.just_released(KeyCode::Right) { input_commands.move_camera_right = false; } if key_codes.just_released(KeyCode::Left) { input_commands.move_camera_left = false; } if key_codes.just_released(KeyCode::Down) { input_commands.move_camera_bottom = false; } if key_codes.just_released(KeyCode::Up) { input_commands.move_camera_up = false; } if key_codes.just_pressed(KeyCode::W) { input_commands.entity_move_up = true; } if key_codes.just_pressed(KeyCode::S) { input_commands.entity_move_down = true; } if key_codes.just_pressed(KeyCode::A) { input_commands.entity_move_left = true; } if key_codes.just_pressed(KeyCode::D) { input_commands.entity_move_right = true; } if key_codes.just_released(KeyCode::W) { input_commands.entity_move_up = false; } if key_codes.just_released(KeyCode::S) { input_commands.entity_move_down = false; } if key_codes.just_released(KeyCode::A) { input_commands.entity_move_left = false; } if key_codes.just_released(KeyCode::D) { input_commands.entity_move_right = false; } if key_codes.just_pressed(KeyCode::Space) { input_events.send(event::InputEvent::SpawnCube); } }
use anyhow::Result; use std::env; use std::error::Error; use std::fmt; use std::fmt::Display; use std::io::prelude::*; use std::io::BufReader; use std::net::{TcpStream, ToSocketAddrs}; use std::process::exit; #[derive(Debug)] struct NameResolutionError<'a> { hostname: &'a str, } impl Display for NameResolutionError<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "NameResolutionError: {}", self.hostname) } } impl Error for NameResolutionError<'_> {} fn main() -> Result<()> { let args: Vec<String> = env::args().collect(); let host = if args.len() > 1 { &args[1] } else { "localhost" }; let mut stream = open_connection(host, "13")?; let mut buf = String::new(); stream.read_line(&mut buf)?; print!("{}", buf); Ok(()) } fn open_connection(host: &str, service: &str) -> Result<BufReader<TcpStream>> { // getaddrinfo let hostname = host.to_string() + ":" + service; let addr = match hostname.to_socket_addrs()?.next() { Some(addr) => addr, None => { exit(1); /* let err = NameResolutionError { hostname: format!("something wrong with name resolution: {}", hostname), }; */ // return err.with_context(); } }; // connect tcp let stream = TcpStream::connect(addr)?; Ok(BufReader::new(stream)) }
use ::*; pub fn sound_loop( stop : Arc<Mutex<bool>>, sound_rx : mpsc::Receiver<&str>, ) { let mut monster_dies = Sound::new("monster_dies.wav").unwrap(); let mut monster_spawns = Sound::new("monster_spawns.wav").unwrap(); let mut player_dies = Sound::new("player_dies.wav").unwrap(); loop { sleep(Duration::from_millis(10)); { if *stop.lock().unwrap() { break; } } while let Ok(choice) = sound_rx.try_recv() { match choice { "monster_dies" => monster_dies.play(), "monster_spawns" => monster_spawns.play(), "player_dies" => player_dies.play(), _ => {}, } } } while player_dies.is_playing() {} }
mod block_assembler; mod component; mod config; pub mod error; pub mod pool; mod process; pub mod service; pub(crate) const LOG_TARGET_TX_POOL: &str = "ckb-tx-pool"; pub use ckb_fee_estimator::FeeRate; pub use component::entry::TxEntry; pub use config::{BlockAssemblerConfig, TxPoolConfig}; pub use process::PlugTarget; pub use service::{TxPoolController, TxPoolServiceBuilder}; pub use tokio::sync::lock::Lock as PollLock;
use crate::AssemblerError; use std::rc::Rc; #[derive(Debug)] pub struct Symbol { name: Rc<String>, val: Type, exported: bool, } #[derive(Debug)] enum Type { Equ(i32), Equs(String), Label(i32), // TODO: actually a section + offset Set(i32), } impl Symbol { // === Constructors === pub fn new_equ(name: String, val: i32) -> Self { Symbol { name: Rc::new(name), val: Type::Equ(val), exported: false, } } pub fn new_equs(name: String, val: String) -> Self { Symbol { name: Rc::new(name), val: Type::Equs(val), exported: false, } } pub fn new_label(name: String, val: i32) -> Self { Symbol { name: Rc::new(name), val: Type::Label(val), exported: false, } } pub fn new_set(name: String, val: i32) -> Self { Symbol { name: Rc::new(name), val: Type::Set(val), exported: false, } } // === Getters === pub fn get_name(&self) -> &Rc<String> { &self.name } pub fn get_str<'a>(&'a self) -> Option<&'a String> { match &self.val { Type::Equs(string) => Some(&string), _ => None, } } pub fn get_value(&self) -> Option<i32> { match self.val { Type::Equ(v) => Some(v), Type::Label(v) => Some(v), Type::Set(v) => Some(v), _ => None, } } pub fn set_value(&mut self, val: i32) { self.val = match self.val { Type::Equ(_) => Type::Equ(val), Type::Set(_) => Type::Set(val), _ => panic!("Impossible to set a non-numeric symbol's value!"), } } // === Actions === pub fn redefine(&mut self, other: Self) -> Result<(), AssemblerError> { debug_assert_eq!(self.name, other.name); unimplemented!(); } pub fn export(&mut self) { self.exported = true; } }
use super::{IntoRecords, Records}; /// A [Records] implementation for any [IntoIterator]. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct IterRecords<I> { iter: I, count_columns: usize, count_rows: Option<usize>, } impl<I> IterRecords<I> { /// Returns a new [IterRecords] object. pub const fn new(iter: I, count_columns: usize, count_rows: Option<usize>) -> Self { Self { iter, count_columns, count_rows, } } } impl<I> IntoRecords for IterRecords<I> where I: IntoRecords, { type Cell = I::Cell; type IterColumns = I::IterColumns; type IterRows = I::IterRows; fn iter_rows(self) -> Self::IterRows { self.iter.iter_rows() } } // why this does not work? // impl<'a, I> IntoRecords for &'a IterRecords<I> // where // &'a I: IntoRecords, // { // type Cell = <&'a I as IntoRecords>::Cell; // type IterColumns = <&'a I as IntoRecords>::IterColumns; // type IterRows = <&'a I as IntoRecords>::IterRows; // fn iter_rows(self) -> Self::IterRows { // // (&self.iter).iter_rows() // todo!() // } // } impl<I> Records for IterRecords<I> where I: IntoRecords, { type Iter = I; fn iter_rows(self) -> <Self::Iter as IntoRecords>::IterRows { self.iter.iter_rows() } fn count_columns(&self) -> usize { self.count_columns } fn hint_count_rows(&self) -> Option<usize> { self.count_rows } } impl<'a, I> Records for &'a IterRecords<I> where &'a I: IntoRecords, { type Iter = &'a I; fn iter_rows(self) -> <Self::Iter as IntoRecords>::IterRows { (&self.iter).iter_rows() } fn count_columns(&self) -> usize { self.count_columns } fn hint_count_rows(&self) -> Option<usize> { self.count_rows } }
extern crate log; extern crate byteorder; extern crate libc; extern crate rand; #[macro_use] pub mod util; pub mod config; pub mod db; pub mod mem; #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } }
//! Contains types useful for implementing custom resource conversion webhooks. pub use self::types::{ ConversionRequest, ConversionResponse, ConversionReview, ConvertConversionReviewError, }; /// Defines low-level typings. mod types;
//! Discovers images in a Connection and assigns them names. We use these for
//! image filenames so that models know what the path to a specific image it
//! uses will be.

use db::{Database, TextureId, PaletteId};
use nitro::Name;
use std::collections::HashMap;
use util::namers::UniqueNamer;
use connection::Connection;

/// A texture plus the palette (if any) it is rendered with.
type ImageId = (TextureId, Option<PaletteId>);

pub struct ImageNamer {
    // Produces unique names for newly discovered images.
    pub namer: UniqueNamer,
    // Name assigned to each discovered (texture, palette) pair.
    pub names: HashMap<ImageId, String>,
    // Flag per texture id: has it been used by any image yet?
    pub used_texture_ids: Vec<bool>, // TODO: BitVec
}

impl ImageNamer {
    /// Scans model materials and pattern animations for every
    /// (texture, palette) pair in use, assigning each a unique name.
    pub fn build(db: &Database, conn: &Connection) -> ImageNamer {
        let mut image_namer = ImageNamer {
            namer: UniqueNamer::new(),
            names: HashMap::new(),
            used_texture_ids: vec![false; db.textures.len()],
        };

        // Discovery images from model materials
        for mdl_conn in &conn.models {
            for mat_conn in &mdl_conn.materials {
                match mat_conn.image_id() {
                    Ok(Some(image_id)) => image_namer.insert_image_id(db, image_id),
                    _ => continue,
                }
            }
        }

        // Discover images from pattern animations
        for mdl_conn in &conn.models {
            for pat_conn in &mdl_conn.patterns {
                let pat = &db.patterns[pat_conn.pattern_id];
                for track in &pat.material_tracks {
                    for keyframe in &track.keyframes {
                        // Skip keyframes whose texture or palette did not
                        // resolve to an id.
                        let tex_idx = keyframe.texture_idx as usize;
                        let texture_id = match pat_conn.texture_ids[tex_idx] {
                            Some(id) => id,
                            None => continue,
                        };
                        let pal_idx = keyframe.palette_idx as usize;
                        let palette_id = match pat_conn.palette_ids[pal_idx] {
                            Some(id) => id,
                            None => continue,
                        };
                        let image_id = (texture_id, Some(palette_id));
                        image_namer.insert_image_id(db, image_id);
                    }
                }
            }
        }

        image_namer
    }

    /// Records `image_id`, assigning it a fresh name on first sight, and
    /// marks its texture as used.
    pub fn insert_image_id(&mut self, db: &Database, image_id: ImageId) {
        let texture_name = db.textures[image_id.0].name;
        let namer = &mut self.namer;
        self.names.entry(image_id).or_insert_with(|| {
            namer.get_fresh_name(texture_name.print_safe().to_string())
        });
        self.used_texture_ids[image_id.0] = true;
    }

    /// Discover even more images by guessing, based on their names, which
    /// palettes go with which textures.
    pub fn add_more_images(&mut self, db: &Database) {
        let mut num_guesses = 0;
        let mut still_unextracted = false;

        // Try a sequence of heuristics for every texture not yet used.
        for (texture_id, texture) in db.textures.iter().enumerate() {
            if self.used_texture_ids[texture_id] {
                continue;
            }

            // Direct color textures don't need a palette.
            if !texture.params.format().desc().requires_palette {
                self.insert_image_id(db, (texture_id, None));
                num_guesses += 1;
                continue;
            }

            // If there's one palette, guess it
            if db.palettes.len() == 1 {
                self.insert_image_id(db, (texture_id, Some(0)));
                num_guesses += 1;
                continue;
            }

            // Guess palette name == texture name
            if let Some(ids) = db.palettes_by_name.get(&texture.name) {
                self.insert_image_id(db, (texture_id, Some(ids[0])));
                num_guesses += 1;
                continue;
            }

            // Guess palette name == texture_name + "_pl"
            if let Some(ids) = db.palettes_by_name.get(&append_pl(&texture.name)) {
                self.insert_image_id(db, (texture_id, Some(ids[0])));
                num_guesses += 1;
                continue;
            }

            // No heuristic matched; remember so we can warn below.
            still_unextracted = true;
        }

        info!("Guessed {} new images (for --more-images)", num_guesses);
        if still_unextracted {
            info!("There are still unextracted textures though");
        }
    }
}

/// Append "_pl" to the end of a name.
fn append_pl(name: &Name) -> Name {
    let mut res = name.clone();

    // Find the index of the first NUL byte in the suffix of NUL bytes.
    let mut idx = res.0.iter().rposition(|&x| x != b'\0')
        .map(|pos| pos + 1)
        .unwrap_or(0);

    // Append as much of b"_pl" as will fit.
    for &b in b"_pl" {
        if idx == res.0.len() {
            break;
        }
        res.0[idx] = b;
        idx += 1;
    }

    res
}
use glium::glutin::{self, dpi::{LogicalSize, PhysicalSize, PhysicalPosition}};
use glium::glutin::event_loop::ControlFlow;
use super::viewer::Viewer;
use db::Database;
use connection::Connection;

/// Creates the window and GL context, then runs the winit event loop,
/// updating and redrawing the `Viewer` every iteration (never returns:
/// `EventLoop::run` takes over the thread).
pub fn main_loop(db: Database, conn: Connection) {
    let window_builder = glutin::window::WindowBuilder::new()
        .with_inner_size(LogicalSize {
            width: super::WINDOW_WIDTH as f64,
            height: super::WINDOW_HEIGHT as f64
        });
    let context_builder = glutin::ContextBuilder::new()
        .with_depth_buffer(24);
    let events_loop = glutin::event_loop::EventLoop::new();
    let display = glium::Display::new(window_builder, context_builder, &events_loop)
        .expect("failed to get rendering context");

    let mut viewer = Viewer::new(&display, db, conn);

    // Mutable state moved into the event-loop closure.
    struct State {
        last_mouse_xy: PhysicalPosition<f64>,
        // Cursor position saved when the mouse is grabbed; restored on release.
        saved_mouse_xy: PhysicalPosition<f64>,
        mouse_grabbed: bool,
        // Reused every frame to avoid per-frame allocation.
        win_title: String,
        cur_time: u64,
        last_time: u64,
    };

    let mut state = State {
        last_mouse_xy: PhysicalPosition { x: 0.0, y: 0.0 },
        saved_mouse_xy: PhysicalPosition { x: 0.0, y: 0.0 },
        mouse_grabbed: false,
        win_title: String::with_capacity(512),
        cur_time: time::precise_time_ns(),
        last_time: time::precise_time_ns(),
    };

    events_loop.run(move |ev, _, control_flow| {
        // Poll (rather than Wait) so we update and redraw continuously.
        *control_flow = ControlFlow::Poll;

        let gl_window = display.gl_window();
        let window = gl_window.window();

        // Frame delta time in seconds; wrapping_sub guards against the
        // nanosecond counter wrapping.
        state.last_time = state.cur_time;
        state.cur_time = time::precise_time_ns();
        let dt_in_ns = state.cur_time.wrapping_sub(state.last_time);
        let dt = dt_in_ns as f64 / 1_000_000_000.0;

        viewer.update(&display, dt);

        // Skip aspect-ratio updates while minimized (zero-sized window).
        let PhysicalSize { width, height } = window.inner_size();
        if width > 0 && height > 0 {
            viewer.set_aspect_ratio(width as f64 / height as f64);
        }

        let mut frame = display.draw();
        viewer.draw(&mut frame);
        frame.finish().expect("rendering error");

        // Refresh the window title from the viewer, reusing the buffer.
        state.win_title.clear();
        viewer.title(&mut state.win_title);
        window.set_title(&state.win_title);

        use self::glutin::event::Event as Ev;
        use self::glutin::event::WindowEvent as WEv;
        use self::glutin::event::DeviceEvent as DEv;
        match ev {
            Ev::WindowEvent { event, .. } => match event {
                WEv::CloseRequested => {
                    *control_flow = ControlFlow::Exit;
                }
                WEv::KeyboardInput { input, .. } => {
                    if input.virtual_keycode.is_none() { return; }
                    let keycode = input.virtual_keycode.unwrap();
                    viewer.key(&display, (input.state, keycode, input.modifiers));
                }
                WEv::MouseInput { state: mouse_state, button, .. } => {
                    use self::glutin::event::ElementState as Es;
                    use self::glutin::event::MouseButton as MB;
                    match (mouse_state, button) {
                        (Es::Pressed, MB::Left) => {
                            // Grab: hide the cursor and remember where it was.
                            state.mouse_grabbed = true;
                            state.saved_mouse_xy = state.last_mouse_xy;
                            let _ = window.set_cursor_grab(true);
                            let _ = window.set_cursor_visible(false);
                        }
                        (Es::Released, MB::Left) => {
                            // Ungrab: restore the saved cursor position.
                            state.mouse_grabbed = false;
                            let _ = window.set_cursor_position(state.saved_mouse_xy);
                            let _ = window.set_cursor_grab(false);
                            let _ = window.set_cursor_visible(true);
                        }
                        _ => (),
                    }
                }
                WEv::CursorMoved { position, .. } => {
                    state.last_mouse_xy = position;
                }
                WEv::Focused(false) => {
                    viewer.blur();

                    // Release the mouse
                    state.mouse_grabbed = false;
                    let _ = window.set_cursor_grab(false);
                    let _ = window.set_cursor_visible(true);
                }
                _ => ()
            },
            Ev::DeviceEvent { event, .. } => match event {
                DEv::MouseMotion { delta } => {
                    // delta is in an "unspecified coordinate system" but
                    // appears to be pixels on my machine
                    if state.mouse_grabbed {
                        viewer.mouse_drag(delta);
                    }
                }
                _ => (),
            },
            _ => (),
        }
    });
}
// Copyright (c) 2017-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate ansi_term;
#[macro_use]
extern crate clap;
extern crate env_logger;
extern crate habitat_common as common;
#[macro_use]
extern crate habitat_core as hcore;
extern crate habitat_launcher_client as launcher_client;
#[macro_use]
extern crate habitat_sup as sup;
extern crate habitat_sup_protocol as protocol;
extern crate libc;
#[macro_use]
extern crate log;
extern crate protobuf;
extern crate time;
extern crate tokio_core;
extern crate url;

use std::env;
use std::io::{self, Write};
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::Path;
use std::process;
use std::result;
use std::str::{self, FromStr};

use clap::{App, ArgMatches};
use common::command::package::install::InstallSource;
use common::ui::{Coloring, NONINTERACTIVE_ENVVAR, UI};
use hcore::channel;
#[cfg(windows)]
use hcore::crypto::dpapi::encrypt;
use hcore::crypto::{self, default_cache_key_path, SymKey};
use hcore::env as henv;
use hcore::url::{bldr_url_from_env, default_bldr_url};
use launcher_client::{LauncherCli, ERR_NO_RETRY_EXCODE};
use protocol::{
    ctl::ServiceBindList,
    types::{
        ApplicationEnvironment, BindingMode, ServiceBind, ServiceGroup, Topology, UpdateStrategy,
    },
};
use url::Url;

use sup::command;
use sup::config::{GossipListenAddr, GOSSIP_DEFAULT_PORT};
use sup::error::{Error, Result, SupError};
use sup::feat;
use sup::http_gateway;
use sup::manager::{Manager, ManagerConfig};
use sup::util;
use sup::VERSION;

/// Our output key
static LOGKEY: &'static str = "MN";

// Environment variables used to locate/supply a ring encryption key
// when it is not given on the command line.
static RING_ENVVAR: &'static str = "HAB_RING";
static RING_KEY_ENVVAR: &'static str = "HAB_RING_KEY";

/// Process entry point: initialize logging and feature flags, run
/// `start()`, then exit with 0 on success or ERR_NO_RETRY_EXCODE on error.
fn main() {
    env_logger::init();
    enable_features_from_env();
    let result = start();
    let exit_code = match result {
        Ok(_) => 0,
        Err(ref err) => {
            println!("{}", err);
            ERR_NO_RETRY_EXCODE
        }
    };
    debug!("start() returned {:?}; Exiting {}", result, exit_code);
    process::exit(exit_code);
}

/// Initialize crypto and, when started by the hab-launch launcher
/// (detected via its env pipe), connect back to it.
///
/// Exits the process (code 1) if crypto init or the launcher connection
/// fails; returns `None` when not running under a launcher.
fn boot() -> Option<LauncherCli> {
    if !crypto::init() {
        println!("Crypto initialization failed!");
        process::exit(1);
    }
    match launcher_client::env_pipe() {
        Some(pipe) => match LauncherCli::connect(pipe) {
            Ok(launcher) => Some(launcher),
            Err(err) => {
                println!("{}", err);
                process::exit(1);
            }
        },
        None => None,
    }
}

/// Parse the CLI and dispatch to the requested subcommand.
///
/// On CLI parse failure this prints the clap error and exits directly;
/// exit code depends on whether we run under a launcher (retry semantics)
/// and whether the "error" was just --help/--version.
fn start() -> Result<()> {
    if feat::is_enabled(feat::TestBootFail) {
        outputln!("Simulating boot failure");
        return Err(sup_error!(Error::TestBootFail));
    }
    let launcher = boot();
    let app_matches = match cli().get_matches_safe() {
        Ok(matches) => matches,
        Err(err) => {
            let out = io::stdout();
            writeln!(&mut out.lock(), "{}", err.message).expect("Error writing Error to stdout");
            match launcher {
                Some(_) => process::exit(ERR_NO_RETRY_EXCODE),
                // If we weren't started by a launcher, exit 0 for
                // help and version
                None => match err.kind {
                    clap::ErrorKind::HelpDisplayed => process::exit(0),
                    clap::ErrorKind::VersionDisplayed => process::exit(0),
                    _ => process::exit(ERR_NO_RETRY_EXCODE),
                },
            }
        }
    };
    match app_matches.subcommand() {
        ("bash", Some(_)) => sub_bash(),
        ("run", Some(m)) => {
            // `run` requires the launcher; refuse to run without it.
            let launcher = launcher.ok_or(sup_error!(Error::NoLauncher))?;
            sub_run(m, launcher)
        }
        ("sh", Some(_)) => sub_sh(),
        ("term", Some(m)) => sub_term(m),
        // clap's SubcommandRequiredElseHelp guarantees one of the above.
        _ => unreachable!(),
    }
}

/// Build the `hab-sup` clap CLI definition (bash/run/sh/term subcommands).
fn cli<'a, 'b>() -> App<'a, 'b> {
    clap_app!(("hab-sup") =>
        (about: "The Habitat Supervisor")
        (version: VERSION)
        (author: "\nAuthors: The Habitat Maintainers <humans@habitat.sh>\n")
        (@setting VersionlessSubcommands)
        (@setting SubcommandRequiredElseHelp)
        (@subcommand bash =>
            (about: "Start an interactive Bash-like shell")
            (aliases: &["b", "ba", "bas"])
        )
        (@subcommand run =>
            (about: "Run the Habitat Supervisor")
            (aliases: &["r", "ru"])
            (@arg LISTEN_GOSSIP: --("listen-gossip") +takes_value {valid_socket_addr}
                "The listen address for the gossip system [default: 0.0.0.0:9638]")
            (@arg LISTEN_HTTP: --("listen-http") +takes_value {valid_socket_addr}
                "The listen address for the HTTP Gateway [default: 0.0.0.0:9631]")
            (@arg LISTEN_CTL: --("listen-ctl") +takes_value {valid_socket_addr}
                "The listen address for the Control Gateway [default: 127.0.0.1:9632]")
            (@arg NAME: --("override-name") +takes_value
                "The name of the Supervisor if launching more than one [default: default]")
            (@arg ORGANIZATION: --org +takes_value
                "The organization that the Supervisor and its subsequent services are part of \
[default: default]")
            (@arg PEER: --peer +takes_value +multiple
                "The listen address of one or more initial peers (IP[:PORT])")
            (@arg PERMANENT_PEER: --("permanent-peer") -I "If this Supervisor is a permanent peer")
            (@arg PEER_WATCH_FILE: --("peer-watch-file") +takes_value conflicts_with[peer]
                "Watch this file for connecting to the ring"
            )
            (@arg RING: --ring -r +takes_value "Ring key name")
            (@arg CHANNEL: --channel +takes_value
                "Receive Supervisor updates from the specified release channel [default: stable]")
            (@arg BLDR_URL: -u --url +takes_value {valid_url}
                "Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
            (@arg CONFIG_DIR: --("config-from") +takes_value {dir_exists}
                "Use package config from this path, rather than the package itself")
            (@arg AUTO_UPDATE: --("auto-update") -A
                "Enable automatic updates for the Supervisor \
itself")
            (@arg EVENTS: --events -n +takes_value {valid_service_group}
                "Name of the service \
group running a Habitat EventSrv to forward Supervisor and service event data to")
            // === Optional arguments to additionally load an initial service for the Supervisor
            (@arg PKG_IDENT_OR_ARTIFACT: +takes_value
                "Load the given Habitat package as part of \
the Supervisor startup specified by a package identifier \
(ex: core/redis) or filepath to a Habitat Artifact \
(ex: /home/core-redis-3.0.7-21120102031201-x86_64-linux.hart).")
            (@arg APPLICATION: --application -a +takes_value requires[ENVIRONMENT]
                "Application name; [default: not set].")
            (@arg ENVIRONMENT: --environment -e +takes_value requires[APPLICATION]
                "Environment name; [default: not set].")
            (@arg GROUP: --group +takes_value
                "The service group; shared config and topology [default: default].")
            (@arg TOPOLOGY: --topology -t +takes_value {valid_topology}
                "Service topology; [default: none]")
            (@arg STRATEGY: --strategy -s +takes_value {valid_update_strategy}
                "The update strategy; [default: none] [values: none, at-once, rolling]")
            (@arg BIND: --bind +takes_value +multiple
                "One or more service groups to bind to a configuration")
            (@arg BINDING_MODE: --("binding-mode") +takes_value {valid_binding_mode}
                "Governs how the presence or absence of binds affects service startup. `strict` blocks \
startup until all binds are present. [default: strict] [values: relaxed, strict]")
            (@arg VERBOSE: -v "Verbose output; shows file and line/column numbers")
            (@arg NO_COLOR: --("no-color") "Turn ANSI color off")
            (@arg JSON: --("json-logging") "Use structured JSON logging for the Supervisor. \
Implies NO_COLOR")
        )
        (@subcommand sh =>
            (about: "Start an interactive Bourne-like shell")
            (aliases: &[])
        )
        (@subcommand term =>
            (about: "Gracefully terminate the Habitat Supervisor and all of its running services")
            (@arg NAME: --("override-name") +takes_value
                "The name of the Supervisor if more than one is running [default: default]")
        )
    )
}

/// `hab-sup bash`: drop into an interactive bash shell.
fn sub_bash() -> Result<()> {
    command::shell::bash()
}

/// `hab-sup run`: build the manager config, load the Manager, optionally
/// resolve an initial service to load, then run the Supervisor loop.
fn sub_run(m: &ArgMatches, launcher: LauncherCli) -> Result<()> {
    set_supervisor_logging_options(m);
    let cfg = mgrcfg_from_matches(m)?;
    let manager = Manager::load(cfg, launcher)?;

    // We need to determine if we have an initial service to start
    let svc = if let Some(pkg) = m.value_of("PKG_IDENT_OR_ARTIFACT") {
        let mut msg = protocol::ctl::SvcLoad::default();
        update_svc_load_from_input(m, &mut msg)?;
        // Always force - running with a package ident is a "do what I mean" operation. You
        // don't care if a service was loaded previously or not and with what options. You
        // want one loaded right now and in this way.
        msg.force = Some(true);
        let ident = match pkg.parse::<InstallSource>()? {
            source @ InstallSource::Archive(_) => {
                // Install the archive manually then explicitly set the pkg ident to the
                // version found in the archive. This will lock the software to this
                // specific version.
                let install = util::pkg::install(
                    &mut ui(),
                    msg.bldr_url
                        .as_ref()
                        .unwrap_or(&*protocol::DEFAULT_BLDR_URL),
                    &source,
                    msg.bldr_channel
                        .as_ref()
                        .unwrap_or(&*protocol::DEFAULT_BLDR_CHANNEL),
                )?;
                install.ident.into()
            }
            InstallSource::Ident(ident) => ident.into(),
        };
        msg.ident = Some(ident);
        Some(msg)
    } else {
        None
    };
    manager.run(svc)
}

/// `hab-sup sh`: drop into an interactive sh shell.
fn sub_sh() -> Result<()> {
    command::shell::sh()
}

/// `hab-sup term`: signal a running Supervisor to terminate.
/// A ProcessLockIO error is treated as "not running", not a failure.
fn sub_term(m: &ArgMatches) -> Result<()> {
    let cfg = mgrcfg_from_matches(m)?;
    match Manager::term(&cfg) {
        Err(SupError {
            err: Error::ProcessLockIO(_, _),
            ..
        }) => {
            println!("Supervisor not started.");
            Ok(())
        }
        result => result,
    }
}

// Internal Implementation Details
////////////////////////////////////////////////////////////////////////

/// Translate CLI matches into a `ManagerConfig`: listen addresses,
/// update settings, peers, ring key, and EventSrv group.
fn mgrcfg_from_matches(m: &ArgMatches) -> Result<ManagerConfig> {
    let mut cfg = ManagerConfig::default();

    cfg.auto_update = m.is_present("AUTO_UPDATE");
    cfg.update_url = bldr_url(m);
    cfg.update_channel = channel(m);
    if let Some(addr_str) = m.value_of("LISTEN_GOSSIP") {
        cfg.gossip_listen = GossipListenAddr::from_str(addr_str)?;
    }
    if let Some(addr_str) = m.value_of("LISTEN_HTTP") {
        cfg.http_listen = http_gateway::ListenAddr::from_str(addr_str)?;
    }
    if let Some(addr_str) = m.value_of("LISTEN_CTL") {
        // NOTE(review): a malformed --listen-ctl silently falls back to the
        // default address rather than erroring; the CLI validator should have
        // rejected it already, but env-supplied values would not be checked.
        cfg.ctl_listen =
            SocketAddr::from_str(addr_str).unwrap_or_else(|_err| protocol::ctl::default_addr());
    }
    if let Some(name_str) = m.value_of("NAME") {
        cfg.name = Some(String::from(name_str));
        outputln!("");
        outputln!("CAUTION: Running more than one Habitat Supervisor is not recommended for most");
        outputln!("CAUTION: users in most use cases. Using one Supervisor per host for multiple");
        outputln!("CAUTION: services in one ring will yield much better performance.");
        outputln!("");
        outputln!("CAUTION: If you know what you're doing, carry on!");
        outputln!("");
    }
    cfg.organization = m.value_of("ORGANIZATION").map(|org| org.to_string());
    cfg.gossip_permanent = m.is_present("PERMANENT_PEER");
    // TODO fn: Clean this up--using a for loop doesn't feel good however an iterator was
    // causing a lot of developer/compiler type confusion
    let mut gossip_peers: Vec<SocketAddr> = Vec::new();
    if let Some(peers) = m.values_of("PEER") {
        for peer in peers {
            // Append the default gossip port when the peer has no port.
            let peer_addr = if peer.find(':').is_some() {
                peer.to_string()
            } else {
                format!("{}:{}", peer, GOSSIP_DEFAULT_PORT)
            };
            let addrs: Vec<SocketAddr> = match peer_addr.to_socket_addrs() {
                Ok(addrs) => addrs.collect(),
                Err(e) => {
                    outputln!("Failed to resolve peer: {}", peer_addr);
                    return Err(sup_error!(Error::NameLookup(e)));
                }
            };
            // Only the first resolved address is used for each peer.
            let addr: SocketAddr = addrs[0];
            gossip_peers.push(addr);
        }
    }
    cfg.gossip_peers = gossip_peers;

    if let Some(watch_peer_file) = m.value_of("PEER_WATCH_FILE") {
        cfg.watch_peer_file = Some(String::from(watch_peer_file));
    }

    // Ring key resolution order: --ring flag, then HAB_RING_KEY (key
    // content written to the cache), then HAB_RING (key name), else none.
    cfg.ring_key = match m.value_of("RING") {
        Some(val) => Some(SymKey::get_latest_pair_for(
            &val,
            &default_cache_key_path(None),
        )?),
        None => match henv::var(RING_KEY_ENVVAR) {
            Ok(val) => {
                let (key, _) = SymKey::write_file_from_str(&val, &default_cache_key_path(None))?;
                Some(key)
            }
            Err(_) => match henv::var(RING_ENVVAR) {
                Ok(val) => Some(SymKey::get_latest_pair_for(
                    &val,
                    &default_cache_key_path(None),
                )?),
                Err(_) => None,
            },
        },
    };

    if let Some(events) = m.value_of("EVENTS") {
        // Invalid group names are silently dropped here; the CLI validator
        // valid_service_group has already rejected them.
        cfg.eventsrv_group = ServiceGroup::from_str(events).ok().map(Into::into);
    }

    Ok(cfg)
}

// Various CLI Parsing Functions
////////////////////////////////////////////////////////////////////////

/// Resolve a Builder URL. Taken from CLI args, the environment, or
/// (failing those) a default value.
fn bldr_url(m: &ArgMatches) -> String {
    // `bldr_url_from_input` already yields an owned String, so the old
    // `url.to_string()` round-trip was redundant; `unwrap_or_else` keeps the
    // default lookup lazy.
    bldr_url_from_input(m).unwrap_or_else(default_bldr_url)
}

/// A Builder URL, but *only* if the user specified it via CLI args or
/// the environment
fn bldr_url_from_input(m: &ArgMatches) -> Option<String> {
    // `map` instead of `and_then(|u| Some(...))` (clippy: bind_instead_of_map);
    // pass the function directly instead of a redundant closure.
    m.value_of("BLDR_URL")
        .map(str::to_string)
        .or_else(bldr_url_from_env)
}

/// Resolve a channel. Taken from CLI args, or (failing that), a
/// default value.
fn channel(matches: &ArgMatches) -> String {
    // `unwrap_or_else` so the default is only computed when needed
    // (clippy: or_fun_call).
    channel_from_input(matches).unwrap_or_else(channel::default)
}

/// A channel name, but *only* if the user specified via CLI args.
fn channel_from_input(m: &ArgMatches) -> Option<String> {
    // `map` instead of `and_then(|c| Some(...))` (clippy: bind_instead_of_map).
    m.value_of("CHANNEL").map(str::to_string)
}

// ServiceSpec Modification Functions
////////////////////////////////////////////////////////////////////////

/// The `--group` value, if given.
fn get_group_from_input(m: &ArgMatches) -> Option<String> {
    m.value_of("GROUP").map(ToString::to_string)
}

/// If the user provides both --application and --environment options,
/// parse and set the value on the spec.
fn get_app_env_from_input(m: &ArgMatches) -> Result<Option<ApplicationEnvironment>> {
    // clap `requires[...]` on both args guarantees they appear together,
    // so a lone --application/--environment never reaches this branch.
    if let (Some(app), Some(env)) = (m.value_of("APPLICATION"), m.value_of("ENVIRONMENT")) {
        Ok(Some(ApplicationEnvironment {
            application: app.to_string(),
            environment: env.to_string(),
        }))
    } else {
        Ok(None)
    }
}

/// The `--topology` value parsed to an enum; invalid values become `None`
/// (the CLI validator has already rejected them).
fn get_topology_from_input(m: &ArgMatches) -> Option<Topology> {
    m.value_of("TOPOLOGY")
        .and_then(|f| Topology::from_str(f).ok())
}

/// The `--strategy` value parsed to an enum; invalid values become `None`
/// (the CLI validator has already rejected them).
fn get_strategy_from_input(m: &ArgMatches) -> Option<UpdateStrategy> {
    m.value_of("STRATEGY")
        .and_then(|f| UpdateStrategy::from_str(f).ok())
}

/// Parse every `--bind` occurrence into a ServiceBindList; the first
/// malformed bind aborts with an error.
fn get_binds_from_input(m: &ArgMatches) -> Result<Option<ServiceBindList>> {
    match m.values_of("BIND") {
        Some(bind_strs) => {
            let mut list = ServiceBindList::default();
            for bind_str in bind_strs {
                list.binds.push(ServiceBind::from_str(bind_str)?.into());
            }
            Ok(Some(list))
        }
        None => Ok(None),
    }
}

fn get_binding_mode_from_input(m: &ArgMatches) -> Option<BindingMode> {
    // There won't be errors, because we validate with `valid_binding_mode`
    m.value_of("BINDING_MODE")
        .and_then(|b| BindingMode::from_str(b).ok())
}

/// The `--config-from` value, with a loud warning since it is a
/// development-only override.
fn get_config_from_input(m: &ArgMatches) -> Option<String> {
    if let Some(ref config_from) = m.value_of("CONFIG_DIR") {
        warn!("");
        warn!(
            "WARNING: Setting '--config-from' should only be used in development, not production!"
        );
        warn!("");
        Some(config_from.to_string())
    } else {
        None
    }
}

/// Windows: encrypt the service password (DPAPI) before storing it.
#[cfg(target_os = "windows")]
fn get_password_from_input(m: &ArgMatches) -> Result<Option<String>> {
    if let Some(password) = m.value_of("PASSWORD") {
        Ok(Some(encrypt(password.to_string())?))
    } else {
        Ok(None)
    }
}

/// Non-Windows: service passwords are not supported; always `None`.
#[cfg(any(target_os = "linux", target_os = "macos"))]
fn get_password_from_input(_m: &ArgMatches) -> Result<Option<String>> {
    Ok(None)
}

// CLAP Validation Functions
////////////////////////////////////////////////////////////////////////

/// clap validator: the value must name an existing directory.
fn dir_exists(val: String) -> result::Result<(), String> {
    if Path::new(&val).is_dir() {
        Ok(())
    } else {
        Err(format!("Directory: '{}' cannot be found", &val))
    }
}

/// clap validator for `--binding-mode`.
fn valid_binding_mode(val: String) -> result::Result<(), String> {
    match BindingMode::from_str(&val) {
        Ok(_) => Ok(()),
        Err(_) => Err(format!("Binding mode: '{}' is not valid", &val)),
    }
}

/// clap validator for `--events` service group names.
fn valid_service_group(val: String) -> result::Result<(), String> {
    match ServiceGroup::validate(&val) {
        Ok(()) => Ok(()),
        Err(err) => Err(err.to_string()),
    }
}

/// clap validator for `--topology`.
fn valid_topology(val: String) -> result::Result<(), String> {
    match Topology::from_str(&val) {
        Ok(_) => Ok(()),
        Err(_) => Err(format!("Service topology: '{}' is not valid", &val)),
    }
}

/// clap validator for listen addresses: must parse as IP:PORT.
fn valid_socket_addr(val: String) -> result::Result<(), String> {
    match SocketAddr::from_str(&val) {
        Ok(_) => Ok(()),
        Err(_) => Err(format!(
            "Socket address should include both IP and port, eg: '0.0.0.0:9700'"
        )),
    }
}

/// clap validator for `--strategy`.
fn valid_update_strategy(val: String) -> result::Result<(), String> {
    match UpdateStrategy::from_str(&val) {
        Ok(_) => Ok(()),
        Err(_) => Err(format!("Update strategy: '{}' is not valid", &val)),
    }
}

/// clap validator for `--url`.
fn valid_url(val: String) -> result::Result<(), String> {
    match Url::parse(&val) {
        Ok(_) => Ok(()),
        Err(_) => Err(format!("URL: '{}' is not valid", &val)),
    }
}

////////////////////////////////////////////////////////////////////////

/// Enable feature flags from HAB_FEAT_* environment variables, and list
/// them when the List feature itself is enabled.
fn enable_features_from_env() {
    let features = vec![
        (feat::List, "LIST"),
        (feat::TestExit, "TEST_EXIT"),
        (feat::TestBootFail, "BOOT_FAIL"),
    ];

    // If the environment variable for a flag is set to _anything_ but
    // the empty string, it is activated.
    for feature in &features {
        match henv::var(format!("HAB_FEAT_{}", feature.1)) {
            Ok(_) => {
                feat::enable(feature.0);
                outputln!("Enabling feature: {:?}", feature.0);
            }
            _ => {}
        }
    }

    if feat::is_enabled(feat::List) {
        outputln!("Listing feature flags environment variables:");
        for feature in &features {
            outputln!(
                " * {:?}: HAB_FEAT_{}={}",
                feature.0,
                feature.1,
                henv::var(format!("HAB_FEAT_{}", feature.1)).unwrap_or("".to_string())
            );
        }
        outputln!("The Supervisor will start now, enjoy!");
    }
}

/// Apply --verbose / --no-color / --json-logging to the global output settings.
fn set_supervisor_logging_options(m: &ArgMatches) {
    if m.is_present("VERBOSE") {
        hcore::output::set_verbose(true);
    }
    if m.is_present("NO_COLOR") {
        hcore::output::set_no_color(true);
    }
    if m.is_present("JSON") {
        hcore::output::set_json(true)
    }
}

// Based on UI::default_with_env, but taking into account the setting
// of the global color variable.
//
// TODO: Ideally we'd have a unified way of setting color, so this
// function wouldn't be necessary. In the meantime, though, it'll keep
// the scope of change contained.
fn ui() -> UI {
    let coloring = if hcore::output::is_color() {
        Coloring::Auto
    } else {
        Coloring::Never
    };
    // HAB_NONINTERACTIVE=1/true forces non-tty behavior; otherwise let UI
    // detect the terminal itself.
    let isatty = if env::var(NONINTERACTIVE_ENVVAR)
        .map(|val| val == "1" || val == "true")
        .unwrap_or(false)
    {
        Some(false)
    } else {
        None
    };
    UI::default_with(coloring, isatty)
}

/// Set all fields for an `SvcLoad` message that we can from the given opts. This function
/// populates all *shared* options between `run` and `load`.
fn update_svc_load_from_input(m: &ArgMatches, msg: &mut protocol::ctl::SvcLoad) -> Result<()> {
    msg.bldr_url = Some(bldr_url(m));
    msg.bldr_channel = Some(channel(m));
    // The `?`-fallible lookups below run in a fixed order; the first failure
    // aborts and leaves the remaining fields untouched.
    msg.application_environment = get_app_env_from_input(m)?;
    msg.binds = get_binds_from_input(m)?;
    msg.config_from = get_config_from_input(m);
    // Only set `force` when the flag is present so a value placed in `msg`
    // by the caller is preserved otherwise (sub_run relies on being able to
    // force unconditionally after this call).
    if m.is_present("FORCE") {
        msg.force = Some(true);
    }
    msg.group = get_group_from_input(m);
    msg.svc_encrypted_password = get_password_from_input(m)?;
    // Enum-valued options are carried as their protobuf integer codes.
    msg.binding_mode = get_binding_mode_from_input(m).map(|v| v as i32);
    msg.topology = get_topology_from_input(m).map(|v| v as i32);
    msg.update_strategy = get_strategy_from_input(m).map(|v| v as i32);
    Ok(())
}
use super::*; use parking_lot::Mutex; use std::{cmp::Ordering, collections::HashSet}; use std::collections::BTreeSet; use std::sync::{Arc, atomic::{Ordering as AtomicOrdering, AtomicBool, AtomicU64}}; use std::time::{Duration, Instant}; use std::thread; use threadpool::ThreadPool; use jsonrpc_http_server::jsonrpc_core::types::Value; use jsonrpc_http_server::jsonrpc_core::types::params::Params; use jsonrpc_http_server::jsonrpc_core::{IoHandler, Error}; use jsonrpc_http_server::{Server, ServerBuilder, DomainsValidation, AccessControlAllowOrigin}; use ton_block::{ AddSub, ShardAccount, HashUpdate, TransactionDescr, TransactionDescrOrdinary, TrComputePhase, TrComputePhaseVm, ComputeSkipReason, ShardStateUnsplit, BlkPrevInfo, Message, Deserializable, OutMsg, OutMsgNew, MsgEnvelope, Grams, OutMsgQueueKey, InMsg, OutMsgImmediately, OutMsgExternal }; use ton_executor::{BlockchainConfig, ExecutorError, OrdinaryTransactionExecutor, TransactionExecutor, ExecuteParams}; use ton_types::{BuilderData, SliceData, IBitstring, Result, AccountId, serialize_toc, HashmapRemover, HashmapE}; #[cfg(test)] #[path = "../../../tonos-se-tests/unit/test_messages.rs"] mod tests; // TODO: I think that 'static - is a bad practice. 
If you know how to do it without static - please help pub struct MessagesProcessor<T> where T: TransactionsStorage + Send + Sync + 'static, { tr_storage: Arc<T>, queue: Arc<InMessagesQueue>, shard_id: ShardIdent, blockchain_config: BlockchainConfig, executors: Arc<Mutex<HashMap<AccountId, Arc<Mutex<OrdinaryTransactionExecutor>>>>>, } impl<T> MessagesProcessor<T> where T: TransactionsStorage + Send + Sync + 'static, { pub fn with_params( queue: Arc<InMessagesQueue>, tr_storage: Arc<T>, shard_id: ShardIdent, blockchain_config: BlockchainConfig, ) -> Self { // make clone for changes //let shard_state_new = shard_state.lock().unwrap().clone(); Self { tr_storage, queue, shard_id, blockchain_config, executors: Arc::new(Mutex::new(HashMap::new())), } } /// loop-back message to InQueue or send to OutMsgQueue of shard fn route_out_messages( shard: &ShardIdent, queue: Arc<InMessagesQueue>, transaction: Arc<Transaction>, shard_state_new: Arc<Mutex<ShardStateUnsplit>> ) -> NodeResult<()> { let queue = &mut queue.clone(); transaction.iterate_out_msgs(|msg| { // if message destination address belongs current shard // put it to in queue // unwrap is safe, because transaction can generate only // internal and ExternalOutboundMessage if msg.is_internal() { if shard.contains_address(&msg.dst().unwrap())? { queue.priority_queue(QueuedMessage::with_message(msg)?) .map_err(|_| failure::format_err!("Error priority queue message"))?; } else { // let out_msg = OutMsg::New( // OutMsgNew::with_params( // &MsgEnvelope::with_message_and_fee( // TODO need understand how set addresses for Envelop // &msg, // 10u32.into() // TODO need understand where take fee value // )?, // &transaction // )? 
// ); let out_msg = MsgEnvelope::with_message_and_fee( // TODO need understand how set addresses for Envelop &msg, 10u32.into() // TODO need understand where take fee value )?; let address = OutMsgQueueKey::first_u64(transaction.account_id()); let mut shard_state_new = shard_state_new.lock(); let mut out_msg_queue_info = shard_state_new.read_out_msg_queue_info()?; out_msg_queue_info.out_queue_mut().insert(shard.workchain_id(), address, &out_msg, msg.lt().unwrap())?; shard_state_new.write_out_msg_queue_info(&out_msg_queue_info)?; } } Ok(true) })?; Ok(()) } // /// // /// Generate new block // /// // pub fn generate_block( // &mut self, // shard_state: &ShardStateUnsplit, // timeout: Duration, // seq_no: u32, // prev_ref: BlkPrevInfo, // required_block_at: u32, // debug: bool // ) -> NodeResult<Option<(Block, Option<ShardStateUnsplit>)>> { // debug!("GENBLK"); // let start_time = Instant::now(); // let new_shard_state = Arc::new(Mutex::new(shard_state.clone())); // let builder = BlockBuilder::with_shard_ident( // self.shard_id.clone(), // seq_no, prev_ref, 0, Option::None, // required_block_at); // while start_time.elapsed() < timeout { // if let Some(msg) = self.queue.dequeue_first_unused() { // let res = self.db.put_message(msg.message().clone(), MessageProcessingStatus::Processing, None, None); // if res.is_err() { // warn!(target: "node", "generate_block_multi reflect to db failed. error: {}", res.unwrap_err()); // } // let acc_id = msg.message().header().dest_account_address() // .expect("Can't get dest account address. 
Seems like outbound message into in-queue"); // let mut acc_opt = new_shard_state.lock().read_accounts()?.account(&acc_id)?; // // TODO it is possible to make account immutable, // // because in executor it is cloned for MerkleUpdate creation // if !self.executors.lock().contains_key(&acc_id) { // self.executors.lock().insert(acc_id.clone(), Arc::new(Mutex::new(E::new()))); // } // let (block_at, block_lt) = builder.at_and_lt(); // let executor = self.executors.lock().get(&acc_id).unwrap().clone(); // let now = Instant::now(); // let transaction = Arc::new(executor.lock().execute( // msg.message().clone(), &mut acc_opt, block_at, block_lt, debug // )?); // let d = now.elapsed(); // debug!(target: "node", "transaction execute time elapsed sec={}.{:06} ", d.as_secs(), d.subsec_micros()); // debug!(target: "node", "transaction status: {}", if transaction.read_description()?.is_aborted() { "Aborted" } else { "Success" }); // if let Some(ref acc) = acc_opt { // new_shard_state.lock().insert_account(acc)?; // } else { // unreachable!("where account?") // } // // loop-back for messages to current-shardchain // Self::route_out_messages(&self.shard_id, self.queue.clone(), transaction.clone(), new_shard_state.clone())?; // self.tr_storage.save_transaction(Arc::clone(&transaction))?; // let in_message = Arc::new( // Self::get_in_msg_from_transaction(&self.shard_id, &transaction)?.unwrap() // ); // let out_messages = Self::get_out_msgs_from_transaction(&self.shard_id, &transaction, &in_message)?; // if !builder.add_transaction(in_message.clone(), out_messages) { // think about how to remove clone // // TODO log error, write to transaction DB about error // } // } else { // thread::sleep(Duration::from_millis(1)); // } // } // info!(target: "node", "in messages queue len={}", self.queue.len()); // self.executors.lock().clear(); // if !builder.is_empty() { // let new_shard_state = std::mem::replace(&mut *new_shard_state.lock(), ShardStateUnsplit::default()); // let (block, 
_count) = builder.finalize_block(shard_state, &new_shard_state)?; // Ok(Some((block, Some(new_shard_state)))) // } else { // Ok(None) // } // } fn try_prepare_transaction( builder: &BlockBuilder, executor: &OrdinaryTransactionExecutor, acc_root: &mut Cell, msg: &Message, acc_last_lt: u64, debug: bool, ) -> NodeResult<(Transaction, u64)> { let (block_at, block_lt) = builder.at_and_lt(); let last_lt = std::cmp::max(acc_last_lt, block_lt); let lt = Arc::new(AtomicU64::new(last_lt + 1)); let result = executor.execute_with_libs_and_params( Some(&msg), acc_root, ExecuteParams { state_libs: HashmapE::default(), block_unixtime: block_at, block_lt, last_tr_lt: Arc::clone(&lt), debug, ..ExecuteParams::default() }, ); match result { Ok(transaction) => Ok((transaction, lt.load(AtomicOrdering::Relaxed))), Err(err) => { let lt = last_lt + 1; let account = Account::construct_from_cell(acc_root.clone())?; let mut transaction = Transaction::with_account_and_message(&account, msg, lt)?; transaction.set_now(block_at); let mut description = TransactionDescrOrdinary::default(); description.aborted = true; match err.downcast_ref::<ExecutorError>() { Some(ExecutorError::NoAcceptError(error, arg)) => { let mut vm_phase = TrComputePhaseVm::default(); vm_phase.success = false; vm_phase.exit_code = *error; if let Some(item) = arg { vm_phase.exit_arg = match item.as_integer().and_then(|value| value.into(std::i32::MIN..=std::i32::MAX)) { Err(_) | Ok(0) => None, Ok(exit_arg) => Some(exit_arg) }; } description.compute_ph = TrComputePhase::Vm(vm_phase); } Some(ExecutorError::NoFundsToImportMsg) => { description.compute_ph = if account.is_none() { TrComputePhase::skipped(ComputeSkipReason::NoState) } else { TrComputePhase::skipped(ComputeSkipReason::NoGas) }; } Some(ExecutorError::ExtMsgComputeSkipped(reason)) => { description.compute_ph = TrComputePhase::skipped(reason.clone()); } _ => return Err(err)? 
} transaction.write_description(&TransactionDescr::Ordinary(description))?; let hash = acc_root.repr_hash(); let state_update = HashUpdate::with_hashes(hash.clone(), hash); transaction.write_state_update(&state_update)?; Ok((transaction, lt)) } } } fn execute_thread( blockchain_config: BlockchainConfig, shard_id: &ShardIdent, queue: Arc<InMessagesQueue>, tr_storage: Arc<T>, executors: Arc<Mutex<HashMap<AccountId, Arc<Mutex<OrdinaryTransactionExecutor>>>>>, msg: QueuedMessage, builder: Arc<BlockBuilder>, acc_id: &AccountId, new_shard_state: Arc<Mutex<ShardStateUnsplit>>, debug: bool ) -> NodeResult<()> { let shard_acc = new_shard_state.lock().read_accounts()?.account(acc_id)?.unwrap_or_default(); let mut acc_root = shard_acc.account_cell().clone(); // TODO it is possible to make account immutable, // because in executor it is cloned for MerkleUpdate creation if !executors.lock().contains_key(acc_id) { let e = OrdinaryTransactionExecutor::new(blockchain_config); executors.lock().insert(acc_id.clone(), Arc::new(Mutex::new(e))); } debug!("Executing message {}", msg.message().hash()?.to_hex_string()); let now = Instant::now(); let executor = executors.lock().get(acc_id).unwrap().clone(); let (mut transaction, max_lt) = Self::try_prepare_transaction( &builder, &executor.lock(), &mut acc_root, msg.message(), shard_acc.last_trans_lt(), debug )?; transaction.set_prev_trans_hash(shard_acc.last_trans_hash().clone()); transaction.set_prev_trans_lt(shard_acc.last_trans_lt()); let transaction = Arc::new(transaction); info!(target: "profiler", "Transaction time: {} micros", now.elapsed().as_micros()); // info!(target: "profiler", "Init time: {} micros", executor.lock().timing(0)); // info!(target: "profiler", "Compute time: {} micros", executor.lock().timing(1)); // info!(target: "profiler", "Finalization time: {} micros", executor.lock().timing(2)); debug!("Transaction ID {}", transaction.hash()?.to_hex_string()); debug!(target: "executor", "Transaction aborted: {}", 
transaction.read_description()?.is_aborted()); let now = Instant::now(); // update or remove shard account in new shard state let acc = Account::construct_from_cell(acc_root)?; if !acc.is_none() { let shard_acc = ShardAccount::with_params(&acc, transaction.hash()?, transaction.logical_time())?; new_shard_state.lock().insert_account(&UInt256::from_slice(&acc_id.get_bytestring(0)), &shard_acc)?; } else { let mut shard_state = new_shard_state.lock(); let mut accounts = shard_state.read_accounts()?; accounts.remove(acc_id.clone())?; shard_state.write_accounts(&accounts)?; } // loop-back for messages to current-shardchain Self::route_out_messages(shard_id, queue.clone(), transaction.clone(), new_shard_state.clone())?; if let Ok(Some(tr)) = tr_storage.find_by_lt(transaction.logical_time(), &acc_id) { panic!("{:?}\n{:?}", tr, transaction) } tr_storage.save_transaction(Arc::clone(&transaction))?; let in_message = Self::get_in_msg_from_transaction(shard_id, &transaction)?.unwrap(); let imported_fees = in_message.get_fee()?; let out_messages = Self::get_out_msgs_from_transaction(shard_id, &transaction, &in_message)?; let mut exported_value = CurrencyCollection::new(); let mut exported_fees = Grams::zero(); let mut out_msg_vec = vec![]; for m in out_messages.iter() { let out_msg_val = m.exported_value()?; exported_value.add(&out_msg_val)?; exported_value.grams.add(&out_msg_val.grams)?; exported_fees.add(&out_msg_val.grams)?; let exp_val = m.exported_value()?; // All out-messages there must contain message (as out msgs of transaction) out_msg_vec.push((m.serialize()?, exp_val)); } // in-messages of transaction must contain message let transaction_cell = transaction.serialize()?; let context = AppendSerializedContext { in_msg: in_message.serialize()?, out_msgs: out_msg_vec, transaction, transaction_cell, max_lt, imported_value: Some(imported_fees.value_imported.clone()), exported_value, imported_fees, exported_fees, }; if !builder.add_serialized_transaction(context) { 
warn!(target: "node", "Error append serialized transaction info to BlockBuilder"); // TODO log error, write to transaction DB about error } info!(target: "profiler", "Transaction saving time: {} micros", now.elapsed().as_micros()); Ok(()) } /// /// Generate new block /// pub fn generate_block_multi( &mut self, shard_state: &ShardStateUnsplit, timeout: Duration, seq_no: u32, prev_ref: BlkPrevInfo, required_block_at: u32, debug: bool ) -> NodeResult<Option<(Block, Option<ShardStateUnsplit>)>> { debug!("GENBLKMUL"); let now = Instant::now(); let start_time = Instant::now(); let pool = ThreadPool::new(16); let new_shard_state = Arc::new(Mutex::new(shard_state.clone())); let builder = Arc::new(BlockBuilder::with_shard_ident( self.shard_id.clone(), seq_no, prev_ref, 0, Option::None, required_block_at)); let mut is_empty = true; while start_time.elapsed() < timeout { if let Some(msg) = self.queue.dequeue_first_unused() { let acc_id = msg.message().int_dst_account_id().unwrap(); // lock account in queue self.queue.lock_account(acc_id.clone()); let shard_id = self.shard_id.clone(); let queue = self.queue.clone(); let storage = self.tr_storage.clone(); let executors = self.executors.clone(); let builder = builder.clone(); let shard_state = new_shard_state.clone(); let blockchain_config = self.blockchain_config.clone(); let th = move || { let res = Self::execute_thread( blockchain_config, &shard_id, queue.clone(), storage, executors, msg, builder, &acc_id, shard_state, debug ); queue.unlock_account(&acc_id); if !res.is_ok() { warn!(target: "node", "Executor execute failed. 
{}", res.unwrap_err()); } };
                pool.execute(th);
                is_empty = false;
            } else {
                // Nothing dequeued right now; back off briefly before polling again.
                thread::sleep(Duration::from_nanos(100));
            }
        }
        // Wait for all in-flight transaction threads to finish.
        pool.join();
        let time0 = now.elapsed().as_micros();
        info!(target: "node", "in messages queue len={}", self.queue.len());
        self.executors.lock().clear();
        self.queue.locks_clear();
        if !is_empty {
            // Move the accumulated state out of the mutex without cloning it.
            let new_shard_state = std::mem::take(&mut *new_shard_state.lock());
            let (block, count) = builder.finalize_block(shard_state, &new_shard_state)?;
            info!(target: "profiler",
                "Block time: non-final/final {} / {} micros, transaction count: {}",
                time0, now.elapsed().as_micros(), count
            );
            Ok(Some((block, Some(new_shard_state))))
        } else {
            // No message was processed within the timeout: no block this round.
            Ok(None)
        }
    }

    /// Build the `InMsg` record for the transaction's inbound message.
    /// Returns `Ok(None)` when the transaction has no inbound message.
    fn get_in_msg_from_transaction(_shard_id: &ShardIdent, transaction: &Transaction) -> NodeResult<Option<InMsg>> {
        if let Some(ref msg) = transaction.read_in_msg()? {
            let msg = if msg.is_inbound_external() {
                // Inbound external message: no envelope or fee involved.
                InMsg::external(msg, transaction)?
            } else {
                let fee = msg.get_fee()?.unwrap_or_default();
                let env = MsgEnvelope::with_message_and_fee(msg, fee.clone())?;
                InMsg::immediatelly(&env, transaction, fee)?
            };
            Ok(Some(msg))
        } else {
            Ok(None)
        }
    }

    /// Build `OutMsg` records for every outbound message of `transaction`:
    /// internal messages addressed inside this shard become `Immediately`
    /// (referencing `reimport`), other internal ones become `New`, and
    /// external ones become `External`.
    fn get_out_msgs_from_transaction(shard_id: &ShardIdent, transaction: &Transaction, reimport: &InMsg) -> NodeResult<Vec<OutMsg>> {
        let mut res = vec![];
        let tr_cell = transaction.serialize()?;
        transaction.iterate_out_msgs(|ref msg| {
            res.push(if msg.is_internal() {
                if shard_id.contains_address(&msg.dst().unwrap())? {
                    OutMsg::Immediately(OutMsgImmediately::with_params(
                        &MsgEnvelope::with_message_and_fee(msg, Grams::one())?,
                        tr_cell.clone(), reimport)?)
                } else {
                    OutMsg::New(OutMsgNew::with_params(
                        &MsgEnvelope::with_message_and_fee(msg, Grams::one())?,
                        tr_cell.clone())?)
                }
            } else {
                OutMsg::External(OutMsgExternal::with_params(msg, tr_cell.clone())?)
            });
            Ok(true)
        })?;
        Ok(res)
    }
}

/// Json rpc server for receiving external outbound messages.
/// TODO the struct is not used now (15.08.19). It is candidate to deletion.
pub struct JsonRpcMsgReceiver {
    host: String,           // host part of the HTTP listen address
    port: String,           // port part of the HTTP listen address
    server: Option<Server>, // running HTTP server; `None` while stopped
}

#[allow(dead_code)]
impl MessagesReceiver for JsonRpcMsgReceiver {
    /// Start to receive messages. The function runs the receive thread and returns control.
    /// Fails with `InvalidOperation` when the server is already running.
    fn run(&mut self, queue: Arc<InMessagesQueue>) -> NodeResult<()> {
        if self.server.is_some() {
            node_err!(NodeErrorKind::InvalidOperation)
        } else {
            let mut io = IoHandler::default();
            // Every "call" RPC lands in `process_call` with a clone of the queue handle.
            io.add_method("call", move |params| Self::process_call(params, Arc::clone(&queue)));
            self.server = Some(ServerBuilder::new(io)
                .cors(DomainsValidation::AllowOnly(vec![AccessControlAllowOrigin::Null]))
                .start_http(&format!("{}:{}", self.host, self.port).parse().unwrap())?);
            Ok(())
        }
    }
}

#[allow(dead_code)]
impl JsonRpcMsgReceiver {
    /// Create a new instance of the struct which puts received messages into the given queue
    pub fn with_params(host: &str, port: &str) -> Self {
        Self {
            host: String::from(host),
            port: String::from(port),
            server: None,
        }
    }

    /// Stop receiving. Sends message to the receive thread and waits while it stops.
pub fn stop(&mut self) -> NodeResult<()> { if self.server.is_some() { let s = std::mem::replace(&mut self.server, None); s.unwrap().close(); Ok(()) } else { node_err!(NodeErrorKind::InvalidOperation) } } fn process_call(params: Params, msg_queue: Arc<InMessagesQueue>) -> jsonrpc_http_server::jsonrpc_core::Result<Value> { const MESSAGE: &str = "message"; let map = match params { Params::Map(map) => map, _ => return Err(Error::invalid_params("Unresolved parameters object.")) }; let message = match map.get(MESSAGE) { Some(Value::String(string)) => string, Some(_) => return Err(Error::invalid_params(format!("\"{}\" parameter must be a string.", MESSAGE))), _ => return Err(Error::invalid_params(format!("\"{}\" parameter not found.", MESSAGE))) }; let message = Message::construct_from_base64(&message).map_err(|err| Error::invalid_params(format!("Error parcing message: {}", err)) )?; msg_queue.queue(QueuedMessage::with_message(message).unwrap()).expect("Error queue message"); Ok(Value::String(String::from("The message has been succesfully received"))) } } /// Struct RouteMessage. 
/// Stored peerId of the node that received the message
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RouteMessage {
    pub peer: usize, // id of the peer the message arrived from
    pub msg: Message
}

/// Either a locally received message or one routed from another peer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum QueuedMessageInternal {
    Message(Message),
    RouteMessage(RouteMessage)
}

impl QueuedMessageInternal {
    /// Borrow the wrapped message regardless of variant.
    pub fn message(&self) -> &Message {
        match self {
            QueuedMessageInternal::Message(ref msg) => msg,
            QueuedMessageInternal::RouteMessage(ref r_msg) => &r_msg.msg,
        }
    }

    /// Mutably borrow the wrapped message regardless of variant.
    pub fn message_mut(&mut self) -> &mut Message {
        match self {
            QueuedMessageInternal::Message(ref mut msg) => msg,
            QueuedMessageInternal::RouteMessage(ref mut r_msg) => &mut r_msg.msg,
        }
    }
}

/// Queue entry: the message plus its representation hash, which serves as
/// an ordering tie-breaker (see the `Ord` impl below).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct QueuedMessage {
    internal: QueuedMessageInternal,
    hash: UInt256,
}

impl Default for QueuedMessage {
    fn default() -> Self {
        Self::with_message(Message::default()).unwrap()
    }
}

impl PartialOrd for QueuedMessage {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for QueuedMessage {
    fn cmp(&self, other: &Self) -> Ordering {
        // All messages without LT will be at the end of the queue
        let result = self.message().lt()
            .unwrap_or(u64::max_value())
            .cmp(&other.message().lt().unwrap_or(u64::max_value()));
        if result == Ordering::Equal {
            // Equal logical times: fall back to the message hash so the
            // ordering stays total and deterministic.
            return self.hash.cmp(&other.hash);
        }
        result
    }
}

impl QueuedMessage {
    /// Wrap a locally received message.
    pub fn with_message(message: Message) -> Result<Self> {
        Self::new(QueuedMessageInternal::Message(message))
    }

    /// Wrap a message routed from another peer.
    pub fn with_route_message(message: RouteMessage) -> Result<Self> {
        Self::new(QueuedMessageInternal::RouteMessage(message))
    }

    // Computes and caches the representation hash at construction time.
    fn new(internal: QueuedMessageInternal) -> Result<Self> {
        let hash = internal.message().serialize()?.repr_hash();
        Ok(Self {
            internal,
            hash,
        })
    }

    pub fn message(&self) -> &Message {
        self.internal.message()
    }

    pub fn message_mut(&mut self) -> &mut Message {
        self.internal.message_mut()
    }
}

impl Serializable for QueuedMessage {
    fn write_to(&self, cell: &mut BuilderData) -> Result<()> {
        match &self.internal {
            // 4-bit tags distinguish the two variants on the wire; they must
            // match the tags read back in `Deserializable::read_from` below.
            QueuedMessageInternal::Message(msg) => {
                cell.append_bits(0b1001, 4)?;
                msg.write_to(cell)?;
            },
            QueuedMessageInternal::RouteMessage(rm) => {
                cell.append_bits(0b0110, 4)?;
                (rm.peer as u64).write_to(cell)?;
                rm.msg.write_to(cell)?;
            }
        }
        Ok(())
    }
}

impl Deserializable for QueuedMessage {
    fn read_from(&mut self, slice: &mut SliceData) -> Result<()> {
        let tag = slice.get_next_int(4)? as usize;
        match tag {
            0b1001 => {
                *self = Self::with_message(Message::construct_from(slice)?)?;
            },
            0b0110 => {
                let mut peer: u64 = 0;
                let mut msg = Message::default();
                peer.read_from(slice)?;
                msg.read_from(slice)?;
                *self = Self::with_route_message(RouteMessage{ peer: peer as usize, msg })?;
            },
            // NOTE(review): an unknown tag is silently ignored and `self`
            // stays unchanged — confirm whether this should be an error.
            _ => (),
        }
        Ok(())
    }
}

/// This FIFO accumulates inbound messages from all types of receivers.
/// The struct might be used from many threads. It provides internal mutability.
pub struct InMessagesQueue {
    shard_id: ShardIdent,
    // Inbound messages, ordered by (logical time, hash) via `Ord` on QueuedMessage.
    storage: Mutex<BTreeSet<QueuedMessage>>,
    // Messages to be routed to other nodes, in arrival order.
    out_storage: Mutex<VecDeque<QueuedMessage>>,
    db: Option<Arc<Box<dyn DocumentsDb>>>,
    // Accounts currently held by an executor; their messages are skipped on dequeue.
    used_accs: Mutex<HashSet<AccountId>>,
    capacity: usize,
    ready_to_process: AtomicBool,
}

#[allow(dead_code)]
impl InMessagesQueue {
    /// Create new instance of InMessagesQueue.
    pub fn new(shard_id: ShardIdent, capacity: usize) -> Self {
        InMessagesQueue {
            shard_id,
            storage: Mutex::new(BTreeSet::new()),
            out_storage: Mutex::new(VecDeque::new()),
            used_accs: Mutex::new(HashSet::new()),
            db: None,
            capacity,
            ready_to_process: AtomicBool::new(false),
        }
    }

    /// Same as `new`, but with a documents DB used for delivery-problem checks.
    pub fn with_db(shard_id: ShardIdent, capacity: usize, db: Arc<Box<dyn DocumentsDb>>) -> Self {
        InMessagesQueue {
            shard_id,
            storage: Mutex::new(BTreeSet::new()),
            out_storage: Mutex::new(VecDeque::new()),
            used_accs: Mutex::new(HashSet::new()),
            db: Some(db),
            capacity,
            ready_to_process: AtomicBool::new(false),
        }
    }

    ///
    /// Set in message queue ready-mode
    /// true - node is ready to process messages and generate a block
    /// false - node receives messages and routes them to other nodes
    ///
    pub fn set_ready(&self, mode: bool) {
        info!(target: "node", "in message queue set ready-mode: {}", mode);
        self.ready_to_process.store(mode, AtomicOrdering::SeqCst);
    }

    ///
    /// Get mode
    ///
    pub fn ready(&self) -> bool {
        self.ready_to_process.load(AtomicOrdering::SeqCst)
    }

    /// True when the backing DB (if any) reports message-delivery problems.
    pub fn has_delivery_problems(&self) -> bool {
        self.db.as_ref().map_or(false, |db| db.has_delivery_problems())
    }

    // Push the message onto the out-queue for delivery to another node.
    fn route_message_to_other_node(&self, msg: QueuedMessage) -> std::result::Result<(), QueuedMessage> {
        let mut out_storage = self.out_storage.lock();
        out_storage.push_back(msg);
        Ok(())
    }

    // Does the destination address of `msg` belong to this node's shard?
    fn is_message_to_current_node(&self, msg: &Message) -> bool {
        if let Some(msg_dst) = msg.dst() {
            return self.shard_id.contains_address(&msg_dst).unwrap()
        }
        true // if message hasn't workchain or address, it will be processed by any node
    }

    /// Include message into end queue.
pub fn queue(&self, msg: QueuedMessage) -> std::result::Result<(), QueuedMessage> { // messages unsuitable to this node route all time if !self.is_message_to_current_node(msg.message()) { debug!(target: "node", "MESSAGE-IS-FOR-OTHER-NODE {:?}", msg); return self.route_message_to_other_node(msg); } if self.has_delivery_problems() { debug!(target: "node", "Has delivery problems"); return Err(msg); } let mut storage = self.storage.lock(); if storage.len() >= self.capacity { return Err(msg); } storage.insert(msg.clone()); debug!(target: "node", "Queued message: {:?}", msg.message()); Ok(()) } /// Include message into begin queue fn priority_queue(&self, msg: QueuedMessage) -> std::result::Result<(), QueuedMessage> { if !self.is_message_to_current_node(msg.message()) { return self.route_message_to_other_node(msg); } let mut storage = self.storage.lock(); let msg_str = format!("{:?}", msg.message()); storage.insert(msg); debug!(target: "node", "Priority queued message: {}", msg_str); Ok(()) } /// Extract oldest message from queue. pub fn dequeue(&self) -> Option<QueuedMessage> { let mut storage = self.storage.lock(); let first = if let Some(first) = storage.iter().next() { first.clone() } else { return None; }; storage.remove(&first); Some(first) } /// Extract oldest message from out_queue. 
pub fn dequeue_out(&self) -> Option<QueuedMessage> { let mut out_storage = self.out_storage.lock(); out_storage.pop_front() } /// Extract oldest message from queue if message account not using in executor pub fn dequeue_first_unused(&self) -> Option<QueuedMessage> { let mut storage = self.storage.lock(); let used_accs = self.used_accs.lock(); // iterate from front and find unused account message let result = storage.iter().find(|msg| { msg.message().int_dst_account_id() .map(|acc_id| !used_accs.contains(&acc_id)) .unwrap_or(false) }).cloned(); if let Some(ref msg) = result { storage.remove(msg); } result } pub fn print_message(msg: &Message) { log::info!("message: {:?}", msg); if let Ok(cell) = msg.serialize() { if let Ok(data) = serialize_toc(&cell) { std::fs::create_dir_all("export").ok(); std::fs::write(&format!("export/msg_{:x}", cell.repr_hash()), &data).ok(); } } } pub fn is_full(&self) -> bool { dbg!(self.len()) >= self.capacity } /// The length of queue. pub fn len(&self) -> usize { self.storage.lock().len() } /// lock account message for dequeue pub fn lock_account(&self, account_id: AccountId) { self.used_accs.lock().insert(account_id); } /// unlock account mesages for dequeue pub fn unlock_account(&self, account_id: &AccountId) { self.used_accs.lock().remove(account_id); } /// Unlock all accounts pub fn locks_clear(&self) { self.used_accs.lock().clear(); } } /// is account_id has prefix identically prefix of shard pub fn is_in_current_shard(shard_id: &ShardIdent, account_wc: i32, account_id: &AccountId) -> bool { if shard_id.workchain_id() != account_wc { debug!(target: "node", "WORKCHAIN mismatch: Node {}, Msg {}", shard_id.workchain_id(), account_wc); } shard_id.contains_account(account_id.clone()).unwrap() }
use std::{ env, fs };

/// CLI entry point: `prog <path> <settings>`. Recursively collects the
/// paths of all regular files under `<path>`.
fn main() {
    let args: Vec<String> = env::args().collect();
    let path: String = args[1].clone();
    let _settings: String = args[2].clone();
    // Start from an empty accumulator; the original pre-filled 64 empty
    // strings, which would have polluted the result list.
    let files: Vec<String> = Vec::new();
    let _file_list = fetch_files(path, files);
}

/// Recursively walk `path`, appending every regular file's path to `arr`
/// and returning the accumulated list.
///
/// Fixes over the original: the original did not compile (missing closing
/// paren in the recursive call; the `DirEntry` result was unwrapped twice
/// after being moved), never pushed any file path, and discarded the
/// recursion's merged result.
fn fetch_files(path: String, mut arr: Vec<String>) -> Vec<String> {
    let entries = fs::read_dir(&path).unwrap();
    for entry in entries {
        let entry_path = entry.unwrap().path();
        let path_name: String = entry_path.display().to_string();
        if entry_path.is_dir() {
            // Recurse, threading the accumulator through instead of
            // building and dropping a merged copy.
            arr = fetch_files(path_name, arr);
        } else {
            arr.push(path_name);
        }
    }
    arr
}
/// Rust platform tiers: support levels are organized into three tiers, each
/// with a different set of guarantees.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub enum Tier {
    /// Tier 1 platforms can be thought of as “guaranteed to work”.
    /// Specifically they will each satisfy the following requirements:
    ///
    /// * Official binary releases are provided for the platform.
    /// * Automated testing is set up to run tests for the platform.
    /// * Landing changes to the rust-lang/rust repository’s master branch
    ///   is gated on tests passing.
    /// * Documentation for how to use and how to build the platform is available.
    One,
    /// Tier 2 platforms can be thought of as “guaranteed to build”. Automated
    /// tests are not run so it’s not guaranteed to produce a working build,
    /// but platforms often work to quite a good degree and patches are always
    /// welcome!
    ///
    /// Specifically, these platforms are required to have each of the following:
    ///
    /// * Official binary releases are provided for the platform.
    /// * Automated building is set up, but may not be running tests.
    /// * Landing changes to the rust-lang/rust repository’s master branch is
    ///   gated on platforms building. For some platforms only the standard
    ///   library is compiled, but for others rustc and cargo are too.
    Two,
    /// Tier 3 platforms are those which the Rust codebase has support for, but
    /// which are not built or tested automatically, and may not work.
    /// Official builds are not available.
    Three,
}

impl Tier {
    /// Get a number identifying this tier
    pub fn to_usize(self) -> usize {
        // The variants are declared in order One, Two, Three, so their
        // default discriminants are 0, 1, 2; the tier number is one greater.
        self as usize + 1
    }

    /// Get a string identifying this tier
    pub fn as_str(self) -> &'static str {
        // Indexed by discriminant, mirroring `to_usize` above.
        const NAMES: [&'static str; 3] = ["tier1", "tier2", "tier3"];
        NAMES[self as usize]
    }
}
//! Module declarations for IO, PCI, PS/2, RTC, serial and keyboard-layout support.

/// IO primitives
pub mod io;
/// PCI
pub mod pci;
/// PS2
pub mod ps2;
/// RTC
pub mod rtc;
/// Serial
pub mod serial;
/// Layouts
pub mod kb_layouts;
//! Re-exports of the register, scale and displacement parsing primitives.

// Register width classification.
mod reg_size;
pub use self::reg_size::RegSize;

// Register definitions and per-width register parsers.
mod reg;
pub use self::reg::{
    Register,
    parse_reg,
    parse_512bit_reg,
    parse_256bit_reg,
    parse_128bit_reg,
    parse_64bit_reg,
    parse_32bit_reg,
    parse_16bit_reg,
    parse_8bit_reg,
    parse_mmx_reg,
    parse_x87_reg,
    parse_vec_reg,
    parse_long_ptr_reg,
};

// Index-scale factors.
mod scale;
pub use self::scale::{
    Scale,
    parse_scale
};

// Displacement constants.
mod disp;
pub use self::disp::{
    Disp,
    parse_const
};
use friday_vendor;
use friday_vendor::DispatchResponse;
use friday_vendor::Vendor;

use friday_signal;
use friday_signal::core::{Signal, Listening, Inference, Dispatch};

use friday_audio;
use friday_audio::recorder::Recorder;

use friday_vad;
use friday_vad::core::{SpeakDetector, VADResponse};

use friday_inference;
use friday_inference::Model;

use friday_logging;

use friday_error::log_if_err;

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};

use ctrlc;

/// Forward a recognized `class` to every vendor in turn.
/// A vendor's `Success`/`NoMatch` outcomes are both ignored; only hard
/// dispatch errors are logged.
fn dispatch<V>(vendors: &Vec<Box<V>>, class: String) where V: Vendor + ?Sized {
    for vendor in vendors.iter() {
        match vendor.dispatch(&class) {
            Ok(dispatch_response) => match dispatch_response {
                DispatchResponse::Success => (),
                DispatchResponse::NoMatch => ()
            },
            Err(err) => friday_logging::error!(
                "Failed to dispatch {} - Reason: {:?}",
                vendor.name(),
                err)
        }
    }
}

/// Main serving loop of the assistant: read audio from `shared_istream`,
/// gate it through the voice-activity detector `vad`, run `model` inference
/// on voiced audio and dispatch recognized commands to `vendors`, reporting
/// state transitions on `signal_device` throughout. Runs until Ctrl-C.
pub fn serve_friday<M, S, V, R>(
    vad: &mut S,
    model: &mut M,
    vendors: &Vec<Box<V>>,
    shared_istream: Arc<Mutex<R>>,
    mut signal_device: Box<dyn friday_signal::core::Device>)
    where M: Model,
          S: SpeakDetector,
          V: Vendor + ?Sized,
          R: Recorder {
        // Create interrupt handler
        let running = Arc::new(AtomicBool::new(true));
        let r = running.clone();
        ctrlc::set_handler(move || {
            r.store(false, Ordering::SeqCst);
        }).expect("Error setting Ctrl-C handler");

        friday_logging::info!("Purging some audio... (takes 2 seconds)");
        std::thread::sleep(std::time::Duration::from_millis(2000));

        // State to keep track if previous audio was inferred on
        // If we go from inference to silence we reset any state the model might have.
        let mut previous_was_inference = false;

        // Run forever-loop
        friday_logging::info!("Listening..");

        // Friday is listening
        log_if_err!(signal_device.send(&Signal::Listening(Listening::Start)));
        while running.load(Ordering::SeqCst) {
            std::thread::sleep(std::time::Duration::from_millis(250));
            match shared_istream.lock() {
                Err(err) => {
                    friday_logging::fatal!("Error occurred when aquiring istream mutex {:?}", err);

                    // Recovering from this is essentially restarting the assistant so we just
                    // break to exit
                    break;
                }
                Ok(istream) => match istream.read() {
                    Some(audio) => {
                        match vad.detect(&audio) {
                            VADResponse::Voice => {
                                // VAD got voice so we will run inference
                                // as long as this is true the model gets to keep its
                                // state.
                                previous_was_inference = true;

                                // Friday starts inferring
                                log_if_err!(signal_device.send(&Signal::Inference(Inference::Start)));
                                match model.predict(&audio) {
                                    Ok(prediction) => match prediction {
                                        friday_inference::Prediction::Result{
                                            class,
                                        } => {
                                            friday_logging::info!("Dispatching {}", class);

                                            // Friday starts dispatching
                                            log_if_err!(signal_device.send(&Signal::Dispatch(Dispatch::Start)));

                                            //Sleep to clear the replay buffer
                                            //std::thread::sleep(std::time::Duration::from_millis(2000));

                                            // Dispatch the command
                                            dispatch(vendors, class);

                                            // Friday stops dispatching
                                            log_if_err!(signal_device.send(&Signal::Dispatch(Dispatch::Stop)));

                                            // Clear buffer of any trace of the signal
                                            // that triggered this command
                                            match istream.clear() {
                                                Err(err) => {
                                                    friday_logging::fatal!(
                                                        "Failed to clear audio buffer, Reason: {:?} \
                                                        exiting..", err);

                                                    break;
                                                },
                                                Ok(()) => ()
                                            };
                                        },
                                        friday_inference::Prediction::Silence => (),
                                        friday_inference::Prediction::Inconclusive => ()
                                    },
                                    Err(err) => friday_logging::error!(
                                        "Failed to do inference - Reason: {:?}", err)
                                }
                            },
                            VADResponse::Silence => {
                                // Transition voice -> silence: reset any state the
                                // model accumulated during the utterance.
                                if previous_was_inference {
                                    log_if_err!(signal_device.send(&Signal::Inference(Inference::Stop)));
                                    match model.reset() {
                                        Ok(()) => friday_logging::debug!("Model was reset"),
                                        Err(err) => {
                                            friday_logging::fatal!(
                                                "Failed to reset model, Reason: {:?} \
                                                exiting..", err);
                                            break;
                                        }
                                    }
                                }
                                previous_was_inference = false;
                            }
                        }
                    },
                    None => friday_logging::error!("(main) Failed to read audio")
                }
            }
        }

        log_if_err!(signal_device.send(&Signal::Listening(Listening::Stop)));
}