text
stringlengths
8
4.13M
use bonuses; use bonuses::BonusTracker; pub use self::charisma::{ Charisma }; pub use self::constitution::{ Constitution }; pub use self::dexterity::{ Dexterity }; pub use self::intelligence::{ Intelligence }; pub use self::strength::{ Strength }; pub use self::wisdom::{ Wisdom }; mod charisma; mod constitution; mod dexterity; mod intelligence; mod strength; mod wisdom; pub trait AbilityScore { fn total(&self) -> Option<u8> where Self: Sized; fn bonus(&self) -> u8 where Self: Sized; fn modifier(&self) -> i8 where Self: Sized; fn get_base(&self) -> &Option<u8> where Self: Sized; fn set_base(&mut self, Option<u8>) -> &mut Self where Self: Sized; fn add_penalty(&mut self, u8) -> &mut Self where Self: Sized; fn remove_penalty(&mut self, u8) -> &mut Self where Self: Sized; fn get_penalty(&mut self) -> u8 where Self: Sized; fn add_enhancement(&mut self, u8) -> &mut Self where Self: Sized; fn remove_enhancement(&mut self, u8) -> &mut Self where Self: Sized; fn get_enhancement(&mut self) -> u8 where Self: Sized; fn add_inherent(&mut self, u8) -> &mut Self where Self: Sized; fn remove_inherent(&mut self, u8) -> &mut Self where Self: Sized; fn get_inherent(&mut self) -> u8 where Self: Sized; fn add_untyped(&mut self, u8) -> &mut Self where Self: Sized; fn remove_untyped(&mut self, u8) -> &mut Self where Self: Sized; fn get_untyped(&mut self) -> u8 where Self: Sized; } struct GenericAbilityScore { base: Option<u8>, penalties: bonuses::Penalty, enhancement_bonus: bonuses::EnhancementBonus, inherent_bonus: bonuses::InherentBonus, untyped_bonus: bonuses::UntypedBonus, } impl Default for GenericAbilityScore { fn default() -> GenericAbilityScore { GenericAbilityScore { base: Some(10), penalties: bonuses::Penalty::new(), enhancement_bonus: bonuses::EnhancementBonus::new(), inherent_bonus: bonuses::InherentBonus::new(), untyped_bonus: bonuses::UntypedBonus::new(), } } } impl GenericAbilityScore { fn new(base_score: Option<u8>) -> GenericAbilityScore { GenericAbilityScore { base: 
base_score, penalties: bonuses::Penalty::new(), enhancement_bonus: bonuses::EnhancementBonus::new(), inherent_bonus: bonuses::InherentBonus::new(), untyped_bonus: bonuses::UntypedBonus::new(), } } fn get_modifier(score: Option<u8>) -> i8 { match score { Some(value) => { let cast_value: f32 = value as f32; let total = cast_value / 2.0 - 5.0; return total.floor() as i8; }, None => { return 0; }, } } } impl AbilityScore for GenericAbilityScore { fn total(&self) -> Option<u8> { match self.base { None => { return None }, Some(base) => { let mut running_total = base as i8; running_total -= self.penalties.total() as i8; running_total += self.enhancement_bonus.total() as i8; running_total += self.inherent_bonus.total() as i8; running_total += self.untyped_bonus.total() as i8; if 0 < running_total { return Some(running_total as u8); } return Some(0); }, } } fn bonus(&self) -> u8 { let modifier = GenericAbilityScore::get_modifier(self.base); if 0 > modifier { return 0; } else { return modifier as u8; } } fn modifier(&self) -> i8 { GenericAbilityScore::get_modifier(self.base) } fn get_base(&self) -> &Option<u8> { &self.base } fn set_base(&mut self, base: Option<u8>) -> &mut GenericAbilityScore { self.base = base; return self; } fn add_penalty(&mut self, penalty: u8) -> &mut GenericAbilityScore { self.penalties.add(penalty); return self; } fn remove_penalty(&mut self, penalty: u8) -> &mut GenericAbilityScore { self.penalties.remove(penalty); return self; } fn get_penalty(&mut self) -> u8 { return self.penalties.total(); } fn add_enhancement(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.enhancement_bonus.add(bonus); return self; } fn remove_enhancement(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.enhancement_bonus.remove(bonus); return self; } fn get_enhancement(&mut self) -> u8 { return self.enhancement_bonus.total(); } fn add_inherent(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.inherent_bonus.add(bonus); return self; } fn 
remove_inherent(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.inherent_bonus.remove(bonus); return self; } fn get_inherent(&mut self) -> u8 { return self.inherent_bonus.total(); } fn add_untyped(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.untyped_bonus.add(bonus); return self; } fn remove_untyped(&mut self, bonus: u8) -> &mut GenericAbilityScore { self.untyped_bonus.remove(bonus); return self; } fn get_untyped(&mut self) -> u8 { return self.untyped_bonus.total(); } } #[cfg(test)] mod genericabilityscoretests { use super::*; use ability_scores; #[test] fn default_get_base() { // the default abilityscore should be 10 let score = ability_scores::GenericAbilityScore::default(); let expected_value: Option<u8> = Some(10); assert_eq!(&expected_value, score.get_base()); } #[test] fn new_get_base() { // get_base should return the score set in the constructor let default_base: Option<u8> = Some(18); let score = ability_scores::GenericAbilityScore::new(default_base); assert_eq!(&default_base, score.get_base()); } #[test] fn set_base_get_base() { // get_base should return the score set in set_base let mut score = ability_scores::GenericAbilityScore::default(); let new_base: Option<u8> = Some(18); score.set_base(new_base); assert_eq!(&new_base, score.get_base()); } #[test] fn none_bonus() { let score = ability_scores::GenericAbilityScore::new(None); let expected_bonus: u8 = 0; assert_eq!(expected_bonus, score.bonus()); } #[test] fn zero_bonus() { // null ability scores have a bonus of 0 let score = ability_scores::GenericAbilityScore::new(Some(0)); let expected_bonus: u8 = 0; assert_eq!(expected_bonus, score.bonus()); } #[test] fn nine_bonus() { // Ability scores less than 10 have a bonus of 0 let score = ability_scores::GenericAbilityScore::new(Some(9)); let expected_bonus: u8 = 0; assert_eq!(expected_bonus, score.bonus()); } #[test] fn ten_bonus() { // Ability score of 10 has a bonus of 0 let score = ability_scores::GenericAbilityScore::new(Some(10)); 
let expected_bonus: u8 = 0; assert_eq!(expected_bonus, score.bonus()); } #[test] fn eleven_bonus() { // Ability score of 11 has a bonus of 0 let score = ability_scores::GenericAbilityScore::new(Some(11)); let expected_bonus: u8 = 0; assert_eq!(expected_bonus, score.bonus()); } #[test] fn twelve_bonus() { // Ability score of 12 has a bonus of 1 let score = ability_scores::GenericAbilityScore::new(Some(12)); let expected_bonus: u8 = 1; assert_eq!(expected_bonus, score.bonus()); } #[test] fn eighteen_bonus() { // Ability score of 18 has a bonus of 1 let score = ability_scores::GenericAbilityScore::new(Some(18)); let expected_bonus: u8 = 4; assert_eq!(expected_bonus, score.bonus()); } #[test] fn zero_modifier() { // null ability scores have a modifier of 0 let score = ability_scores::GenericAbilityScore::new(Some(0)); let expected_bonus: i8 = -5; assert_eq!(expected_bonus, score.modifier()); } #[test] fn nine_modifier() { // Ability scores of 9 has a modifier of -1 let score = ability_scores::GenericAbilityScore::new(Some(9)); let expected_bonus: i8 = -1; assert_eq!(expected_bonus, score.modifier()); } #[test] fn ten_modifier() { // Ability score of 10 has a modifier of 0 let score = ability_scores::GenericAbilityScore::new(Some(10)); let expected_bonus: i8 = 0; assert_eq!(expected_bonus, score.modifier()); } #[test] fn eleven_modifier() { // Ability score of 11 has a modifier of 0 let score = ability_scores::GenericAbilityScore::new(Some(11)); let expected_bonus: i8 = 0; assert_eq!(expected_bonus, score.modifier()); } #[test] fn twelve_modifier() { // Ability score of 12 has a modifier of 1 let score = ability_scores::GenericAbilityScore::new(Some(12)); let expected_bonus: i8 = 1; assert_eq!(expected_bonus, score.modifier()); } #[test] fn eighteen_modifier() { // Ability score of 18 has a modifier of 1 let score = ability_scores::GenericAbilityScore::new(Some(18)); let expected_bonus: i8 = 4; assert_eq!(expected_bonus, score.modifier()); } #[test] fn ten_base_total() 
{ // total should return the score set in the constructor let base: Option<u8> = Some(10); let score = ability_scores::GenericAbilityScore::new(base); assert_eq!(base, score.total()); } #[test] fn eighteen_base_total() { // total should return the score set in the constructor let base: Option<u8> = Some(18); let score = ability_scores::GenericAbilityScore::new(base); assert_eq!(base, score.total()); } #[test] fn penalty_add_total() { // total should return the sume of the base score and the enhancement bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_penalty(2); let expected_total: Option<u8> = Some(8); assert_eq!(expected_total, score.total()); } #[test] fn penalty_add_negative_total() { // total should return the sume of the base score and the enhancement bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_penalty(11); let expected_total: Option<u8> = Some(0); assert_eq!(expected_total, score.total()); } #[test] fn penalty_remove_total() { // total should return the sume of the base score and the enhancement bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_penalty(2); score.remove_penalty(2); assert_eq!(base, score.total()); } #[test] fn enhancement_add_total() { // total should return the sume of the base score and the enhancement bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_enhancement(2); let expected_total: Option<u8> = Some(12); assert_eq!(expected_total, score.total()); } #[test] fn enhancement_remove_total() { // total should return the sume of the base score and the enhancement bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_enhancement(2); score.remove_enhancement(2); assert_eq!(base, score.total()); } #[test] fn inherent_add_total() { // 
total should return the sume of the base score and the inherent bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_inherent(2); let expected_total: Option<u8> = Some(12); assert_eq!(expected_total, score.total()); } #[test] fn inherent_remove_total() { // total should return the sume of the base score and the inherent bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_inherent(2); score.remove_inherent(2); assert_eq!(base, score.total()); } #[test] fn untyped_add_total() { // total should return the sume of the base score and the untyped bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_untyped(2); let expected_total: Option<u8> = Some(12); assert_eq!(expected_total, score.total()); } #[test] fn untyped_remove_total() { // total should return the sume of the base score and the untyped bonus let base: Option<u8> = Some(10); let mut score = ability_scores::GenericAbilityScore::new(base); score.add_untyped(2); score.remove_untyped(2); assert_eq!(base, score.total()); } }
pub mod file;
use ::std::*; /** --- Day 5: Sunny with a Chance of Asteroids --- You're starting to sweat as the ship makes its way toward Mercury. The Elves suggest that you get the air conditioner working by upgrading your ship computer to support the Thermal Environment Supervision Terminal. The Thermal Environment Supervision Terminal (TEST) starts by running a diagnostic program (your puzzle input). The TEST diagnostic program will run on your existing Intcode computer after a few modifications: First, you'll need to add two new instructions: Opcode 3 takes a single integer as input and saves it to the position given by its only parameter. For example, the instruction 3,50 would take an input value and store it at address 50. Opcode 4 outputs the value of its only parameter. For example, the instruction 4,50 would output the value at address 50. Programs that use these instructions will come with documentation that explains what should be connected to the input and output. The program 3,0,4,0,99 outputs whatever it gets as input, then halts. Second, you'll need to add support for parameter modes: Each parameter of an instruction is handled based on its parameter mode. Right now, your ship computer already understands parameter mode 0, position mode, which causes the parameter to be interpreted as a position - if the parameter is 50, its value is the value stored at address 50 in memory. Until now, all parameters have been in position mode. Now, your ship computer will also need to handle parameters in mode 1, immediate mode. In immediate mode, a parameter is interpreted as a value - if the parameter is 50, its value is simply 50. Parameter modes are stored in the same value as the instruction's opcode. The opcode is a two-digit number based only on the ones and tens digit of the value, that is, the opcode is the rightmost two digits of the first value in an instruction. 
Parameter modes are single digits, one per parameter, read right-to-left from the opcode: the first parameter's mode is in the hundreds digit, the second parameter's mode is in the thousands digit, the third parameter's mode is in the ten-thousands digit, and so on. Any missing modes are 0. For example, consider the program 1002,4,3,4,33. The first instruction, 1002,4,3,4, is a multiply instruction - the rightmost two digits of the first value, 02, indicate opcode 2, multiplication. Then, going right to left, the parameter modes are 0 (hundreds digit), 1 (thousands digit), and 0 (ten-thousands digit, not present and therefore zero): ABCDE 1002 DE - two-digit opcode, 02 == opcode 2 C - mode of 1st parameter, 0 == position mode B - mode of 2nd parameter, 1 == immediate mode A - mode of 3rd parameter, 0 == position mode, omitted due to being a leading zero This instruction multiplies its first two parameters. The first parameter, 4 in position mode, works like it did before - its value is the value stored at address 4 (33). The second parameter, 3 in immediate mode, simply has value 3. The result of this operation, 33 * 3 = 99, is written according to the third parameter, 4 in position mode, which also works like it did before - 99 is written to address 4. Parameters that an instruction writes to will never be in immediate mode. Finally, some notes: It is important to remember that the instruction pointer should increase by the number of values in the instruction after the instruction finishes. Because of the new instructions, this amount is no longer always 4. Integers can be negative: 1101,100,-1,4,0 is a valid program (find 100 + -1, store the result in position 4). The TEST diagnostic program will start by requesting from the user the ID of the system to test by running an input instruction - provide it 1, the ID for the ship's air conditioner unit. 
It will then perform a series of diagnostic tests confirming that various parts of the Intcode computer, like parameter modes, function correctly. For each test, it will run an output instruction indicating how far the result of the test was from the expected value, where 0 means the test was successful. Non-zero outputs mean that a function is not working correctly; check the instructions that were run before the output instruction to see which one failed. Finally, the program will output a diagnostic code and immediately halt. This final output isn't an error; an output followed immediately by a halt means the program finished. If all outputs were zero except the diagnostic code, the diagnostic program ran successfully. After providing 1 to the only input instruction and passing all the tests, what diagnostic code does the program produce? */ use num_derive::FromPrimitive; use num_traits::{pow, FromPrimitive}; use std::iter::FromIterator; #[derive(FromPrimitive)] enum OpCode { Add = 1, Multiply = 2, Input = 3, Output = 4, End = 99, } fn get_reg(ro_program: &[i32], pc: usize, parameter_mode: i32, position: usize) -> i32 { let val = ro_program[pc + position]; let digit = pow(10, position - 1); if (parameter_mode / digit) % 10 == 1 { val } else { ro_program[val as usize] } } fn execute_program(ro_program: &[i32]) -> i32 { let mut program: Vec<i32> = Vec::from_iter(ro_program.iter().cloned()); let mut pc: usize = 0; loop { let opcode = program[pc] % 100; let parameter_mode = program[pc] / 100; match FromPrimitive::from_i32(opcode) { Some(OpCode::End) => break program[0], Some(OpCode::Input) => { let r1 = program[pc + 1]; let mut ret = String::new(); io::stdin() .read_line(&mut ret) .expect("Failed to read from stdin"); program[r1 as usize] = ret.trim().parse::<i32>().expect("Not an integer"); pc += 2; } Some(OpCode::Output) => { let r1 = get_reg(&program, pc, parameter_mode, 1); println!("{}", r1); pc += 2; } Some(x) => { let target: usize = program[pc + 3] as usize; 
let r1 = get_reg(&program, pc, parameter_mode, 1); let r2 = get_reg(&program, pc, parameter_mode, 2); program[target] = match x { OpCode::Add => r1 + r2, OpCode::Multiply => r1 * r2, _ => 0, }; pc += 4; } None => { println!("Segfault"); break 0; } }; } } fn main() -> std::io::Result<()> { let ro_program: Vec<i32> = include_str!("../input") .split(',') .map(|s| s.parse::<i32>().expect("Not an unsigned integer")) .collect(); execute_program(&ro_program); Ok(()) }
use oxidy::server::Server;
use oxidy::structs::Context;

/// GET / — responds with an empty body.
fn root_handler(ctx: &mut Context) {
    ctx.response.body = "".to_string();
}

/// GET /user/:id — echoes the `id` route parameter back as the body.
fn user_handler(ctx: &mut Context) {
    let id = ctx.request.params.get("id").unwrap();
    ctx.response.body = id.to_string();
}

/// POST /user — responds with an empty body.
fn create_user_handler(ctx: &mut Context) {
    ctx.response.body = "".to_string();
}

/// Wire up the three routes and serve on port 3000.
fn main() {
    let mut server = Server::new();
    server.get("/", root_handler);
    server.get("/user/:id", user_handler);
    server.post("/user", create_user_handler);
    server.listen("0.0.0.0:3000");
}
use fugu_env::CommandType;
use std::cmp::min;

/// A scrolling cursor over a list of `(index, CommandType)` entries.
/// `range` is the half-open window of entries currently shown, `max_print`
/// the window height, and `cursor` the selected entry (`None` = nothing
/// selected yet).
pub struct Selector {
    pub buf: Vec<(usize, CommandType)>,
    pub range: (usize, usize),
    pub max_print: usize,
    pub cursor: Option<usize>,
}

impl Default for Selector {
    /// An empty selector with a one-row window and no selection.
    fn default() -> Selector {
        Selector {
            buf: Vec::new(),
            range: (0, 1),
            max_print: 1,
            cursor: None,
        }
    }
}

impl Selector {
    /// Build a selector over `b`, showing at most 15 rows.
    pub fn new(b: Vec<(usize, CommandType)>) -> Selector {
        let visible = min(b.len(), 15);
        Selector {
            buf: b,
            range: (0, visible),
            max_print: visible,
            cursor: None,
        }
    }

    /// The entry under the cursor, or `None` when nothing is selected.
    pub fn get_idx(&self) -> Option<(usize, CommandType)> {
        self.cursor.map(|pos| self.buf[pos])
    }

    /// Move the cursor one row down, scrolling the window once the cursor
    /// leaves the visible area. A `None` cursor selects the first entry.
    pub fn csr_down(&mut self) {
        match self.cursor {
            None => self.cursor = Some(0),
            Some(pos) => {
                let next = pos + 1;
                if next < self.buf.len() {
                    self.cursor = Some(next);
                    if next >= self.max_print {
                        self.range = (self.range.0 + 1, self.range.1 + 1);
                    }
                }
            }
        }
    }

    /// Move the cursor one row up, scrolling the window back when needed.
    /// Moving up from the first entry clears the selection.
    pub fn csr_up(&mut self) {
        if let Some(pos) = self.cursor {
            if pos == 0 {
                self.cursor = None;
            } else {
                self.cursor = Some(pos - 1);
                if pos >= self.max_print {
                    self.range = (self.range.0 - 1, self.range.1 - 1);
                }
            }
        }
    }
}
use std::io::{self, BufRead};

// Advent of Code 2020, day 25: recover the secret loop size from one public
// key, then transform the other public key by it.

const BASE: u64 = 7;
const MODULUS: u64 = 20201227;

/// Discrete logarithm by trial multiplication: the smallest `x` with
/// `base^x == a (mod modulus)`.
///
/// NOTE(review): if `a` is not a power of `base` mod `modulus` this loop
/// never terminates; for the puzzle input (a prime modulus with 7 as a
/// generator) every valid key is reachable.
fn reverse(a: u64, base: u64, modulus: u64) -> u64 {
    let mut current = 1;
    for x in 0.. {
        if current == a {
            return x;
        }
        current = current * base % modulus;
    }
    unreachable!()
}

/// Modular exponentiation: `base^exp mod modulus`.
///
/// Square-and-multiply, O(log exp) — the previous one-multiply-per-step
/// loop was O(exp), and `exp` here can be tens of millions.
/// No overflow: `modulus` < 2^32, so every product fits in u64.
fn modpow(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    let mut result = 1;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            result = result * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    result
}

fn main() {
    // Read the two public keys, one per line, from stdin.
    let args = io::stdin()
        .lock()
        .lines()
        .map(|x| x.ok()?.parse().ok())
        .collect::<Option<Vec<_>>>()
        .unwrap();
    let a: u64 = args[0];
    let b: u64 = args[1];
    // Loop size of key A, then transform key B by it.
    let x = reverse(a, BASE, MODULUS);
    let result = modpow(b, x, MODULUS);
    println!("Part 1: {}", result);
    println!("Done \\o/");
}
/* chapter 4 syntax and semantics */

fn main() {
    // what to implement:
    /*
    fn coordinate() -> (i32, i32, i32) {
        // generate and return some sort of triple tuple
    }
    let (x, _, z) = coordinate();
    */

    // generate and return some sort of triple tuple
    fn coordinate() -> (i32, i32, i32) {
        (1, 2, 3)
    }

    // destructure the tuple, discarding the middle element
    let (x, _, z) = coordinate();
    println!("we have two values: {} and {}", x, z);
}

// output should be:
/*
*/
// extern crate libc; extern crate core_foundation; extern crate cocoa; use cocoa::base::{ NSUInteger, selector, nil}; use cocoa::appkit::{ NSApp, NSRect, NSPoint, NSSize, NSApplication, NSWindow, NSString, NSMenu, NSMenuItem, NSTitledWindowMask, NSBackingStoreBuffered }; struct RMainWindowDelegate { mut title: ~str, height: int, width: int, app: NSApp, window: NSWindow } impl RMainWindowDelegate { pub fn new() -> RMainWindow { unsafe { self.app = NSApp(); self.window = window = NSWindow::alloc(nil).initWithContentRect_styleMask_backing_defer_( NSRect::new(NSPoint::new(0.,0.), NSSize::new(200., 200.)), NSTitledWindowMask as NSUInteger, NSBackingStoreBuffered, false ); window.cascadeTopLeftFromPoint_( NSPoint::new(20., 20.) ); window.center(); let title = NSString::alloc(nil).init_str("Howdy\0"); window.setTitle_(title); window.makeKeyAndOrderFront_(nil); } self.RMainWindow { title:"Test", height:480, width:640 } } pub fn show( &self ) { app.activateIgnoringOtherApps_(true); app.run(); } pub fn setTitle( &self, title: &str ) { self.title = title; } }
use gdnative::prelude::*;
use crate::globals::Globals;
use std::ops::{MulAssign, AddAssign, SubAssign};
use gdnative::api::*;

/// First-person player controller attached to a Godot `KinematicBody`:
/// mouse look, WASD-style movement, simple gravity/jump, and a kill
/// counter reported to the global game state.
#[derive(NativeClass)]
#[inherit(KinematicBody)]
pub struct Player {
    // Vertical speed; positive is upward (jump), negative is falling.
    up_velocity: f32,
    // Accumulated look angles: x = pitch (clamped), y = yaw.
    camera_rotation: Vector2,
}

#[methods]
impl Player {
    /// Godot constructor: starts stationary, looking straight ahead.
    fn new(_owner: &KinematicBody) -> Player {
        Player {
            up_velocity: 0.0f32,
            camera_rotation: Vector2::zero(),
        }
    }

    /// Capture the mouse as soon as the node enters the tree.
    #[export]
    fn _ready(&self, _owner: &KinematicBody) {
        Input::godot_singleton().set_mouse_mode(Input::MOUSE_MODE_CAPTURED);
    }

    /// Mouse-look: accumulate relative mouse motion into pitch/yaw.
    /// Pitch is clamped to [-0.4, 0.4] radians; yaw is unbounded.
    #[export]
    fn _input(&mut self, owner: &KinematicBody, event: Ref<InputEvent>) {
        // Only mouse-motion events are of interest; everything else is ignored.
        if let Some(event) = event.clone().cast::<InputEventMouseMotion>() {
            let event = unsafe { event.assume_safe() };
            let motion: Vector2 = event.relative();
            // 0.0025 is the mouse sensitivity factor.
            self.camera_rotation.x += motion.y * 0.0025f32;
            self.camera_rotation.y -= motion.x * 0.0025f32;
            if self.camera_rotation.x < -0.4 {
                self.camera_rotation.x = -0.4;
            }
            if self.camera_rotation.x > 0.4 {
                self.camera_rotation.x = 0.4;
            }
        }
    }

    /// Per-physics-tick movement: horizontal motion from the input actions,
    /// jump/gravity on collision state, then apply yaw to the body and
    /// pitch to the "CameraRotation" child node.
    /// NOTE(review): movement ignores `_delta`, so speed is tick-rate
    /// dependent — confirm this is intended.
    #[export]
    fn _physics_process(&mut self, owner: &KinematicBody, _delta: f32) {
        let mut movement_direction = Vector3::new(0.0f32, 0.0f32, 0.0f32);
        // Move relative to the body's local axes, scaled to 0.1 units/tick.
        let basis: Basis = owner.transform().basis;
        let mut x = basis.x();
        x *= 0.1f32;
        let mut z = basis.z();
        z *= 0.1f32;
        // Action names must exist in the project's InputMap.
        let input = Input::godot_singleton();
        if input.is_action_pressed("left") {
            movement_direction -= x;
        }
        if input.is_action_pressed("right") {
            movement_direction += x;
        }
        if input.is_action_pressed("back") {
            movement_direction += z;
        }
        if input.is_action_pressed("forward") {
            movement_direction -= z;
        }
        // A horizontal collision is used as the "grounded" test: reset the
        // fall speed and allow jumping; otherwise keep accelerating down.
        if let Some(_collision) = owner.move_and_collide(movement_direction, false, false, false) {
            self.up_velocity = 0.0f32;
            if Input::godot_singleton().is_action_pressed("jump") {
                self.up_velocity = 1.0f32;
            }
        } else {
            self.up_velocity -= 0.1f32;
        }
        // Yaw rotates the whole body; pitch only tilts the camera pivot.
        owner.set_rotation(Vector3::new(0.0f32, self.camera_rotation.y, 0.0f32));
        unsafe {
            if let Some(camera_rotation_node) = owner.get_node("CameraRotation") {
                if let Some(camera_rotation_spatial) =
                    camera_rotation_node.assume_safe().cast::<Spatial>()
                {
                    camera_rotation_spatial.set_rotation(Vector3::new(
                        -1.0f32 * self.camera_rotation.x,
                        0.0f32,
                        0.0f32,
                    ));
                }
            }
        }
        // Terminal velocities in both directions.
        if self.up_velocity > 1.0f32 {
            self.up_velocity = 1.0f32;
        }
        if self.up_velocity < -1.0f32 {
            self.up_velocity = -1.0f32;
        }
        // Vertical motion is applied as a second, separate collide pass.
        let gravity = Vector3::new(0.0f32, self.up_velocity, 0.0f32);
        owner.move_and_collide(gravity, false, false, false);
    }

    /// Report a kill to the global game state.
    ///
    /// Walks scene tree -> root -> "./Globals" and bumps its kill counter;
    /// panics if the Globals node is missing or cannot be borrowed.
    fn kill(&self, owner: &KinematicBody) {
        let rust_game_state = owner
            .get_tree()
            .and_then(|tree| {
                let tree = unsafe { tree.assume_safe() };
                tree.root()
            })
            .and_then(|root| {
                let root = unsafe { root.assume_safe() };
                root.get_node("./Globals")
            })
            .and_then(|node| {
                let node = unsafe { node.assume_unique() };
                Instance::<Globals, _>::try_from_base(node).ok()
            })
            .expect("Failed to get Globals");
        rust_game_state
            .map_mut(|gs, _| gs.increment_kills())
            .expect("Could not increment kills for some reason.");
    }
}
use crate::db::money_nodes as db_items; use crate::db::PgPool; use crate::models::money_node::{ InputMoneyNode as NewItem, InputUpdateMoneyNode as UpdateInputItem, NewMoneyNode as Item, UpdateMoneyNode as UpdateItem, }; use actix_web::web::ServiceConfig; use actix_web::{delete, get, patch, post, web, Error, HttpResponse}; pub fn endpoints(config: &mut ServiceConfig) { config .service(get_all) .service(get_by_id) .service(new_debug) .service(update_by_id_debug) .service(delete_by_id_debug); } #[get("api/money_nodes")] pub async fn get_all(pool: web::Data<PgPool>) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); let item_list = db_items::all(&conn).unwrap(); Ok(HttpResponse::Ok().json(item_list)) } #[get("/api/money_nodes/{id}")] pub async fn get_by_id( pool: web::Data<PgPool>, web::Path(id): web::Path<i32>, ) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); let item = db_items::by_id(&conn, id).unwrap(); Ok(HttpResponse::Ok().json(item)) } /// post for money nodes should not be used directly, hence debug #[post("/api/d/money-nodes")] pub async fn new_debug( pool: web::Data<PgPool>, web::Path(item): web::Path<NewItem>, ) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); let item = db_items::new(&conn, Item::from_input(item)).unwrap(); Ok(HttpResponse::Ok().json(item)) } /// patch for money nodes should not be used directly, hence debug #[patch("/api/d/money_nodes/{id}")] pub async fn update_by_id_debug( pool: web::Data<PgPool>, web::Json(item): web::Json<UpdateInputItem>, web::Path(id): web::Path<i32>, ) -> Result<HttpResponse, Error> { let conn = pool.get().unwrap(); let item = db_items::update(&conn, UpdateItem::from_input(item), id).unwrap(); Ok(HttpResponse::Ok().json(item)) } /// delete for money nodes should not be used directly, hence debug #[delete("/api/d/money_nodes/{id}")] pub async fn delete_by_id_debug( pool: web::Data<PgPool>, web::Path(id): web::Path<i32>, ) -> Result<HttpResponse, Error> { let conn = 
pool.get().unwrap(); let item = db_items::delete(&conn, id).unwrap(); Ok(HttpResponse::Ok().json(item)) }
use std::cmp::{max, min}; use clap::{crate_version, App, Arg}; use rofuse::{ FileAttr, FileType, Filesystem, MountOption, ReplyAttr, ReplyData, ReplyDirectory, ReplyEntry, Request, }; use memmap2::{Mmap, MmapOptions}; use libc::ENOENT; use std::ffi::OsStr; use std::time::{Duration, UNIX_EPOCH}; use std::io::{Result, Error, Read, Seek}; use std::fs::File; use std::os::unix::fs::FileExt; const MAX: i32 = 4 * 1024 *1024; const TTL: Duration = Duration::from_secs(1); // 1 second static ATTRS: [FileAttr; 2] = [ FileAttr { ino: 1, size: 0, blocks: 0, atime: UNIX_EPOCH, // 1970-01-01 00:00:00 mtime: UNIX_EPOCH, ctime: UNIX_EPOCH, crtime: UNIX_EPOCH, kind: FileType::Directory, perm: 0o755, nlink: 2, uid: 501, gid: 20, rdev: 0, flags: 0, blksize: 512, }, FileAttr { ino: 2, size: 65535, blocks: 1, atime: UNIX_EPOCH, // 1970-01-01 00:00:00 mtime: UNIX_EPOCH, ctime: UNIX_EPOCH, crtime: UNIX_EPOCH, kind: FileType::RegularFile, perm: 0o644, nlink: 1, uid: 501, gid: 20, rdev: 0, flags: 0, blksize: 512, }, ]; struct Zero { file: File, attrs: Vec<FileAttr>, buffer: Mmap, } unsafe fn zero(name: String) -> Result<Zero> { let mut attrs = Vec::from(ATTRS); let mut file = File::open(&name)?; attrs[1].size = file.metadata()?.len(); let ans = memmap::MmapOptions::new().map(&file)?; println!("mmap len {}", ans.len()); return Ok(Zero{ file: file, attrs: attrs, buffer: ans, }) } impl Filesystem for Zero { fn lookup(&mut self, _req: &Request, parent: u64, name: &OsStr, reply: ReplyEntry) { if parent == 1 && name.to_str() == Some("hello.txt") { reply.entry(&TTL, &self.attrs[1], 0); } else { reply.error(ENOENT); } } fn getattr(&mut self, _req: &Request, ino: u64, reply: ReplyAttr) { match ino { 1 | 2 => reply.attr(&TTL, &self.attrs[(ino - 1) as usize]), _ => reply.error(ENOENT), } } fn readdir( &mut self, _req: &Request, ino: u64, _fh: u64, offset: i64, mut reply: ReplyDirectory, ) { match ino { 1 => { vec![ (1, FileType::Directory, "."), (1, FileType::Directory, ".."), (2, 
FileType::RegularFile, "hello.txt"), ] .iter() .enumerate() .all(|(index, entry)| reply.add(entry.0, (index + 1) as i64, entry.1, entry.2)); reply.ok(); } _ => reply.error(ENOENT), } } fn read( &mut self, _req: &Request, ino: u64, _fh: u64, offset: i64, _size: u32, _flags: i32, _lock: Option<u64>, reply: ReplyData, ) { match ino { 2 => { let end = min(offset as usize + _size as usize, self.buffer.len() as usize); let vec = self.buffer[offset as usize..end].to_owned(); reply.data(&vec); } _ => reply.error(ENOENT), } } } fn main() { let matches = App::new("hello") .version(crate_version!()) .author("Christopher Berner") .arg( Arg::with_name("MOUNT_POINT") .required(true) .index(1) .help("Act as a client, and mount FUSE at given path"), ) .arg( Arg::with_name("auto_unmount") .long("auto_unmount") .help("Automatically unmount on process exit"), ) .arg( Arg::with_name("allow-root") .long("allow-root") .help("Allow root user to access filesystem"), ).arg( Arg::with_name("datafile") .long("data-file").required(true).takes_value(true) .help("data-file for fuse server"), ).get_matches(); env_logger::init(); let mountpoint = matches.value_of("MOUNT_POINT").unwrap(); let file = matches.value_of("datafile").unwrap(); let mut options = vec![MountOption::RO, MountOption::FSName("hello".to_string())]; if matches.is_present("auto_unmount") { options.push(MountOption::AutoUnmount); } if matches.is_present("allow-root") { options.push(MountOption::AllowRoot); } unsafe { rofuse::mount2(zero(file.to_string()).unwrap(), mountpoint, &options).unwrap(); } }
// NOTE(review): Patricia-trie state-machine backend. Most operations delegate
// to `TrieBackendEssence`; `pairs`/`keys` deliberately swallow trie iteration
// errors (logged at debug level) and return empty collections — confirm that
// callers tolerate silent truncation when the trie is corrupt.

// This file is part of Substrate.

// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Trie-based state machine backend.

use crate::{debug, warn};
use crate::{
	trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage},
	Backend, StorageKey, StorageValue,
};
use codec::{Codec, Decode};
use hash_db::Hasher;
use sp_core::storage::{ChildInfo, ChildType};
use sp_std::{boxed::Box, vec::Vec};
use sp_trie::trie_types::{Layout, TrieDB, TrieError};
use sp_trie::{child_delta_trie_root, delta_trie_root, empty_child_trie_root, Trie};

/// Patricia trie-based backend. Transaction type is an overlay of changes to commit.
pub struct TrieBackend<S: TrieBackendStorage<H>, H: Hasher> {
	pub(crate) essence: TrieBackendEssence<S, H>,
}

impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackend<S, H>
where
	H::Out: Codec,
{
	/// Create new trie-based backend.
	pub fn new(storage: S, root: H::Out) -> Self {
		TrieBackend { essence: TrieBackendEssence::new(storage, root) }
	}

	/// Get backend essence reference.
	pub fn essence(&self) -> &TrieBackendEssence<S, H> {
		&self.essence
	}

	/// Get backend storage reference.
	pub fn backend_storage(&self) -> &S {
		self.essence.backend_storage()
	}

	/// Get backend storage mutable reference.
	pub fn backend_storage_mut(&mut self) -> &mut S {
		self.essence.backend_storage_mut()
	}

	/// Get trie root.
	pub fn root(&self) -> &H::Out {
		self.essence.root()
	}

	/// Consumes self and returns underlying storage.
	pub fn into_storage(self) -> S {
		self.essence.into_storage()
	}
}

impl<S: TrieBackendStorage<H>, H: Hasher> sp_std::fmt::Debug for TrieBackend<S, H> {
	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
		write!(f, "TrieBackend")
	}
}

impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H>
where
	H::Out: Ord + Codec,
{
	type Error = crate::DefaultError;
	type Transaction = S::Overlay;
	type TrieBackendStorage = S;

	fn storage(&self, key: &[u8]) -> Result<Option<StorageValue>, Self::Error> {
		self.essence.storage(key)
	}

	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<StorageValue>, Self::Error> {
		self.essence.child_storage(child_info, key)
	}

	fn next_storage_key(&self, key: &[u8]) -> Result<Option<StorageKey>, Self::Error> {
		self.essence.next_storage_key(key)
	}

	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<StorageKey>, Self::Error> {
		self.essence.next_child_storage_key(child_info, key)
	}

	fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
		self.essence.for_keys_with_prefix(prefix, f)
	}

	fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
		self.essence.for_key_values_with_prefix(prefix, f)
	}

	fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, child_info: &ChildInfo, f: F) {
		self.essence.for_keys_in_child_storage(child_info, f)
	}

	fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
		f: F,
	) {
		self.essence.for_child_keys_with_prefix(child_info, prefix, f)
	}

	// Collects every (key, value) pair by walking the whole trie.
	// Iteration errors are logged and yield an empty Vec.
	fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
		let collect_all = || -> Result<_, Box<TrieError<H::Out>>> {
			let trie = TrieDB::<H>::new(self.essence(), self.essence.root())?;
			let mut v = Vec::new();
			for x in trie.iter()? {
				let (key, value) = x?;
				v.push((key.to_vec(), value.to_vec()));
			}

			Ok(v)
		};

		match collect_all() {
			Ok(v) => v,
			Err(e) => {
				debug!(target: "trie", "Error extracting trie values: {}", e);
				Vec::new()
			},
		}
	}

	// Full trie walk filtered by prefix; errors are logged and yield an empty Vec.
	fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
		let collect_all = || -> Result<_, Box<TrieError<H::Out>>> {
			let trie = TrieDB::<H>::new(self.essence(), self.essence.root())?;
			let mut v = Vec::new();
			for x in trie.iter()? {
				let (key, _) = x?;
				if key.starts_with(prefix) {
					v.push(key.to_vec());
				}
			}

			Ok(v)
		};

		collect_all()
			.map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e))
			.unwrap_or_default()
	}

	// Applies `delta` on top of the current root into a fresh overlay; the
	// stored root is not mutated, the new root is returned with the overlay.
	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
	) -> (H::Out, Self::Transaction)
	where
		H::Out: Ord,
	{
		let mut write_overlay = S::Overlay::default();
		let mut root = *self.essence.root();

		{
			let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay);

			match delta_trie_root::<Layout<H>, _, _, _, _, _>(&mut eph, root, delta) {
				Ok(ret) => root = ret,
				Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e),
			}
		}

		(root, write_overlay)
	}

	// Same as `storage_root` but for a child trie; the current child root is
	// read from the parent trie (falling back to the empty child root), and
	// the bool result reports whether the new root equals the default root.
	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
	) -> (H::Out, bool, Self::Transaction)
	where
		H::Out: Ord,
	{
		let default_root = match child_info.child_type() {
			ChildType::ParentKeyId => empty_child_trie_root::<Layout<H>>(),
		};

		let mut write_overlay = S::Overlay::default();
		let prefixed_storage_key = child_info.prefixed_storage_key();
		let mut root = match self.storage(prefixed_storage_key.as_slice()) {
			Ok(value) => value
				.and_then(|r| Decode::decode(&mut &r[..]).ok())
				.unwrap_or_else(|| default_root.clone()),
			Err(e) => {
				warn!(target: "trie", "Failed to read child storage root: {}", e);
				default_root.clone()
			},
		};

		{
			let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay);

			match child_delta_trie_root::<Layout<H>, _, _, _, _, _, _>(
				child_info.keyspace(),
				&mut eph,
				root,
				delta,
			) {
				Ok(ret) => root = ret,
				Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e),
			}
		}

		let is_default = root == default_root;

		(root, is_default, write_overlay)
	}

	fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
		Some(self)
	}

	fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) {}

	fn usage_info(&self) -> crate::UsageInfo {
		crate::UsageInfo::empty()
	}

	fn wipe(&self) -> Result<(), Self::Error> {
		Ok(())
	}
}

#[cfg(test)]
pub mod tests {
	use super::*;
	use codec::Encode;
	use sp_core::H256;
	use sp_runtime::traits::BlakeTwo256;
	use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut};
	use std::{collections::HashSet, iter};

	const CHILD_KEY_1: &[u8] = b"sub1";

	// Builds a memory DB containing one child trie plus a top trie that
	// references it; returns the DB and the top-trie root.
	fn test_db() -> (PrefixedMemoryDB<BlakeTwo256>, H256) {
		let child_info = ChildInfo::new_default(CHILD_KEY_1);
		let mut root = H256::default();
		let mut mdb = PrefixedMemoryDB::<BlakeTwo256>::default();
		{
			let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace());
			let mut trie = TrieDBMut::new(&mut mdb, &mut root);
			trie.insert(b"value3", &[142]).expect("insert failed");
			trie.insert(b"value4", &[124]).expect("insert failed");
		};

		{
			let mut sub_root = Vec::new();
			root.encode_to(&mut sub_root);
			let mut trie = TrieDBMut::new(&mut mdb, &mut root);
			trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..])
				.expect("insert failed");
			trie.insert(b"key", b"value").expect("insert failed");
			trie.insert(b"value1", &[42]).expect("insert failed");
			trie.insert(b"value2", &[24]).expect("insert failed");
			trie.insert(b":code", b"return 42").expect("insert failed");
			for i in 128u8..255u8 {
				trie.insert(&[i], &[i]).unwrap();
			}
		}
		(mdb, root)
	}

	pub(crate) fn test_trie() -> TrieBackend<PrefixedMemoryDB<BlakeTwo256>, BlakeTwo256> {
		let (mdb, root) = test_db();
		TrieBackend::new(mdb, root)
	}

	#[test]
	fn read_from_storage_returns_some() {
		assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec()));
	}

	#[test]
	fn read_from_child_storage_returns_some() {
		let test_trie = test_trie();
		assert_eq!(
			test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(),
			Some(vec![142u8]),
		);
	}

	#[test]
	fn read_from_storage_returns_none() {
		assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None);
	}

	#[test]
	fn pairs_are_not_empty_on_non_empty_storage() {
		assert!(!test_trie().pairs().is_empty());
	}

	#[test]
	fn pairs_are_empty_on_empty_storage() {
		assert!(TrieBackend::<PrefixedMemoryDB<BlakeTwo256>, BlakeTwo256>::new(
			PrefixedMemoryDB::default(),
			Default::default(),
		)
		.pairs()
		.is_empty());
	}

	#[test]
	fn storage_root_is_non_default() {
		assert!(test_trie().storage_root(iter::empty()).0 != H256::repeat_byte(0));
	}

	#[test]
	fn storage_root_transaction_is_empty() {
		assert!(test_trie().storage_root(iter::empty()).1.drain().is_empty());
	}

	#[test]
	fn storage_root_transaction_is_non_empty() {
		let (new_root, mut tx) =
			test_trie().storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))));
		assert!(!tx.drain().is_empty());
		assert!(new_root != test_trie().storage_root(iter::empty()).0);
	}

	#[test]
	fn prefix_walking_works() {
		let trie = test_trie();

		let mut seen = HashSet::new();
		trie.for_keys_with_prefix(b"value", |key| {
			let for_first_time = seen.insert(key.to_vec());
			assert!(for_first_time, "Seen key '{:?}' more than once", key);
		});

		let mut expected = HashSet::new();
		expected.insert(b"value1".to_vec());
		expected.insert(b"value2".to_vec());
		assert_eq!(seen, expected);
	}
}
// NOTE(review): svd2rust-generated register block for the Ethernet DMA
// peripheral. Do not hand-edit — regenerate from the SVD instead. The
// "EHERNET" misspelling in the DMARPDR descriptions comes from the vendor
// SVD file, not from this code.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - Ethernet DMA bus mode register"]
    pub dmabmr: DMABMR,
    #[doc = "0x04 - Ethernet DMA transmit poll demand register"]
    pub dmatpdr: DMATPDR,
    #[doc = "0x08 - EHERNET DMA receive poll demand register"]
    pub dmarpdr: DMARPDR,
    #[doc = "0x0c - Ethernet DMA receive descriptor list address register"]
    pub dmardlar: DMARDLAR,
    #[doc = "0x10 - Ethernet DMA transmit descriptor list address register"]
    pub dmatdlar: DMATDLAR,
    #[doc = "0x14 - Ethernet DMA status register"]
    pub dmasr: DMASR,
    #[doc = "0x18 - Ethernet DMA operation mode register"]
    pub dmaomr: DMAOMR,
    #[doc = "0x1c - Ethernet DMA interrupt enable register"]
    pub dmaier: DMAIER,
    #[doc = "0x20 - Ethernet DMA missed frame and buffer overflow counter register"]
    pub dmamfbocr: DMAMFBOCR,
    #[doc = "0x24 - Ethernet DMA receive status watchdog timer register"]
    pub dmarswtr: DMARSWTR,
    _reserved10: [u8; 0x20],
    #[doc = "0x48 - Ethernet DMA current host transmit descriptor register"]
    pub dmachtdr: DMACHTDR,
    #[doc = "0x4c - Ethernet DMA current host receive descriptor register"]
    pub dmachrdr: DMACHRDR,
    #[doc = "0x50 - Ethernet DMA current host transmit buffer address register"]
    pub dmachtbar: DMACHTBAR,
    #[doc = "0x54 - Ethernet DMA current host receive buffer address register"]
    pub dmachrbar: DMACHRBAR,
}
#[doc = "DMABMR (rw) register accessor: Ethernet DMA bus mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmabmr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmabmr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmabmr`] module"]
pub type DMABMR = crate::Reg<dmabmr::DMABMR_SPEC>;
#[doc = "Ethernet DMA bus mode register"]
pub mod dmabmr;
#[doc = "DMATPDR (rw) register accessor: Ethernet DMA transmit poll demand register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmatpdr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmatpdr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmatpdr`] module"]
pub type DMATPDR = crate::Reg<dmatpdr::DMATPDR_SPEC>;
#[doc = "Ethernet DMA transmit poll demand register"]
pub mod dmatpdr;
#[doc = "DMARPDR (rw) register accessor: EHERNET DMA receive poll demand register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmarpdr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmarpdr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmarpdr`] module"]
pub type DMARPDR = crate::Reg<dmarpdr::DMARPDR_SPEC>;
#[doc = "EHERNET DMA receive poll demand register"]
pub mod dmarpdr;
#[doc = "DMARDLAR (rw) register accessor: Ethernet DMA receive descriptor list address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmardlar::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmardlar::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmardlar`] module"]
pub type DMARDLAR = crate::Reg<dmardlar::DMARDLAR_SPEC>;
#[doc = "Ethernet DMA receive descriptor list address register"]
pub mod dmardlar;
#[doc = "DMATDLAR (rw) register accessor: Ethernet DMA transmit descriptor list address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmatdlar::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmatdlar::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmatdlar`] module"]
pub type DMATDLAR = crate::Reg<dmatdlar::DMATDLAR_SPEC>;
#[doc = "Ethernet DMA transmit descriptor list address register"]
pub mod dmatdlar;
#[doc = "DMASR (rw) register accessor: Ethernet DMA status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmasr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmasr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmasr`] module"]
pub type DMASR = crate::Reg<dmasr::DMASR_SPEC>;
#[doc = "Ethernet DMA status register"]
pub mod dmasr;
#[doc = "DMAOMR (rw) register accessor: Ethernet DMA operation mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmaomr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmaomr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmaomr`] module"]
pub type DMAOMR = crate::Reg<dmaomr::DMAOMR_SPEC>;
#[doc = "Ethernet DMA operation mode register"]
pub mod dmaomr;
#[doc = "DMAIER (rw) register accessor: Ethernet DMA interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmaier::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmaier::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmaier`] module"]
pub type DMAIER = crate::Reg<dmaier::DMAIER_SPEC>;
#[doc = "Ethernet DMA interrupt enable register"]
pub mod dmaier;
#[doc = "DMAMFBOCR (rw) register accessor: Ethernet DMA missed frame and buffer overflow counter register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmamfbocr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmamfbocr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmamfbocr`] module"]
pub type DMAMFBOCR = crate::Reg<dmamfbocr::DMAMFBOCR_SPEC>;
#[doc = "Ethernet DMA missed frame and buffer overflow counter register"]
pub mod dmamfbocr;
#[doc = "DMARSWTR (rw) register accessor: Ethernet DMA receive status watchdog timer register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmarswtr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmarswtr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmarswtr`] module"]
pub type DMARSWTR = crate::Reg<dmarswtr::DMARSWTR_SPEC>;
#[doc = "Ethernet DMA receive status watchdog timer register"]
pub mod dmarswtr;
#[doc = "DMACHTDR (r) register accessor: Ethernet DMA current host transmit descriptor register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmachtdr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmachtdr`] module"]
pub type DMACHTDR = crate::Reg<dmachtdr::DMACHTDR_SPEC>;
#[doc = "Ethernet DMA current host transmit descriptor register"]
pub mod dmachtdr;
#[doc = "DMACHRDR (r) register accessor: Ethernet DMA current host receive descriptor register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmachrdr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmachrdr`] module"]
pub type DMACHRDR = crate::Reg<dmachrdr::DMACHRDR_SPEC>;
#[doc = "Ethernet DMA current host receive descriptor register"]
pub mod dmachrdr;
#[doc = "DMACHTBAR (r) register accessor: Ethernet DMA current host transmit buffer address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmachtbar::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmachtbar`] module"]
pub type DMACHTBAR = crate::Reg<dmachtbar::DMACHTBAR_SPEC>;
#[doc = "Ethernet DMA current host transmit buffer address register"]
pub mod dmachtbar;
#[doc = "DMACHRBAR (r) register accessor: Ethernet DMA current host receive buffer address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmachrbar::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmachrbar`] module"]
pub type DMACHRBAR = crate::Reg<dmachrbar::DMACHRBAR_SPEC>;
#[doc = "Ethernet DMA current host receive buffer address register"]
pub mod dmachrbar;
// NOTE(review): core of a CAQE-style recursive QBF solver built on
// cryptominisat. `new_existential` contains two `if false && ...` branches
// (outer/inner clause-equality optimization) that are deliberately disabled
// dead code — confirm whether they should be removed or gated behind an
// option. This chunk is cut off mid-way through `check_candidate_exists`;
// the impl continues past the end of this view.
extern crate cryptominisat;
use self::cryptominisat::*;

extern crate bit_vec;
use self::bit_vec::BitVec;

use super::*;

use std::collections::HashMap;
use std::fmt;

#[cfg(feature = "statistics")]
use super::utils::statistics::TimingStats;

type QMatrix = Matrix<TreePrefix>;

/// Top-level solver: one `ScopeRecursiveSolver` abstraction per root of the
/// quantifier-prefix tree.
pub struct CaqeSolver<'a> {
    matrix: &'a QMatrix,
    result: SolverResult,
    abstraction: Vec<Box<ScopeRecursiveSolver>>,
}

impl<'a> CaqeSolver<'a> {
    pub fn new(matrix: &QMatrix) -> CaqeSolver {
        Self::new_with_options(matrix, CaqeSolverOptions::new())
    }

    /// Build one recursive abstraction per prefix-tree root.
    pub fn new_with_options(matrix: &QMatrix, options: CaqeSolverOptions) -> CaqeSolver {
        let mut abstractions = Vec::new();
        for scope_node in matrix.prefix.roots.iter() {
            abstractions.push(ScopeRecursiveSolver::init_abstraction_recursively(
                matrix,
                options,
                scope_node,
            ));
        }
        debug_assert!(!matrix.conflict());
        CaqeSolver {
            matrix: matrix,
            result: SolverResult::Unknown,
            abstraction: abstractions,
        }
    }

    #[cfg(feature = "statistics")]
    pub fn print_statistics(&self) {
        for ref abstraction in self.abstraction.iter() {
            abstraction.print_statistics();
        }
    }

    /// Build a partial QDIMACS certificate from the top-level assignments.
    /// Only emits assignments when the result and the quantifier type of the
    /// first non-empty scope allow a meaningful certificate.
    pub fn qdimacs_output(&self) -> qdimacs::PartialQDIMACSCertificate {
        let mut certificate = qdimacs::PartialQDIMACSCertificate::new(
            self.result,
            self.matrix.prefix.variables().orig_num_variables(),
            self.matrix.orig_clause_num,
        );

        if self.result == SolverResult::Unknown {
            return certificate;
        }

        // get the first scope that contains variables (the scope 0 may be empty)
        let mut top_level = Vec::new();
        let is_universal;
        if self.matrix
            .prefix
            .roots
            .iter()
            .fold(true, |val, node| val && node.scope.variables.is_empty())
        {
            // top-level existential scope is empty
            for abstraction in self.abstraction.iter() {
                top_level.extend(&abstraction.next);
            }
            is_universal = true;
        } else {
            top_level.extend(&self.abstraction);
            is_universal = false;
        }

        // output the variable assignment if possible
        if self.result == SolverResult::Satisfiable && is_universal
            || self.result == SolverResult::Unsatisfiable && !is_universal
        {
            return certificate;
        }

        // go through all scopes in the level
        // for existential level: combine the assignments
        // for universal level: select only one level
        for scope in top_level.iter() {
            if self.result == SolverResult::Unsatisfiable
                && scope.data.sub_result != SolverResult::Unsatisfiable
            {
                continue;
            }
            for variable in scope.data.variables.iter() {
                let value = scope.data.assignments[variable];
                let info = &self.matrix.prefix.variables().get(*variable);
                let mut orig_variable;
                if info.copy_of != 0 {
                    orig_variable = info.copy_of;
                } else {
                    orig_variable = *variable;
                }
                certificate.add_assignment(Literal::new(orig_variable, !value));
            }
            if self.result == SolverResult::Unsatisfiable {
                // only one assignment for universal quantifier
                break;
            }
        }

        certificate
    }
}

impl<'a> super::Solver for CaqeSolver<'a> {
    /// Solve each root abstraction; the instance is UNSAT as soon as any root
    /// is UNSAT, otherwise SAT.
    fn solve(&mut self) -> SolverResult {
        for ref mut abstraction in self.abstraction.iter_mut() {
            let result = abstraction.solve_recursive(self.matrix);
            if result == SolverResult::Unsatisfiable {
                self.result = SolverResult::Unsatisfiable;
                return result;
            }
        }
        self.result = SolverResult::Satisfiable;
        return self.result;
    }
}

#[derive(Debug, Copy, Clone)]
pub struct CaqeSolverOptions {
    pub strong_unsat_refinement: bool,
    pub expansion_refinement: bool,
    pub refinement_literal_subsumption: bool,
    pub abstraction_literal_optimization: bool,
    /// flag whether to collapse empty (universal) scopes during mini-scoping
    pub collapse_empty_scopes: bool,
}

impl CaqeSolverOptions {
    pub fn new() -> CaqeSolverOptions {
        CaqeSolverOptions {
            strong_unsat_refinement: true,
            expansion_refinement: true,
            refinement_literal_subsumption: false,
            abstraction_literal_optimization: true,
            collapse_empty_scopes: false,
        }
    }
}

// Event labels used by the (feature-gated) timing statistics.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
enum SolverScopeEvents {
    SolveScopeAbstraction,
    Refinement,
}

impl fmt::Display for SolverScopeEvents {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &SolverScopeEvents::SolveScopeAbstraction => write!(f, "SolveScopeAbstraction"),
            &SolverScopeEvents::Refinement => write!(f, "Refinement"),
        }
    }
}

/// Per-scope SAT abstraction state used by the recursive solver.
struct ScopeSolverData {
    sat: cryptominisat::Solver,
    variables: Vec<Variable>,
    variable_to_sat: HashMap<Variable, Lit>,
    t_literals: Vec<(ClauseId, Lit)>,
    b_literals: Vec<(ClauseId, Lit)>,

    /// lookup from sat solver variables to clause id's
    reverse_t_literals: HashMap<u32, ClauseId>,

    assignments: HashMap<Variable, bool>,

    /// stores for every clause whether the clause is satisfied or not by assignments to outer variables
    entry: BitVec,

    /// Stores the clauses for which the current level is maximal, i.e.,
    /// there is no literal of a inner scope contained.
    /// For universal scopes, it stores the clauses which are only influenced by
    /// the current, or some inner, scope.
    max_clauses: BitVec,

    /// Stores the clauses which are relevant, i.e., belong to the current branch in quantifier prefix tree
    relevant_clauses: BitVec,

    /// stores the assumptions given to sat solver
    sat_solver_assumptions: Vec<Lit>,

    is_universal: bool,
    scope_id: ScopeId,

    options: CaqeSolverOptions,

    /// stores for clause-ids whether there is a strong-unsat optimized lit
    strong_unsat_cache: HashMap<ClauseId, (Lit, bool)>,
    conjunction: Vec<ClauseId>,

    /// expansion related data structures
    expansion_renaming: HashMap<Variable, Lit>,

    /// stores the result of recursive calls to branches
    sub_result: SolverResult,

    #[cfg(feature = "statistics")]
    statistics: TimingStats<SolverScopeEvents>,
}

impl ScopeSolverData {
    fn new(
        matrix: &QMatrix,
        options: CaqeSolverOptions,
        scope: &Scope,
        relevant_clauses: BitVec,
    ) -> ScopeSolverData {
        let mut s = cryptominisat::Solver::new();
        s.set_num_threads(1);
        // assign all variables initially to zero, need that for expansion refinement
        let mut assignments = HashMap::new();
        for &variable in scope.variables.iter() {
            assignments.insert(variable, false);
        }
        ScopeSolverData {
            sat: s,
            variables: scope.variables.clone(),
            variable_to_sat: HashMap::new(),
            t_literals: Vec::with_capacity(matrix.clauses.len()),
            b_literals: Vec::with_capacity(matrix.clauses.len()),
            reverse_t_literals: HashMap::new(),
            assignments: assignments,
            entry: BitVec::from_elem(matrix.clauses.len(), false),
            max_clauses: BitVec::from_elem(matrix.clauses.len(), false),
            relevant_clauses: relevant_clauses,
            sat_solver_assumptions: Vec::new(),
            // odd scope ids are universal in this encoding
            is_universal: scope.id % 2 != 0,
            scope_id: scope.id,
            options: options,
            strong_unsat_cache: HashMap::new(),
            conjunction: Vec::new(),
            expansion_renaming: HashMap::new(),
            sub_result: SolverResult::Unknown,
            #[cfg(feature = "statistics")]
            statistics: TimingStats::new(),
        }
    }

    fn new_existential(&mut self, matrix: &QMatrix, scope: &Scope) {
        let mut sat_clause = Vec::new();
        // build SAT instance for existential quantifier: abstract all literals that are not contained in quantifier into b- and t-literals
        'next_clause: for (clause_id, clause) in matrix.clauses.iter().enumerate() {
            debug_assert!(clause.len() != 0, "unit clauses indicate a problem");
            debug_assert!(sat_clause.is_empty());

            let mut contains_variables = false;
            let mut outer = None;
            let mut inner = None;
            let mut current = None;

            let mut scopes = MinMax::new();
            for &literal in clause.iter() {
                let var_scope = matrix.prefix.variables().get(literal.variable()).scope;
                scopes.update(var_scope);
                if !self.variable_to_sat.contains_key(&literal.variable()) {
                    // literal is not bound at the current scope; remember one
                    // representative outer/inner literal for the equality
                    // optimizations below
                    if var_scope < scope.id {
                        outer = Some(literal);
                    } else if var_scope > scope.id {
                        inner = Some(literal);
                    }
                    continue;
                }
                self.relevant_clauses.set(clause_id as usize, true);
                current = Some(literal);
                contains_variables = true;
                sat_clause.push(self.lit_to_sat_lit(literal));
            }

            // add t- and b-lits to existential quantifiers:
            // * we add t-lit if scope is between min- and max-scope of current clause
            // * we add b-lit if scope is between min- and max-scope of current clause, excluding max-scope
            let (min_scope, max_scope) = scopes.get();
            let need_t_lit = contains_variables && min_scope < scope.id && scope.id <= max_scope;
            let need_b_lit = contains_variables && min_scope <= scope.id && scope.id < max_scope;

            let mut outer_equal_to = None;
            let mut inner_equal_to = None;

            if min_scope > self.scope_id {
                // remove the clause from relevant clauses as current scope (nor any outer) influence it
                self.relevant_clauses.set(clause_id as usize, false);
            }

            if !contains_variables {
                debug_assert!(!need_t_lit);
                debug_assert!(!need_b_lit);
                debug_assert!(sat_clause.is_empty());
                continue;
            } else {
                // check if the clause is equal to another clause w.r.t. variables bound at the current level or outer
                // in this case, we do not need to add a clause to SAT solver, but rather just need an entry in b-literals
                if self.options.abstraction_literal_optimization && need_b_lit && current.is_some()
                {
                    for &other_clause_id in matrix
                        .occurrences(current.unwrap())
                        .filter(|&&id| id < clause_id as ClauseId)
                    {
                        let other_clause = &matrix.clauses[other_clause_id as usize];
                        if clause.is_equal_wrt_predicate(other_clause, |l| {
                            let info = matrix.prefix.variables().get(l.variable());
                            info.scope <= scope.id
                        }) {
                            debug_assert!(need_b_lit);
                            let pos = self.b_literals
                                .binary_search_by(|elem| elem.0.cmp(&other_clause_id));
                            if pos.is_ok() {
                                let sat_var = self.b_literals[pos.unwrap()].1;
                                self.b_literals.push((clause_id as ClauseId, sat_var));
                                sat_clause.clear();
                                continue 'next_clause;
                            }
                        }
                    }
                }
                // NOTE(review): disabled dead branch (`if false && …`) — kept
                // from an experiment with re-using t-literals of outer-equal
                // clauses; confirm removal.
                if false && self.options.abstraction_literal_optimization && outer.is_some() {
                    for &other_clause_id in matrix
                        .occurrences(outer.unwrap())
                        .filter(|&&id| id < clause_id as ClauseId)
                    {
                        let other_clause = &matrix.clauses[other_clause_id as usize];
                        if clause.is_equal_wrt_predicate(other_clause, |l| {
                            let info = matrix.prefix.variables().get(l.variable());
                            info.scope < scope.id
                        }) {
                            debug_assert!(need_t_lit);
                            let pos = self.t_literals
                                .binary_search_by(|elem| elem.0.cmp(&other_clause_id));
                            if pos.is_ok() {
                                let sat_var = self.t_literals[pos.unwrap()].1;
                                outer_equal_to = Some(sat_var);
                                break;
                            }
                        }
                    }
                }
                // NOTE(review): disabled dead branch (`if false && …`), same
                // experiment for inner-equal clauses; confirm removal.
                if false && self.options.abstraction_literal_optimization && inner.is_some() {
                    for &other_clause_id in matrix
                        .occurrences(inner.unwrap())
                        .filter(|&&id| id < clause_id as ClauseId)
                    {
                        let other_clause = &matrix.clauses[other_clause_id as usize];
                        if clause.is_equal_wrt_predicate(other_clause, |l| {
                            let info = matrix.prefix.variables().get(l.variable());
                            info.scope > scope.id
                        }) {
                            debug_assert!(need_b_lit);
                            let pos = self.b_literals
                                .binary_search_by(|elem| elem.0.cmp(&other_clause_id));
                            if pos.is_ok() {
                                let sat_var = self.b_literals[pos.unwrap()].1;
                                inner_equal_to = Some(sat_var);
                                break;
                            }
                        }
                    }
                }
            }

            if need_t_lit {
                if outer_equal_to.is_none() {
                    let t_lit = self.sat.new_var();
                    sat_clause.push(t_lit);
                    self.t_literals.push((clause_id as ClauseId, t_lit));
                    self.reverse_t_literals
                        .insert(t_lit.var(), clause_id as ClauseId);
                } else {
                    let t_lit = outer_equal_to.unwrap();
                    sat_clause.push(t_lit);
                    // we don't need to add it to t-literals since it will be already assumed by earlier clause
                    // otherwise, we would assume twice
                    //self.t_literals.push((clause_id as ClauseId, t_lit));
                }
            }

            if need_b_lit {
                let b_lit;
                if inner_equal_to.is_none() {
                    b_lit = self.sat.new_var();
                } else {
                    b_lit = inner_equal_to.unwrap();
                }
                sat_clause.push(!b_lit);
                self.b_literals.push((clause_id as ClauseId, b_lit));
            }

            debug_assert!(!sat_clause.is_empty());
            self.sat.add_clause(sat_clause.as_ref());
            sat_clause.clear();

            if max_scope == scope.id {
                self.max_clauses.set(clause_id, true);
            }
        }
        debug!("Scope {}", scope.id);
        debug!("t-literals: {}", self.t_literals.len());
        debug!("b-literals: {}", self.b_literals.len());

        #[cfg(debug_assertions)]
        {
            let mut t_literals = String::new();
            for &(clause_id, _) in self.t_literals.iter() {
                t_literals.push_str(&format!(" t{}", clause_id));
            }
            debug!("t-literals: {}", t_literals);

            let mut b_literals = String::new();
            for &(clause_id, _) in self.b_literals.iter() {
                b_literals.push_str(&format!(" b{}", clause_id));
            }
            debug!("b-literals: {}", b_literals);
        }
    }

    fn new_universal(&mut self, matrix: &QMatrix, scope: &Scope) {
        // build SAT instance for negation of clauses, i.e., basically we only
        // build binary clauses
        'next_clause: for (clause_id, clause) in matrix.clauses.iter().enumerate() {
            debug_assert!(clause.len() != 0, "unit clauses indicate a problem");

            let clause_id = clause_id as ClauseId;

            let mut scopes = MinMax::new();

            // check if there is at most one variable bound in current scope (and no outer variables)
            // then one can replace the b-literal by the variable itself
            let mut single_literal = None;
            let mut num_scope_variables = 0;
            for &literal in clause.iter() {
                let var_scope = matrix.prefix.variables().get(literal.variable()).scope;
                scopes.update(var_scope);
                if !self.variable_to_sat.contains_key(&literal.variable()) {
                    continue;
                }
                self.relevant_clauses.set(clause_id as usize, true);
                num_scope_variables += 1;
                if single_literal.is_none() {
                    single_literal = Some(literal);
                }
            }
            let (min_scope, max_scope) = scopes.get();

            // We check whether the clause is equal to a prior clause w.r.t. outer and current variables.
            // In this case, we can re-use the b-literal from other clause (and can omit t-literal all together).
            if self.options.abstraction_literal_optimization && single_literal.is_some()
                && (num_scope_variables > 1 || min_scope < scope.id)
            {
                let literal = single_literal.unwrap();
                // iterate only over prior clauses
                for &other_clause_id in matrix
                    .occurrences(literal)
                    .filter(|&&id| id < clause_id as ClauseId)
                {
                    let other_clause = &matrix.clauses[other_clause_id as usize];
                    if clause.is_equal_wrt_predicate(other_clause, |l| {
                        let info = matrix.prefix.variables().get(l.variable());
                        info.scope <= scope.id
                    }) {
                        let pos = self.b_literals
                            .binary_search_by(|elem| elem.0.cmp(&other_clause_id))
                            .unwrap();
                        let sat_var = self.b_literals[pos].1;
                        self.b_literals.push((clause_id as ClauseId, sat_var));
                        continue 'next_clause;
                    }
                }
            }

            let sat_var;

            // there is a single literal and no outer variables, replace t-literal by literal
            if self.options.abstraction_literal_optimization && num_scope_variables == 1
                && min_scope == scope.id
            {
                let literal = single_literal.unwrap();
                sat_var = !self.lit_to_sat_lit(literal);
            } else if num_scope_variables > 0 {
                // build abstraction
                sat_var = self.sat.new_var();
                for &literal in clause.iter() {
                    if !self.variable_to_sat.contains_key(&literal.variable()) {
                        continue;
                    }
                    let lit = self.lit_to_sat_lit(literal);
                    self.sat.add_clause(&[!sat_var, !lit]);
                }
            } else {
                // no variable of current scope
                // do not add t-literal nor b-literal, we adapt abstraction during solving if needed
                continue;
            }

            debug_assert!(self.relevant_clauses[clause_id as usize]);

            let need_t_lit = min_scope < scope.id && scope.id <= max_scope;
            let need_b_lit = min_scope <= scope.id && scope.id <= max_scope;

            debug_assert!(min_scope <= scope.id);
            debug_assert!(max_scope >= scope.id);

            if need_t_lit {
                self.t_literals.push((clause_id as ClauseId, sat_var));
                debug_assert!(!self.reverse_t_literals.contains_key(&sat_var.var()));
                self.reverse_t_literals
                    .insert(sat_var.var(), clause_id as ClauseId);
            }

            if need_b_lit {
                self.b_literals.push((clause_id as ClauseId, sat_var));
            }

            if min_scope == scope.id {
                self.max_clauses.set(clause_id as usize, true);
            }
        }
        debug!("Scope {}", scope.id);
        debug!("t-literals: {}", self.t_literals.len());
        debug!("b-literals: {}", self.b_literals.len());

        #[cfg(debug_assertions)]
        {
            let mut t_literals = String::new();
            for &(clause_id, _) in self.t_literals.iter() {
                t_literals.push_str(&format!(" t{}", clause_id));
            }
            debug!("t-literals: {}", t_literals);

            let mut b_literals = String::new();
            for &(clause_id, _) in self.b_literals.iter() {
                b_literals.push_str(&format!(" b{}", clause_id));
            }
            debug!("b-literals: {}", b_literals);
        }
    }

    // Translate a matrix literal into the solver's Lit, applying sign.
    fn lit_to_sat_lit(&self, literal: Literal) -> Lit {
        let lit = self.variable_to_sat[&literal.variable()];
        if literal.signed() {
            !lit
        } else {
            lit
        }
    }

    fn check_candidate_exists(&mut self, next: &mut Vec<Box<ScopeRecursiveSolver>>) -> Lbool {
        // we need to reset abstraction entries for next scopes, since some entries may be pushed down
        self.entry.intersect(&self.relevant_clauses);
        for ref mut scope in next {
            scope.data.entry.clone_from(&self.entry);
        }

        self.sat_solver_assumptions.clear();

        #[cfg(debug_assertions)]
        let mut debug_print = String::new();

        // we iterate in parallel over the entry and the t-literals of current level
        // there are 3 possibilities:
        // * clause from entry is not a t-lit: push entry to next quantifier
        // * clause is in entry and a t-lit: assume positively
        // * clause is not in entry and a t-lit: assume negatively
        for &(clause_id, mut t_literal) in self.t_literals.iter() {
            if !self.entry[clause_id as usize] {
                t_literal = !t_literal;
            }
            if self.is_universal {
                t_literal = !t_literal;
            }

            #[cfg(debug_assertions)]
            {
                if t_literal.isneg() {
                    debug_print.push_str(&format!(" -t{}", clause_id));
                } else {
                    debug_print.push_str(&format!(" t{}", clause_id));
                }
            }

            if self.is_universal && !t_literal.isneg() {
                // assume t-literal completely for existential quantifier
                // and only negatively for universal quantifier
                continue;
            }

            self.sat_solver_assumptions.push(t_literal);
        }

        #[cfg(debug_assertions)]
        debug!("assume {}",
        debug_print);

    self.sat
        .solve_with_assumptions(self.sat_solver_assumptions.as_ref())
}

/// Reads the SAT model back into `self.assignments`.
/// Panics if the solver left any scope variable unassigned.
fn update_assignment(&mut self) {
    trace!("update_assignment");

    #[cfg(debug_assertions)]
    let mut debug_print = String::new();

    let model = self.sat.get_model();
    for (&variable, &sat_var) in self.variable_to_sat.iter() {
        let value = match model[sat_var.var() as usize] {
            Lbool::True => true,
            Lbool::False => false,
            _ => panic!("expect all variables to be assigned"),
        };

        #[cfg(debug_assertions)]
        {
            if value {
                debug_print.push_str(&format!(" {}", variable));
            } else {
                debug_print.push_str(&format!(" -{}", variable));
            }
        }

        let old = self.assignments.entry(variable).or_insert(value);
        *old = value;
    }

    #[cfg(debug_assertions)]
    debug!("assignment {}", debug_print);
}

/// Propagates the current candidate assignment to the inner scopes by
/// setting entry bits on `next`, guided by the b-literals of this level.
fn get_assumptions(&mut self, matrix: &QMatrix, next: &mut Vec<Box<ScopeRecursiveSolver>>) {
    trace!("get_assumptions");

    // assumptions in `next` were already cleared in check_candidate_exists

    #[cfg(debug_assertions)]
    let mut debug_print = String::new();

    if !self.is_universal {
        for &(clause_id, b_lit) in self.b_literals.iter() {
            if self.sat.is_true(b_lit) {
                next.iter_mut().for_each(|ref mut scope| {
                    scope.data.entry.set(clause_id as usize, true);
                });
                continue;
            }
            /*debug_assert!(
                !self.entry[clause_id as usize] || assumptions[clause_id as usize],
                "entry -> assumption"
            );*/
            if self.entry[clause_id as usize] {
                //debug_assert!(assumptions[clause_id as usize]);
                continue;
            }

            // assumption literal was set, but it may be still true that the clause is satisfied
            let clause = &matrix.clauses[clause_id as usize];
            if clause.is_satisfied_by_assignment(&self.assignments) {
                next.iter_mut().for_each(|ref mut scope| {
                    scope.data.entry.set(clause_id as usize, true);
                });
                continue;
            }

            #[cfg(debug_assertions)]
            debug_print.push_str(&format!(" b{}", clause_id));
        }
    } else {
        for &(clause_id, b_lit) in self.b_literals.iter() {
            if self.sat.is_true(b_lit) {
                continue;
            }
            // assumption literal was set
            // check if clause is falsified by current level
            let clause = &matrix.clauses[clause_id as usize];
            let mut falsified = true;
            let mut nonempty = false;
            for literal in clause.iter() {
                if !self.variable_to_sat.contains_key(&literal.variable()) {
                    // not a variable of current level
                    continue;
                }
                nonempty = true;
                let value = self.assignments[&literal.variable()];
                if value && !literal.signed() {
                    falsified = false;
                    break;
                } else if !value && literal.signed() {
                    falsified = false;
                    break;
                }
            }
            if nonempty && falsified {
                // depending on t-literal, the assumption is already set
                continue;
                /*if self.t_literals.contains_key(&clause_id) {
                    if !self.entry[clause_id as usize] {
                        continue;
                    }
                } else {
                    continue;
                }*/
            }
            if !nonempty {
                debug_assert!(
                    self.t_literals
                        .binary_search_by(|elem| elem.0.cmp(&clause_id))
                        .is_ok()
                );
                // we have already copied the value by copying current entry
                continue;
                /*if !self.entry[clause_id as usize] {
                    continue;
                }*/
            }

            next.iter_mut().for_each(|ref mut scope| {
                scope.data.entry.set(clause_id as usize, true);
            });

            #[cfg(debug_assertions)]
            debug_print.push_str(&format!(" b{}", clause_id));
        }
    }

    #[cfg(debug_assertions)]
    debug!("assumptions: {}", debug_print);
}

/// Shrinks the entry by dropping clause bits that the current assignment
/// already takes care of (trying both polarities per variable).
fn entry_minimization(&mut self, matrix: &QMatrix) {
    trace!("entry_minimization");

    // add clauses to entry where the current scope is maximal
    self.entry.union(&self.max_clauses);

    for variable in self.variables.iter() {
        let value = self.assignments[variable];
        let literal = Literal::new(*variable, !value);

        // check if assignment is needed, i.e., it can flip a bit in entry
        let mut needed = false;
        for &clause_id in matrix.occurrences(literal) {
            if self.entry[clause_id as usize] {
                needed = true;
                self.entry.set(clause_id as usize, false);
            }
        }

        if !needed {
            // the current value set is not needed for the entry, try other polarity
            for &clause_id in matrix.occurrences(-literal) {
                if self.entry[clause_id as usize] {
                    self.entry.set(clause_id as usize, false);
                }
            }
        }
    }

    // sanity check: every remaining entry clause must mention an outer scope
    #[cfg(debug_assertions)]
    for (i, val) in self.entry.iter().enumerate().filter(|&(_, val)| val) {
        let clause = &matrix.clauses[i];
        let mut min = ScopeId::max_value();
        for &literal in clause.iter() {
            let otherscope = matrix.prefix.variables().get(literal.variable()).scope;
            if otherscope < min {
                min = otherscope;
            }
        }
        assert!(min < self.scope_id);
    }
}

/// Excludes the counterexample in `next.data.entry` from the abstraction
/// by adding a blocking clause over b-literals; optional refinements
/// (expansion, strong-unsat, literal subsumption) are applied first.
fn refine(&mut self, matrix: &QMatrix, next: &mut Box<ScopeRecursiveSolver>) {
    trace!("refine");

    // check if influenced by current scope
    /*let mut max = 0;
    for (i, _) in self.entry.iter().enumerate().filter(|&(_, val)| val) {
        let clause = &matrix.clauses[i];
        for &literal in clause.iter() {
            let otherscope = matrix.prefix.variables().get(literal.variable()).scope;
            if otherscope > self.scope_id {
                continue;
            }
            if otherscope > max {
                max = otherscope;
            }
        }
    }
    if max < self.scope_id && self.scope_id > 1 {
        println!("{} {}", max, self.scope_id);
        panic!("a");
    }*/

    if self.options.expansion_refinement && self.is_expansion_refinement_applicable(next) {
        self.expansion_refinement(matrix, next);
    }

    if !self.is_universal && self.options.strong_unsat_refinement
        && self.strong_unsat_refinement(matrix, next)
    {
        return;
    }
    // important: refinement literal subsumption has to be after strong unsat refinement
    if self.options.refinement_literal_subsumption {
        self.refinement_literal_subsumption_optimization(matrix, next);
    }

    let entry = &next.data.entry;
    let blocking_clause = &mut self.sat_solver_assumptions;
    blocking_clause.clear();

    #[cfg(debug_assertions)]
    let mut debug_print = String::new();

    for (i, _) in entry.iter().enumerate().filter(|&(_, val)| val) {
        let clause_id = i as ClauseId;
        let b_lit = Self::add_b_lit_and_adapt_abstraction(
            clause_id,
            &mut self.sat,
            &self.b_literals,
            &mut self.t_literals,
            &mut self.reverse_t_literals,
        );
        blocking_clause.push(b_lit);

        #[cfg(debug_assertions)]
        debug_print.push_str(&format!(" b{}", clause_id));
    }
    self.sat.add_clause(blocking_clause.as_ref());

    #[cfg(debug_assertions)]
    debug!("refinement: {}", debug_print);
}

/// Implements the strong unsat refinement operation.
/// If successful, it can reduce the number of iterations needed. /// Returns true, if the optimization was applied, false otherwise. fn strong_unsat_refinement( &mut self, matrix: &QMatrix, next: &mut Box<ScopeRecursiveSolver>, ) -> bool { trace!("strong_unsat_refinement"); debug_assert!(!self.is_universal); let mut applied = false; // re-use sat-solver-assumptions vector let blocking_clause = &mut self.sat_solver_assumptions; blocking_clause.clear(); let entry = &next.data.entry; let scope_id = self.scope_id; // was the clause processed before? for (i, _) in entry.iter().enumerate().filter(|&(_, val)| val) { let clause_id = i as ClauseId; match self.strong_unsat_cache.get(&clause_id) { Some(&(literal, opt)) => { if opt { applied = true; } blocking_clause.push(literal); continue; } None => {} } // TODO: for implementation of stronger unsat rule (see "On Expansion and Resolution in CEGAR Based QBF Solving"), // we have to collect all universal variables from all failed clauses. // This means espacially that we cannot use our current hashing anymore // Get some random existential occurrence from clause, so we can use // the occurrence list to iterate over clauses let clause = &matrix.clauses[i]; self.conjunction.clear(); self.conjunction.push(clause_id); for &literal in clause.iter() { let info = matrix.prefix.variables().get(literal.variable()); // Consider only existential variables that have a lower level if info.is_universal() || info.scope <= self.scope_id { continue; } // Iterate over occurrence list and add equivalent clauses for &other_clause_id in matrix.occurrences(literal) { let other_clause = &matrix.clauses[other_clause_id as usize]; // check if clause is subset w.r.t. 
inner variables if clause_id != other_clause_id && self.relevant_clauses[other_clause_id as usize] { let pos = match self.conjunction.binary_search(&other_clause_id) { Ok(_) => continue, // already contained, skip Err(pos) => pos, // position to insert }; if other_clause.is_subset_wrt_predicate(clause, |l| { matrix.prefix.variables().get(l.variable()).scope > scope_id }) { debug_assert!(!self.max_clauses[other_clause_id as usize]); self.conjunction.insert(pos, other_clause_id); } } } } debug_assert!(self.conjunction.len() > 0); if self.conjunction.len() == 1 { // do not need auxilliary variable let clause_id = self.conjunction[0]; let sat_lit = Self::add_b_lit_and_adapt_abstraction( clause_id, &mut self.sat, &self.b_literals, &mut self.t_literals, &mut self.reverse_t_literals, ); blocking_clause.push(sat_lit); self.strong_unsat_cache.insert(clause_id, (sat_lit, false)); } else { // build the conjunction using an auxilliary variable let aux_var = self.sat.new_var(); blocking_clause.push(aux_var); self.strong_unsat_cache.insert(clause_id, (aux_var, true)); for &other_clause_id in self.conjunction.iter() { let sat_lit = Self::add_b_lit_and_adapt_abstraction( other_clause_id, &mut self.sat, &self.b_literals, &mut self.t_literals, &mut self.reverse_t_literals, ); self.sat.add_clause(&[!aux_var, sat_lit]); } applied = true; } } if applied { self.sat.add_clause(blocking_clause.as_ref()); } applied } /// Tries to reduce the size of refinements. /// If a clause is subsumed by another clause in refinement, it can be removed. /// This does not change the number of iterations, but may make the job of SAT solver easier. /// /// Returns true if the refinement clause could be reduced. 
fn refinement_literal_subsumption_optimization(
    &mut self,
    matrix: &QMatrix,
    next: &mut Box<ScopeRecursiveSolver>,
) -> bool {
    let mut successful = false;
    let entry = &mut next.data.entry;
    'outer: for i in 0..entry.len() {
        if !entry[i] {
            continue;
        }
        let clause_id = i as ClauseId;
        let clause = &matrix.clauses[i];
        for &literal in clause.iter() {
            let info = matrix.prefix.variables().get(literal.variable());
            if info.scope > self.scope_id {
                // do not consider inner variables
                continue;
            }
            // iterate over occurrence list
            for &other_clause_id in matrix.occurrences(literal) {
                if clause_id == other_clause_id {
                    continue;
                }
                if !entry[other_clause_id as usize] {
                    // not in entry, thus not interesting
                    continue;
                }
                let other_clause = &matrix.clauses[other_clause_id as usize];
                let current_scope = self.scope_id;
                // check if other clause subsumes current
                // check is done with respect to current and outer variables
                // (subsumption direction differs for universal/existential scopes)
                if self.is_universal {
                    if other_clause.is_subset_wrt_predicate(clause, |l| {
                        let info = matrix.prefix.variables().get(l.variable());
                        info.scope <= current_scope
                    }) {
                        entry.set(clause_id as usize, false);
                        successful = true;
                        continue 'outer;
                    }
                } else {
                    if clause.is_subset_wrt_predicate(other_clause, |l| {
                        let info = matrix.prefix.variables().get(l.variable());
                        info.scope <= current_scope
                    }) {
                        entry.set(clause_id as usize, false);
                        successful = true;
                        continue 'outer;
                    }
                }
            }
        }
    }
    successful
}

/// Expansion refinement currently only applies to existential scopes whose
/// successor is the innermost scope (see the debug assertion).
fn is_expansion_refinement_applicable(&self, next: &mut Box<ScopeRecursiveSolver>) -> bool {
    if self.is_universal {
        return false;
    }
    //return true;
    debug_assert!(next.next.len() == 1);
    return next.next[0].as_ref().next.is_empty();
}

/// Adds expansion-based refinement clauses: clauses not satisfied by the
/// universal assignment are copied into the abstraction with inner
/// existential variables renamed to fresh SAT variables.
fn expansion_refinement(&mut self, matrix: &QMatrix, next: &mut Box<ScopeRecursiveSolver>) {
    trace!("expansion_refinement");
    let universal_assignment = next.get_universal_assignmemnt(HashMap::new());
    let (data, next) = next.split();
    let next = &next[0];

    // add a new sat variable for every existential variable in inner scope (updated lazily)
    self.expansion_renaming.clear();

    let sat = &mut self.sat;
    let sat_clause = &mut self.sat_solver_assumptions;

    // create the refinement clauses
    for (i, clause) in matrix.clauses.iter().enumerate() {
        if !next.data.relevant_clauses[i] {
            continue;
        }

        // check if the universal assignment satisfies the clause
        if clause.is_satisfied_by_assignment(&universal_assignment) {
            continue;
        }

        sat_clause.clear();

        // add the clause to the abstraction
        // variables bound by inner existential quantifier have to be renamed
        let mut contains_variables = false;
        let mut contains_outer_variables = false;
        for &literal in clause.iter() {
            let info = matrix.prefix.variables().get(literal.variable());
            if info.scope <= data.scope_id {
                if info.scope < self.scope_id {
                    contains_outer_variables = true;
                }
                continue;
            }
            if info.scope % 2 == 1 {
                // odd scope ids are universal here; those variables are fixed
                // by the universal assignment and thus dropped from the clause
                debug_assert!(universal_assignment.contains_key(&literal.variable()));
                continue;
            }
            debug_assert!(info.scope > self.scope_id);
            contains_variables = true;

            let entry = self.expansion_renaming
                .entry(literal.variable())
                .or_insert_with(|| sat.new_var());
            let mut sat_var = *entry;
            if literal.signed() {
                sat_var = !sat_var;
            }
            sat_clause.push(sat_var);
        }
        let clause_id = i as ClauseId;
        if self.b_literals
            .binary_search_by(|elem| elem.0.cmp(&clause_id))
            .is_ok() || contains_variables && contains_outer_variables
        {
            let sat_lit = Self::add_b_lit_and_adapt_abstraction(
                clause_id,
                sat,
                &self.b_literals,
                &mut self.t_literals,
                &mut self.reverse_t_literals,
            );
            sat_clause.push(sat_lit);
        }
        if !contains_variables {
            continue;
        }
        if !sat_clause.is_empty() {
            sat.add_clause(sat_clause.as_ref());
        }
    }
}

/// Returns an interface literal for `clause_id`, creating a new t-literal
/// (and registering it in both directions) if neither a b- nor a t-literal
/// exists yet.
fn add_b_lit_and_adapt_abstraction(
    clause_id: ClauseId,
    sat: &mut cryptominisat::Solver,
    b_literals: &Vec<(ClauseId, Lit)>,
    t_literals: &mut Vec<(ClauseId, Lit)>,
    reverse_t_literals: &mut HashMap<u32, Variable>,
) -> Lit {
    // first check if there is a b-literal for clause
    // if yes, just return it (the currents scope influences clause since there is at least one variable contained)
    // if no, we continue
    match b_literals.binary_search_by(|elem| elem.0.cmp(&clause_id)) {
        Ok(pos) => return b_literals[pos].1,
        Err(_) => {}
    };

    // we then check, if there is a corresponding t-literal
    // if yes, we return this instead
    // if no, we have to adapt the abstraction by inserting a new t-literal
    let insert_pos = match t_literals.binary_search_by(|elem| elem.0.cmp(&clause_id)) {
        Ok(pos) => return t_literals[pos].1,
        Err(pos) => pos,
    };
    let sat_lit = sat.new_var();
    t_literals.insert(insert_pos, (clause_id, sat_lit));
    reverse_t_literals.insert(sat_lit.var(), clause_id);

    // note that, we could also adapt b_literals (with the same sat_literal)
    // however, this is not necessary and not directly obvious
    // 1) reason *not* to do it: in get_assumptions we iterate over b_literals to check
    //    if we can improve the assumptions produced by the SAT solver. Since the clauses
    //    that are added here have no influence of current scope, this check is wasted time
    // 2) we do not *need* them, because abstraction entries are just copied from one
    //    scope to another

    sat_lit
}

/// Translates the SAT solver's failed assumptions (t-literals) back into an
/// entry bit-set representing the unsat core.
fn get_unsat_core(&mut self) {
    trace!("unsat_core");
    self.entry.clear();

    #[cfg(debug_assertions)]
    let mut debug_print = String::new();

    let failed = self.sat.get_conflict();
    for l in failed {
        let clause_id = self.reverse_t_literals[&l.var()];
        self.entry.set(clause_id as usize, true);

        #[cfg(debug_assertions)]
        debug_print.push_str(&format!(" t{}", clause_id));
    }

    #[cfg(debug_assertions)]
    debug!("unsat core: {}", debug_print);
}

/// filters those clauses that are only influenced by this quantifier (or inner)
fn unsat_propagation(&mut self) {
    self.entry.difference(&self.max_clauses);
}
}

/// A node in the quantifier-scope tree: per-scope solver data plus the
/// child scopes.
struct ScopeRecursiveSolver {
    data: ScopeSolverData,
    next: Vec<Box<ScopeRecursiveSolver>>,
}

impl ScopeRecursiveSolver {
    fn new(
        matrix: &QMatrix,
        options: CaqeSolverOptions,
        scope: &Scope,
        quantifier: Quantifier,
        next: Vec<Box<ScopeRecursiveSolver>>,
    ) -> ScopeRecursiveSolver {
        // relevant clauses are the union of the children's relevant clauses
        let mut relevant_clauses = BitVec::from_elem(matrix.clauses.len(),
        false);
        for ref next_scope in next.iter() {
            #[cfg(debug_assertions)]
            {
                // the branches have pairwise disjoint relevant clauses
                let mut copy = relevant_clauses.clone();
                copy.intersect(&next_scope.data.relevant_clauses);
                assert!(copy.none());
            }
            relevant_clauses.union(&next_scope.data.relevant_clauses);
        }
        let mut candidate = ScopeRecursiveSolver {
            data: ScopeSolverData::new(matrix, options, scope, relevant_clauses),
            next: next,
        };

        // add variables of scope to sat solver
        for &variable in scope.variables.iter() {
            candidate
                .data
                .variable_to_sat
                .insert(variable, candidate.data.sat.new_var());
        }

        match quantifier {
            Quantifier::Existential => candidate.data.new_existential(matrix, scope),
            Quantifier::Universal => candidate.data.new_universal(matrix, scope),
        }

        candidate
    }

    /// Builds the abstraction tree bottom-up by recursing over the scope
    /// nodes first, then constructing this scope's solver on top.
    fn init_abstraction_recursively(
        matrix: &QMatrix,
        options: CaqeSolverOptions,
        scope_node: &Box<ScopeNode>,
    ) -> Box<ScopeRecursiveSolver> {
        let mut prev = Vec::new();
        for ref child_node in scope_node.next.iter() {
            prev.push(Self::init_abstraction_recursively(
                matrix,
                options.clone(),
                child_node,
            ))
        }
        let scope = &scope_node.scope;
        let result = Box::new(ScopeRecursiveSolver::new(
            matrix,
            options,
            scope,
            Quantifier::from(scope.id),
            prev,
        ));

        #[cfg(debug_assertions)]
        {
            // check consistency of interface literals
            // for every b_lit in abstraction, there is a corresponding t_lit in one of its inner abstractions
            /*for &(clause_id, _b_lit) in result.data.b_literals.iter() {
                let mut current = &result;
                let mut found = false;
                while let Some(next) = current.next.as_ref() {
                    if next.data
                        .t_literals
                        .binary_search_by(|elem| elem.0.cmp(&clause_id))
                        .is_ok()
                    {
                        found = true;
                        break;
                    }
                    current = &next;
                }
                if !found {
                    panic!(
                        "missing t-literal for b-literal {} at scope {}",
                        clause_id, scope.id
                    );
                }
            }*/

            /*if scope_id == 0 {
                let mut abstractions = Vec::new();
                Self::verify_t_literals(&mut abstractions, result.as_ref());
            }*/
        }

        result
    }

    /*#[cfg(debug_assertions)]
    fn verify_t_literals<'a>(
        abstractions: &mut Vec<&'a ScopeRecursiveSolver>,
        scope: &'a ScopeRecursiveSolver,
    ) {
        // check that for every clause containing a t-literal at this scope,
        // there is a clause containing a b-literal in the previous scope
        abstractions.push(scope);
        for next in scope.next {
            for &(clause_id, _t_lit) in next.data.t_literals.iter() {
                let has_matching_b_lit = abstractions.iter().fold(false, |val, &abstraction| {
                    val || abstraction
                        .data
                        .b_literals
                        .binary_search_by(|elem| elem.0.cmp(&clause_id))
                        .is_ok()
                });
                if !has_matching_b_lit {
                    panic!(
                        "missing b-literal for t-literal {} at scope {}",
                        clause_id, next.data.scope_id
                    );
                }
            }
            Self::verify_t_literals(abstractions, next.as_ref());
        }
        abstractions.pop();
    }*/

    /// CEGAR main loop for this scope: find a candidate, recurse into the
    /// children, refine on failure, propagate entries on success.
    fn solve_recursive(&mut self, matrix: &QMatrix) -> SolverResult {
        trace!("solve_recursive");

        // mutable split
        let current = &mut self.data;
        let next = &mut self.next;

        // a "good" result lets this quantifier win; a "bad" one forces refinement
        let good_result = if current.is_universal {
            SolverResult::Unsatisfiable
        } else {
            SolverResult::Satisfiable
        };
        let bad_result = if current.is_universal {
            SolverResult::Satisfiable
        } else {
            SolverResult::Unsatisfiable
        };
        debug_assert!(good_result != bad_result);

        loop {
            debug!("");
            info!("solve level {}", current.scope_id);

            #[cfg(feature = "statistics")]
            let mut timer = current
                .statistics
                .start(SolverScopeEvents::SolveScopeAbstraction);

            match current.check_candidate_exists(next) {
                Lbool::True => {
                    // there is a candidate solution, verify it recursively
                    current.update_assignment();

                    if next.is_empty() {
                        // innermost scope, propagate result to outer scopes
                        debug_assert!(!current.is_universal);
                        //current.entry.clear();
                        current.entry_minimization(matrix);
                        return good_result;
                    }

                    current.get_assumptions(matrix, next);

                    #[cfg(feature = "statistics")]
                    timer.stop();

                    current.sub_result = good_result;
                    for ref mut scope in next.iter_mut() {
                        let result = scope.solve_recursive(matrix);
                        if result == bad_result {
                            debug_assert!(result == bad_result);
                            current.sub_result = bad_result;

                            #[cfg(feature = "statistics")]
                            let mut _timer =
                                current.statistics.start(SolverScopeEvents::Refinement);
                            current.refine(matrix, scope);
                        }
                    }

                    if current.sub_result == bad_result {
                        continue;
                    } else {
                        // copy entries from inner quantifier
                        current.entry.clear();
                        for ref scope in next.iter() {
                            current.entry.union(&scope.data.entry);
                        }
                        // apply entry optimization
                        if current.is_universal {
                            current.unsat_propagation();
                        } else {
                            current.entry_minimization(matrix);
                        }
                        return good_result;
                    }
                }
                Lbool::False => {
                    // there is no candidate solution, return witness
                    current.get_unsat_core();
                    return bad_result;
                }
                _ => panic!("inconsistent internal state"),
            }
        }
    }

    #[cfg(feature = "statistics")]
    pub fn print_statistics(&self) {
        println!("level {}", self.data.scope_id);
        self.data.statistics.print();
        for ref next in self.next.iter() {
            next.print_statistics()
        }
    }

    fn split(&mut self) -> (&mut ScopeSolverData, &mut Vec<Box<ScopeRecursiveSolver>>) {
        (&mut self.data, &mut self.next)
    }

    // NOTE(review): method name contains a typo ("assignmemnt"); kept for
    // compatibility with existing callers.
    /// Collects the assignments of all universal scopes in this subtree.
    fn get_universal_assignmemnt(
        &self,
        mut assignment: HashMap<Variable, bool>,
    ) -> HashMap<Variable, bool> {
        if self.data.is_universal {
            assignment.extend(self.data.assignments.iter());
        }
        for ref next in self.next.iter() {
            assignment = next.get_universal_assignmemnt(assignment);
        }
        assignment
    }
}

/// Tracks the minimum and maximum of a stream of `i32` values.
struct MinMax {
    min: Option<i32>,
    max: Option<i32>,
}

impl MinMax {
    fn new() -> MinMax {
        MinMax {
            min: None,
            max: None,
        }
    }

    fn update(&mut self, value: i32) {
        match (self.min, self.max) {
            (None, None) => {
                self.min = Some(value);
                self.max = Some(value);
            }
            (Some(min), Some(max)) => {
                if value < min {
                    self.min = Some(value);
                }
                if value > max {
                    self.max = Some(value);
                }
            }
            _ => panic!("inconsistent internal state"),
        }
    }

    /// Panics if `update` was never called.
    fn min(&self) -> i32 {
        self.min.unwrap()
    }

    /// Panics if `update` was never called.
    fn max(&self) -> i32 {
        self.max.unwrap()
    }

    fn get(&self) -> (i32, i32) {
        (self.min(), self.max())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use solver::Solver;

    #[test]
    fn test_false() {
        let instance = "p cnf 0 1\n0\n";
        let matrix = qdimacs::parse(&instance).unwrap();
        assert!(matrix.conflict());
    }

    #[test]
    fn
    test_true() {
        let instance = "p cnf 0 0";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 0 0\n");
    }

    #[test]
    fn test_sat_simple() {
        let instance = "c
p cnf 4 4
a 1 2 0
e 3 4 0
1 3 0
-1 4 0
-3 -4 0
-1 2 4 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 4 4\n");
    }

    #[test]
    fn test_unsat_simple() {
        let instance = "c
p cnf 4 4
a 1 2 0
e 3 4 0
1 3 0
-1 4 0
-3 -4 0
1 2 4 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(
            solver.qdimacs_output().dimacs(),
            "s cnf 0 4 4\nV -1 0\nV -2 0\n"
        );
    }

    #[test]
    fn test_two_alternations() {
        let instance = "c
p cnf 11 24
a 1 0
e 2 0
a 3 0
e 4 5 6 7 8 9 10 11 0
3 5 0
-4 5 0
-3 4 -5 0
-3 6 0
4 6 0
3 -4 -6 0
2 -7 0
5 -7 0
6 -7 0
-2 -5 -6 7 0
-1 8 0
-7 8 0
1 7 -8 0
-2 -9 0
5 -9 0
6 -9 0
2 -5 -6 9 0
1 10 0
-9 10 0
-1 9 -10 0
8 -11 0
10 -11 0
-8 -10 11 0
11 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 11 24\n");
    }

    // Regression test: see the comment embedded in the instance.
    #[test]
    fn test_wrong_sat() {
        let instance = "c
c This instance was falsly characterized as SAT
p cnf 4 3
a 4 0
e 3 0
a 1 0
e 2 0
-3 0
3 -4 0
-2 -1 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 4 3\nV 4 0\n");
    }

    #[test]
    fn test_cnf() {
        let instance = "c
c CNF instance without quantifier
p cnf 1 2
-1 0
1 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 1 2\n");
    }

    #[test]
    fn test_wrong_unsat() {
        let instance = "c
c This instance was falsly characterized as UNSAT
p cnf 3 2
a 1 2 0
e 3 0
3 -2 0
-3 -1 2 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 3 2\n");
    }

    #[test]
    fn test_strong_unsat_crash() {
        let instance = "c
c This instance crashed with strong unsat refinement
p cnf 4 3
a 2 0
e 1 0
a 4 0
e 3 0
1 3 0
-3 -2 0
3 -4 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 4 3\nV 2 0\n");
    }

    #[test]
    fn test_refinement_literal_failure() {
        let instance = "c
c This instance was solved incorrectly in earlier versions due to refinement literal optimization
p cnf 5 5
a 5 0
e 3 0
a 1 0
e 2 4 0
-2 0
4 5 0
-4 -5 0
-4 -5 -1 0
2 3 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 5 5\n");
    }

    #[test]
    fn test_refinement_literal_failure2() {
        let instance = "c
c This instance was solved incorrectly in earlier versions due to refinement literal optimization
p cnf 4 3
a 4 0
e 1 0
a 3 0
e 2 0
-2 0
2 -3 -4 0
-1 -4 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 4 3\nV 4 0\n");
    }

    #[test]
    fn test_abstraction_literal_optimization_vs_strong_unsat() {
        let instance = "c
c This instance was solved incorrectly in earlier versions due to abstraction literal optimization
p cnf 3 4
e 3 0
a 1 0
e 2 0
-2 -1 0
-2 0
-2 3 0
3 2 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 3 4\nV 3 0\n");
    }

    #[test]
    fn test_strong_unsat_failure() {
        let instance = "c
c This instance was solved incorrectly in earlier versions due to strong unsat refinement.
c The strong unsat refinement can only applied to clauses which actually contains inner variables.
p cnf 4 3
e 2 3 0
a 4 0
e 1 0
-1 0
-2 3 0
3 1 -4 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(
            solver.qdimacs_output().dimacs(),
            "s cnf 1 4 3\nV -2 0\nV 3 0\n"
        );
    }

    #[test]
    fn test_fuzz_unsat() {
        let instance = "c
c This instance was solved incorrectly in earlier versions.
p cnf 5 5
e 1 5 0
a 4 0
e 2 3 0
-5 1 3 0
1 -5 0
-1 0
-2 4 0
5 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 5 5\n");
    }

    #[test]
    fn test_fuzz_sat() {
        let instance = "c
c This instance was solved incorrectly in earlier versions.
p cnf 4 4
e 2 0
a 4 0
e 1 3 0
1 0
2 1 0
3 -4 0
-3 2 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 4 4\nV 2 0\n");
    }

    #[test]
    fn test_wrong_unsat_miniscoping() {
        let instance = "c
c This instance was solved incorrectly in earlier versions.
p cnf 4 4
e 4 0
a 2 0
e 1 3 0
4 1 0
-1 0
4 -3 0
1 2 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Unsatisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 0 4 4\n");
    }

    #[test]
    fn test_wrong_expansion_refinement() {
        let instance = "c
c This instance was solved incorrectly in earlier versions.
c The first conflict happens at level 2, then expansion refinement did not have universal assignments for level 3
p cnf 7 6
e 7 0
a 4 0
e 2 6 0
a 5 0
e 1 3 0
-3 5 0
3 -5 0
2 0
6 4 0
-2 7 0
-3 -2 -1 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 7 6\nV 7 0\n");
    }

    #[test]
    fn test_strong_unsat_failure_2() {
        let instance = "c
c This instance was solved incorrectly in earlier versions.
p cnf 5 4
e 1 0
a 3 0
e 4 0
a 5 0
e 2 0
-2 0
-2 1 -4 3 -5 0
4 0
-4 2 1 3 0
";
        let matrix = qdimacs::parse(&instance).unwrap();
        let matrix = Matrix::unprenex_by_miniscoping(matrix, false);
        let mut solver = CaqeSolver::new(&matrix);
        assert_eq!(solver.solve(), SolverResult::Satisfiable);
        assert_eq!(solver.qdimacs_output().dimacs(), "s cnf 1 5 4\nV 1 0\n");
    }
}
//! A safe wrapper for borrowed mutable references. //! //! See: https://github.com/PyO3/pyo3/issues/1180 use pyo3::Python; use std::sync::atomic::{AtomicPtr, Ordering}; /// A wrapper for a mutable reference. pub struct SafeRef<T>(*mut AtomicPtr<T>); unsafe impl<T> Send for SafeRef<T> {} impl<T> Drop for SafeRef<T> { fn drop(&mut self) { unsafe { let box_ptr = self.0; let ptr = &*(box_ptr as *const AtomicPtr<String>); // CDSChecker suggests it's ok these are Relaxed if ptr.load(Ordering::Relaxed).is_null() { std::mem::drop(Box::from_raw(box_ptr)); } else { ptr.store(std::ptr::null_mut(), Ordering::Relaxed); } } } } impl<T> SafeRef<T> { /// Run a closure with a wrapped reference. /// /// The `obj` reference is wrapped in a `SafeRef` so that it is cleared /// when the scope ends. In conjunction with the guarantees provided by /// running under the GIL, this should be entirely safe. /// /// Note: I've done some analysis and experimentation, but am not yet /// completely confident this actually is safe. pub fn scoped<'p, U>(_py: Python<'p>, obj: &mut T, f: impl FnOnce(SafeRef<T>) -> U) -> U { let box_ptr = Box::into_raw(Box::new(AtomicPtr::new(obj))); let wrapper = SafeRef(box_ptr); let result = f(wrapper); std::mem::drop(SafeRef(box_ptr)); result } /// Get the mutable reference. /// /// The `Python<'p>` argument is a guarantee this is run under GIL. pub fn try_get_mut<'p>(&mut self, _py: Python<'p>) -> Option<&mut T> { unsafe { let ptr = (*self.0).load(Ordering::Relaxed); if ptr.is_null() { None } else { Some(&mut *ptr) } } } }
//! Portable relative UTF-8 paths for Rust. //! //! This provide a module analogous to [std::path], with the following characteristics: //! //! * The path separator is set to a fixed character (`/`), regardless of platform. //! * Relative paths cannot represent a path in the filesystem without first specifying what they //! are *relative to* through [to_path]. //! * Relative paths are always guaranteed to be a UTF-8 string. //! //! On top of this we support many path-like operations that guarantee portable behavior. //! //! ## Serde Support //! //! This library includes serde support that can be enabled with the `serde` feature. //! //! ## Why is `std::path` a portability hazard? //! //! Path representations differ across platforms. //! //! * Windows permits using drive volumes (multiple roots) as a prefix (e.g. `"c:\"`) and backslash (`\`) as a separator. //! * Unix references absolute paths from a single root and uses slash (`/`) as a separator. //! //! If we use `PathBuf`, Storing paths like this in a manifest would happily allow our applications to build and run on one platform, but potentially not others. //! //! Consider the following manifest: //! //! ```rust //! use std::path::PathBuf; //! use serde::{Serialize, Deserialize}; //! //! #[derive(Serialize, Deserialize)] //! struct Manifest { //! source: PathBuf, //! } //! ``` //! //! Which represents this TOML file: //! //! ```toml //! # Uh oh, trouble. //! source = "C:\\path\\to\\source" //! ``` //! //! Assuming `"C:\\path\\to\\source"` is a legal path on Windows, this will //! happily run for one platform when checked into source control but not //! others. //! //! Since [RelativePath] strictly uses `/` as a separator it avoids this issue. //! Anything non-slash will simply be considered part of a *distinct component*. //! //! Conversion to [Path] may only happen if it is known which path it is //! relative to through the [to_path] or [to_logical_path] functions. This is //! 
where the relative part of the name comes from. //! //! ```rust //! use relative_path::RelativePath; //! use std::path::Path; //! //! # if cfg!(windows) { //! // to_path unconditionally concatenates a relative path with its base: //! let relative_path = RelativePath::new("../foo/./bar"); //! let full_path = relative_path.to_path("C:\\"); //! assert_eq!(full_path, Path::new("C:\\..\\foo\\.\\bar")); //! //! // to_logical_path tries to apply the logical operations that the relative //! // path corresponds to: //! let relative_path = RelativePath::new("../foo/./bar"); //! let full_path = relative_path.to_logical_path("C:\\baz"); //! assert_eq!(full_path, Path::new("C:\\foo\\bar")); //! # } //! ``` //! //! This would permit relative paths to portably be used in project manifests or configurations. //! Where files are referenced from some specific, well-known point in the filesystem. //! //! ```toml //! source = "path/to/source" //! ``` //! //! The fixed manifest would look like this: //! //! ```rust //! use relative_path::RelativePathBuf; //! use serde::{Serialize, Deserialize}; //! //! #[derive(Serialize, Deserialize)] //! pub struct Manifest { //! source: RelativePathBuf, //! } //! ``` //! //! ## Overview //! //! When two relative paths are compared to each other, their exact component makeup determines equality. //! //! ```rust //! use relative_path::RelativePath; //! //! assert_ne!( //! RelativePath::new("foo/bar/../baz"), //! RelativePath::new("foo/baz") //! ); //! ``` //! //! Using platform-specific path separators to construct relative paths is not supported. //! //! Path separators from other platforms are simply treated as part of a component: //! //! ```rust //! use relative_path::RelativePath; //! //! assert_ne!( //! RelativePath::new("foo/bar"), //! RelativePath::new("foo\\bar") //! ); //! //! assert_eq!(1, RelativePath::new("foo\\bar").components().count()); //! assert_eq!(2, RelativePath::new("foo/bar").components().count()); //! ``` //! //! 
To see if two relative paths are equivalent you can use [normalize]: //! //! ```rust //! use relative_path::RelativePath; //! //! assert_eq!( //! RelativePath::new("foo/bar/../baz").normalize(), //! RelativePath::new("foo/baz").normalize(), //! ); //! ``` //! //! ## Additional portability notes //! //! While relative paths avoid the most egregious portability issues, namely that absolute paths will work equally unwell on all platforms. //! We do not avoid all. //! //! This section tries to document additional portability issues that we know //! about. //! //! [RelativePath], similarly to [Path], makes no guarantees that the components represented in them //! makes up legal file names. //! While components are strictly separated by slashes, we can still store things in path components which may not be used as legal paths on all platforms. //! //! * `NUL` is not permitted on unix platforms - this is a terminator in C-based filesystem APIs. Slash //! (`/`) is also used as a path separator. //! * Windows has a number of [reserved characters and names][windows-reserved]. //! //! As a relative path that *actually* contains a platform-specific absolute path //! will result in a nonsensical path being generated. //! //! ```rust //! use relative_path::RelativePath; //! use std::path::Path; //! //! if cfg!(windows) { //! assert_eq!( //! Path::new("foo\\c:\\bar\\baz"), //! RelativePath::new("c:\\bar\\baz").to_path("foo") //! ); //! } //! //! if cfg!(unix) { //! assert_eq!( //! Path::new("foo/bar/baz"), //! RelativePath::new("/bar/baz").to_path("foo") //! ); //! } //! ``` //! //! This is intentional in order to cause an early breakage when a platform //! encounters paths like `"foo/c:\\bar\\baz"` to signal that it is a //! portability hazard. //! On Unix it's a bit more subtle with `""foo/bar/baz""`, since the leading //! slash (`/`) will simply be ignored. //! The hope is that it will be more probable to cause an early error unless a //! 
compatible relative path *also* exists. //! //! [windows-reserved]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx //! [RelativePath]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html //! [to_path]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_path //! [to_logical_path]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.to_logical_path //! [normalize]: https://docs.rs/relative-path/1/relative_path/struct.RelativePath.html#method.normalize //! [None]: https://doc.rust-lang.org/std/option/enum.Option.html //! [std::path]: https://doc.rust-lang.org/std/path/index.html //! [Path]: https://doc.rust-lang.org/std/path/struct.Path.html // This file contains parts that are Copyright 2015 The Rust Project Developers, copied from: // https://github.com/rust-lang/rust // cb2a656cdfb6400ac0200c661267f91fabf237e2 src/libstd/path.rs #![deny(missing_docs)] #![deny(broken_intra_doc_links)] use std::borrow::{Borrow, Cow}; use std::cmp; use std::error; use std::fmt; use std::hash::{Hash, Hasher}; use std::mem; use std::ops::{self, Deref}; use std::path; use std::str; #[cfg(feature = "serde")] extern crate serde; const STEM_SEP: char = '.'; const CURRENT_STR: &str = "."; const PARENT_STR: &str = ".."; const SEP: char = '/'; fn split_file_at_dot(input: &str) -> (Option<&str>, Option<&str>) { if input == PARENT_STR { return (Some(input), None); } let mut iter = input.rsplitn(2, STEM_SEP); let after = iter.next(); let before = iter.next(); if before == Some("") { (Some(input), None) } else { (before, after) } } // Iterate through `iter` while it matches `prefix`; return `None` if `prefix` // is not a prefix of `iter`, otherwise return `Some(iter_after_prefix)` giving // `iter` after having exhausted `prefix`. 
// Consume `prefix` from the front of `iter`, component by component.
// Returns the remainder of `iter` once every component of `prefix` has
// matched, or `None` on the first mismatch.
fn iter_after<'a, 'b, I, J>(mut iter: I, mut prefix: J) -> Option<I>
where
    I: Iterator<Item = Component<'a>> + Clone,
    J: Iterator<Item = Component<'b>>,
{
    loop {
        // Advance a clone so that `iter` still points at the first
        // unmatched component when it is returned.
        let mut iter_next = iter.clone();
        match (iter_next.next(), prefix.next()) {
            // Components agree: keep consuming.
            (Some(ref x), Some(ref y)) if x == y => (),
            // Mismatch: `prefix` is not a prefix of `iter`.
            (Some(_), Some(_)) => return None,
            // `prefix` exhausted: the rest of `iter` is the remainder.
            (Some(_), None) => return Some(iter),
            // Both exhausted: empty remainder.
            (None, None) => return Some(iter),
            // `iter` ran out before `prefix`: not a prefix.
            (None, Some(_)) => return None,
        }
        iter = iter_next;
    }
}

/// A single path component.
///
/// Accessed using the [RelativePath::components] iterator.
///
/// # Examples
///
/// ```rust
/// use relative_path::{Component, RelativePath};
///
/// let path = RelativePath::new("foo/../bar/./baz");
/// let mut it = path.components();
///
/// assert_eq!(Some(Component::Normal("foo")), it.next());
/// assert_eq!(Some(Component::ParentDir), it.next());
/// assert_eq!(Some(Component::Normal("bar")), it.next());
/// assert_eq!(Some(Component::CurDir), it.next());
/// assert_eq!(Some(Component::Normal("baz")), it.next());
/// assert_eq!(None, it.next());
/// ```
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum Component<'a> {
    /// The current directory `.`.
    CurDir,
    /// The parent directory `..`.
    ParentDir,
    /// A normal path component as a string.
    Normal(&'a str),
}

impl<'a> Component<'a> {
    /// Extracts the underlying [str] slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use relative_path::{RelativePath, Component};
    ///
    /// let path = RelativePath::new("./tmp/../foo/bar.txt");
    /// let components: Vec<_> = path.components().map(Component::as_str).collect();
    /// assert_eq!(&components, &[".", "tmp", "..", "foo", "bar.txt"]);
    /// ```
    ///
    /// [str]: https://doc.rust-lang.org/std/primitive.str.html
    pub fn as_str(self) -> &'a str {
        use self::Component::*;

        // Map the special components back to their canonical spellings.
        match self {
            CurDir => CURRENT_STR,
            ParentDir => PARENT_STR,
            Normal(name) => name,
        }
    }
}

/// Traverse the given components and apply to the provided stack.
///
/// This takes '.', and '..' into account. Where '.' doesn't change the
/// stack, and '..' pops the last item or further adds parent components.
#[inline(always)]
fn relative_traversal<'a, C>(stack: &mut Vec<&'a str>, components: C)
where
    C: IntoIterator<Item = Component<'a>>,
{
    use self::Component::*;

    for c in components {
        match c {
            // `.` has no effect on the stack.
            CurDir => (),
            ParentDir => match stack.last().copied() {
                // Nothing left to pop (or the top is already `..`):
                // accumulate another leading parent component instead.
                Some(PARENT_STR) | None => {
                    stack.push(PARENT_STR);
                }
                // Otherwise `..` cancels the most recent component.
                _ => {
                    stack.pop();
                }
            },
            Normal(name) => stack.push(name),
        }
    }
}

/// Iterator over all the components in a relative path.
#[derive(Clone)]
pub struct Components<'a> {
    // Unparsed remainder of the path; consumed from either end by the
    // forward and backward iterator implementations below.
    source: &'a str,
}

impl<'a> Iterator for Components<'a> {
    type Item = Component<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        // Skip any run of leading separators, then take everything up to
        // the next separator (or the rest of the string) as one component.
        self.source = self.source.trim_start_matches(SEP);

        let slice = match self.source.find(SEP) {
            Some(i) => {
                let (slice, rest) = self.source.split_at(i);
                self.source = rest.trim_start_matches(SEP);
                slice
            }
            // No separator left: the remainder is the final component.
            None => mem::replace(&mut self.source, ""),
        };

        // Classify the raw slice; an empty slice means iteration is done.
        match slice {
            "" => None,
            "." => Some(Component::CurDir),
            ".." => Some(Component::ParentDir),
            slice => Some(Component::Normal(slice)),
        }
    }
}

impl<'a> DoubleEndedIterator for Components<'a> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Mirror image of `next`: trim trailing separators and split off
        // the last component.
        self.source = self.source.trim_end_matches(SEP);

        let slice = match self.source.rfind(SEP) {
            Some(i) => {
                let (rest, slice) = self.source.split_at(i + 1);
                self.source = rest.trim_end_matches(SEP);
                slice
            }
            // No separator left: the remainder is the final component.
            None => mem::replace(&mut self.source, ""),
        };

        match slice {
            "" => None,
            "." => Some(Component::CurDir),
            ".." => Some(Component::ParentDir),
            slice => Some(Component::Normal(slice)),
        }
    }
}

impl<'a> Components<'a> {
    /// Construct a new component from the given string.
    fn new(source: &'a str) -> Components<'a> {
        Self { source }
    }

    /// Extracts a slice corresponding to the portion of the path remaining for iteration.
/// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let mut components = RelativePath::new("tmp/foo/bar.txt").components(); /// components.next(); /// components.next(); /// /// assert_eq!("bar.txt", components.as_relative_path()); /// ``` pub fn as_relative_path(&self) -> &'a RelativePath { RelativePath::new(self.source) } } impl<'a> cmp::PartialEq for Components<'a> { fn eq(&self, other: &Components<'a>) -> bool { Iterator::eq(self.clone(), other.clone()) } } /// An iterator over the [Component]s of a [RelativePath], as [str] slices. /// /// This `struct` is created by the [iter] method. /// /// [iter]: RelativePath::iter /// [str]: https://doc.rust-lang.org/std/primitive.str.html #[derive(Clone)] pub struct Iter<'a> { inner: Components<'a>, } impl<'a> Iterator for Iter<'a> { type Item = &'a str; fn next(&mut self) -> Option<&'a str> { self.inner.next().map(Component::as_str) } } impl<'a> DoubleEndedIterator for Iter<'a> { fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back().map(Component::as_str) } } /// Error kind for [FromPathError]. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[non_exhaustive] pub enum FromPathErrorKind { /// Non-relative component in path. NonRelative, /// Non-utf8 component in path. NonUtf8, /// Trying to convert a platform-specific path which uses a platform-specific separator. BadSeparator, } /// An error raised when attempting to convert a path using [RelativePathBuf::from_path]. #[derive(Debug, Clone, PartialEq, Eq)] pub struct FromPathError { kind: FromPathErrorKind, } impl FromPathError { /// Gets the underlying [FromPathErrorKind] that provides more details on /// what went wrong. 
/// /// # Examples /// /// ```rust /// use std::path::Path; /// use relative_path::{FromPathErrorKind, RelativePathBuf}; /// /// let result = RelativePathBuf::from_path(Path::new("/hello/world")); /// let e = result.unwrap_err(); /// /// assert_eq!(FromPathErrorKind::NonRelative, e.kind()); /// ``` pub fn kind(&self) -> FromPathErrorKind { self.kind } } impl From<FromPathErrorKind> for FromPathError { fn from(value: FromPathErrorKind) -> Self { Self { kind: value } } } impl fmt::Display for FromPathError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match self.kind { FromPathErrorKind::NonRelative => "path contains non-relative component".fmt(fmt), FromPathErrorKind::NonUtf8 => "path contains non-utf8 component".fmt(fmt), FromPathErrorKind::BadSeparator => { "path contains platform-specific path separator".fmt(fmt) } } } } impl error::Error for FromPathError {} /// An owned, mutable relative path. /// /// This type provides methods to manipulate relative path objects. #[derive(Clone)] pub struct RelativePathBuf { inner: String, } impl RelativePathBuf { /// Create a new relative path buffer. pub fn new() -> RelativePathBuf { RelativePathBuf { inner: String::new(), } } /// Try to convert a [Path] to a [RelativePathBuf]. 
/// /// [Path]: https://doc.rust-lang.org/std/path/struct.Path.html /// /// # Examples /// /// ```rust /// use relative_path::{RelativePath, RelativePathBuf, FromPathErrorKind}; /// use std::path::Path; /// /// assert_eq!( /// Ok(RelativePath::new("foo/bar").to_owned()), /// RelativePathBuf::from_path(Path::new("foo/bar")) /// ); /// ``` pub fn from_path<P: AsRef<path::Path>>(path: P) -> Result<RelativePathBuf, FromPathError> { use std::path::Component::*; let mut buffer = RelativePathBuf::new(); for c in path.as_ref().components() { match c { Prefix(_) | RootDir => return Err(FromPathErrorKind::NonRelative.into()), CurDir => continue, ParentDir => buffer.push(".."), Normal(s) => buffer.push(s.to_str().ok_or(FromPathErrorKind::NonUtf8)?), } } Ok(buffer) } /// Extends `self` with `path`. /// /// If `path` is absolute, it replaces the current path. /// /// # Examples /// /// ```rust /// use relative_path::{RelativePathBuf, RelativePath}; /// /// let mut path = RelativePathBuf::new(); /// path.push("foo"); /// path.push("bar"); /// /// assert_eq!("foo/bar", path); /// ``` pub fn push<P: AsRef<RelativePath>>(&mut self, path: P) { let other = path.as_ref(); let other = if other.starts_with_sep() { &other.inner[1..] } else { &other.inner[..] }; if !self.inner.is_empty() && !self.ends_with_sep() { self.inner.push(SEP); } self.inner.push_str(other) } /// Updates [file_name] to `file_name`. /// /// If [file_name] was [None], this is equivalent to pushing /// `file_name`. /// /// Otherwise it is equivalent to calling [pop] and then pushing /// `file_name`. The new path will be a sibling of the original path. /// (That is, it will have the same parent.) 
/// /// [file_name]: RelativePath::file_name /// [pop]: RelativePathBuf::pop /// [None]: https://doc.rust-lang.org/std/option/enum.Option.html /// /// # Examples /// /// ``` /// use relative_path::RelativePathBuf; /// /// let mut buf = RelativePathBuf::from(""); /// assert!(buf.file_name() == None); /// buf.set_file_name("bar"); /// assert_eq!(RelativePathBuf::from("bar"), buf); /// /// assert!(buf.file_name().is_some()); /// buf.set_file_name("baz.txt"); /// assert_eq!(RelativePathBuf::from("baz.txt"), buf); /// /// buf.push("bar"); /// assert!(buf.file_name().is_some()); /// buf.set_file_name("bar.txt"); /// assert_eq!(RelativePathBuf::from("baz.txt/bar.txt"), buf); /// ``` pub fn set_file_name<S: AsRef<str>>(&mut self, file_name: S) { if self.file_name().is_some() { let popped = self.pop(); debug_assert!(popped); } self.push(file_name.as_ref()); } /// Updates [extension] to `extension`. /// /// Returns `false` and does nothing if [file_name] is [None], /// returns `true` and updates the extension otherwise. /// /// If [extension] is [None], the extension is added; otherwise /// it is replaced. 
/// /// [file_name]: RelativePath::file_name /// [extension]: RelativePath::extension /// [None]: https://doc.rust-lang.org/std/option/enum.Option.html /// /// # Examples /// /// ``` /// use relative_path::{RelativePath, RelativePathBuf}; /// /// let mut p = RelativePathBuf::from("feel/the"); /// /// p.set_extension("force"); /// assert_eq!(RelativePath::new("feel/the.force"), p); /// /// p.set_extension("dark_side"); /// assert_eq!(RelativePath::new("feel/the.dark_side"), p); /// /// assert!(p.pop()); /// p.set_extension("nothing"); /// assert_eq!(RelativePath::new("feel.nothing"), p); /// ``` pub fn set_extension<S: AsRef<str>>(&mut self, extension: S) -> bool { let file_stem = match self.file_stem() { Some(stem) => stem, None => return false, }; let end_file_stem = file_stem[file_stem.len()..].as_ptr() as usize; let start = self.inner.as_ptr() as usize; self.inner.truncate(end_file_stem.wrapping_sub(start)); let extension = extension.as_ref(); if !extension.is_empty() { self.inner.push(STEM_SEP); self.inner.push_str(extension); } true } /// Truncates `self` to [parent]. /// /// [parent]: RelativePath::parent /// /// # Examples /// /// ``` /// use relative_path::{RelativePath, RelativePathBuf}; /// /// let mut p = RelativePathBuf::from("test/test.rs"); /// /// assert_eq!(true, p.pop()); /// assert_eq!(RelativePath::new("test"), p); /// assert_eq!(true, p.pop()); /// assert_eq!(RelativePath::new(""), p); /// assert_eq!(false, p.pop()); /// assert_eq!(RelativePath::new(""), p); /// ``` pub fn pop(&mut self) -> bool { match self.parent().map(|p| p.inner.len()) { Some(len) => { self.inner.truncate(len); true } None => false, } } /// Coerce to a [RelativePath] slice. 
pub fn as_relative_path(&self) -> &RelativePath { self } } impl Default for RelativePathBuf { fn default() -> Self { RelativePathBuf::new() } } impl<'a> From<&'a RelativePath> for Cow<'a, RelativePath> { #[inline] fn from(s: &'a RelativePath) -> Cow<'a, RelativePath> { Cow::Borrowed(s) } } impl<'a> From<RelativePathBuf> for Cow<'a, RelativePath> { #[inline] fn from(s: RelativePathBuf) -> Cow<'a, RelativePath> { Cow::Owned(s) } } impl fmt::Debug for RelativePathBuf { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{:?}", &self.inner) } } impl AsRef<RelativePath> for RelativePathBuf { fn as_ref(&self) -> &RelativePath { RelativePath::new(&self.inner) } } impl Borrow<RelativePath> for RelativePathBuf { fn borrow(&self) -> &RelativePath { self.deref() } } impl<'a, T: ?Sized + AsRef<str>> From<&'a T> for RelativePathBuf { fn from(path: &'a T) -> RelativePathBuf { RelativePathBuf { inner: path.as_ref().to_owned(), } } } impl From<String> for RelativePathBuf { fn from(path: String) -> RelativePathBuf { RelativePathBuf { inner: path } } } impl ops::Deref for RelativePathBuf { type Target = RelativePath; fn deref(&self) -> &RelativePath { RelativePath::new(&self.inner) } } impl cmp::PartialEq for RelativePathBuf { fn eq(&self, other: &RelativePathBuf) -> bool { self.components() == other.components() } } impl cmp::Eq for RelativePathBuf {} impl cmp::PartialOrd for RelativePathBuf { fn partial_cmp(&self, other: &RelativePathBuf) -> Option<cmp::Ordering> { self.components().partial_cmp(other.components()) } } impl cmp::Ord for RelativePathBuf { fn cmp(&self, other: &RelativePathBuf) -> cmp::Ordering { self.components().cmp(other.components()) } } impl Hash for RelativePathBuf { fn hash<H: Hasher>(&self, h: &mut H) { self.as_relative_path().hash(h) } } /// A borrowed, immutable relative path. #[repr(transparent)] pub struct RelativePath { inner: str, } /// An error returned from [strip_prefix] if the prefix was not found. 
/// /// [strip_prefix]: RelativePath::strip_prefix #[derive(Debug, Clone, PartialEq, Eq)] pub struct StripPrefixError(()); impl RelativePath { /// Directly wraps a string slice as a `RelativePath` slice. pub fn new<S: AsRef<str> + ?Sized>(s: &S) -> &RelativePath { unsafe { &*(s.as_ref() as *const str as *const RelativePath) } } /// Try to convert a [Path] to a [RelativePath] without allocating a buffer. /// /// [Path]: https://doc.rust-lang.org/std/path/struct.Path.html /// /// # Errors /// /// This requires the path to be a legal, platform-neutral relative path. /// Otherwise various forms of [FromPathError] will be returned as an [Err]. /// /// [Err]: https://doc.rust-lang.org/std/result/enum.Result.html /// /// # Examples /// /// ```rust /// use relative_path::{RelativePath, FromPathErrorKind}; /// /// assert_eq!( /// Ok(RelativePath::new("foo/bar")), /// RelativePath::from_path("foo/bar") /// ); /// /// // Note: absolute paths are different depending on platform. /// if cfg!(windows) { /// let e = RelativePath::from_path("c:\\foo\\bar").unwrap_err(); /// assert_eq!(FromPathErrorKind::NonRelative, e.kind()); /// } /// /// if cfg!(unix) { /// let e = RelativePath::from_path("/foo/bar").unwrap_err(); /// assert_eq!(FromPathErrorKind::NonRelative, e.kind()); /// } /// ``` pub fn from_path<P: ?Sized + AsRef<path::Path>>( path: &P, ) -> Result<&RelativePath, FromPathError> { use std::path::Component::*; let other = path.as_ref(); let s = match other.to_str() { Some(s) => s, None => return Err(FromPathErrorKind::NonUtf8.into()), }; let rel = RelativePath::new(s); // check that the component compositions are equal. 
for (a, b) in other.components().zip(rel.components()) { match (a, b) { (Prefix(_), _) | (RootDir, _) => return Err(FromPathErrorKind::NonRelative.into()), (CurDir, Component::CurDir) => continue, (ParentDir, Component::ParentDir) => continue, (Normal(a), Component::Normal(b)) if a == b => continue, _ => return Err(FromPathErrorKind::BadSeparator.into()), } } Ok(rel) } /// Yields the underlying `str` slice. /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// assert_eq!(RelativePath::new("foo.txt").as_str(), "foo.txt"); /// ``` pub fn as_str(&self) -> &str { &self.inner } /// Returns an object that implements [Display]. /// /// [Display]: https://doc.rust-lang.org/std/fmt/trait.Display.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let path = RelativePath::new("tmp/foo.rs"); /// /// println!("{}", path.display()); /// ``` #[deprecated(note = "RelativePath implements std::fmt::Display directly")] pub fn display(&self) -> Display { Display { path: self } } /// Creates an owned [RelativePathBuf] with path adjoined to self. /// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// /// let path = RelativePath::new("foo/bar"); /// assert_eq!("foo/bar/baz", path.join("baz")); /// ``` pub fn join<P: AsRef<RelativePath>>(&self, path: P) -> RelativePathBuf { let mut out = self.to_relative_path_buf(); out.push(path); out } /// Iterate over all components in this relative path. 
/// /// # Examples /// /// ```rust /// use relative_path::{Component, RelativePath}; /// /// let path = RelativePath::new("foo/bar/baz"); /// let mut it = path.components(); /// /// assert_eq!(Some(Component::Normal("foo")), it.next()); /// assert_eq!(Some(Component::Normal("bar")), it.next()); /// assert_eq!(Some(Component::Normal("baz")), it.next()); /// assert_eq!(None, it.next()); /// ``` pub fn components(&self) -> Components { Components::new(&self.inner) } /// Produces an iterator over the path's components viewed as [str] slices. /// /// For more information about the particulars of how the path is separated /// into components, see [components]. /// /// [components]: Self::components /// [str]: https://doc.rust-lang.org/std/primitive.str.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let mut it = RelativePath::new("/tmp/foo.txt").iter(); /// assert_eq!(it.next(), Some("tmp")); /// assert_eq!(it.next(), Some("foo.txt")); /// assert_eq!(it.next(), None) /// ``` pub fn iter(&self) -> Iter { Iter { inner: self.components(), } } /// Convert to an owned [RelativePathBuf]. pub fn to_relative_path_buf(&self) -> RelativePathBuf { RelativePathBuf::from(self.inner.to_owned()) } /// Build an owned [PathBuf] relative to `base` for the current relative /// path. /// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// use std::path::Path; /// /// let path = RelativePath::new("foo/bar").to_path("."); /// assert_eq!(Path::new("./foo/bar"), path); /// ``` /// /// # Encoding an absolute path /// /// Absolute paths are, in contrast to when using [PathBuf::push] *ignored* /// and will be added unchanged to the buffer. /// /// This is to preserve the probability of a path conversion failing if the /// relative path contains platform-specific absolute path components. 
/// /// ```rust /// use relative_path::RelativePath; /// use std::path::Path; /// /// if cfg!(windows) { /// assert_eq!( /// Path::new("foo\\bar\\baz"), /// RelativePath::new("/bar/baz").to_path("foo") /// ); /// /// assert_eq!( /// Path::new("foo\\c:\\bar\\baz"), /// RelativePath::new("c:\\bar\\baz").to_path("foo") /// ); /// } /// /// if cfg!(unix) { /// assert_eq!( /// Path::new("foo/bar/baz"), /// RelativePath::new("/bar/baz").to_path("foo") /// ); /// /// assert_eq!( /// Path::new("foo/c:\\bar\\baz"), /// RelativePath::new("c:\\bar\\baz").to_path("foo") /// ); /// } /// ``` /// /// [PathBuf]: https://doc.rust-lang.org/std/path/struct.PathBuf.html /// [PathBuf::push]: https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push pub fn to_path<P: AsRef<path::Path>>(&self, base: P) -> path::PathBuf { let mut p = base.as_ref().to_path_buf().into_os_string(); for c in self.components() { p.push(path::MAIN_SEPARATOR.encode_utf8(&mut [0u8, 0u8, 0u8, 0u8])); p.push(c.as_str()); } path::PathBuf::from(p) } /// Build an owned [PathBuf] relative to `base` for the current relative /// path. /// /// This is similar to [to_path][RelativePath::to_path] except that it /// doesn't just unconditionally append one path to the other, instead it /// performs the following operations depending on its own components: /// /// * [Component::CurDir] leaves the `base` unmodified. /// * [Component::ParentDir] removes a component from `base` using /// [path::PathBuf::pop]. /// * [Component::Normal] pushes the given path component onto `base` using /// the same mechanism as [to_path][RelativePath::to_path]. /// /// Note that the exact semantics of the path operation is determined by the /// corresponding [PathBuf] operation. E.g. popping a component off a path /// like `.` will result in an empty path. 
/// /// ```rust /// use relative_path::RelativePath; /// use std::path::Path; /// /// let path = RelativePath::new("..").to_logical_path("."); /// assert_eq!(path, Path::new("")); /// ``` /// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// use std::path::Path; /// /// let path = RelativePath::new("..").to_logical_path("foo/bar"); /// assert_eq!(path, Path::new("foo")); /// ``` /// /// # Encoding an absolute path /// /// Behaves the same as [to_path][RelativePath::to_path] when encoding /// absolute paths. /// /// Absolute paths are, in contrast to when using [PathBuf::push] *ignored* /// and will be added unchanged to the buffer. /// /// This is to preserve the probability of a path conversion failing if the /// relative path contains platform-specific absolute path components. /// /// ```rust /// use relative_path::RelativePath; /// use std::path::Path; /// /// if cfg!(windows) { /// assert_eq!( /// Path::new("foo\\bar\\baz"), /// RelativePath::new("/bar/baz").to_logical_path("foo") /// ); /// /// assert_eq!( /// Path::new("foo\\c:\\bar\\baz"), /// RelativePath::new("c:\\bar\\baz").to_logical_path("foo") /// ); /// } /// /// if cfg!(unix) { /// assert_eq!( /// Path::new("foo/bar/baz"), /// RelativePath::new("/bar/baz").to_logical_path("foo") /// ); /// /// assert_eq!( /// Path::new("foo/c:\\bar\\baz"), /// RelativePath::new("c:\\bar\\baz").to_logical_path("foo") /// ); /// } /// ``` /// /// [PathBuf]: https://doc.rust-lang.org/std/path/struct.PathBuf.html /// [PathBuf::push]: https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push pub fn to_logical_path<P: AsRef<path::Path>>(&self, base: P) -> path::PathBuf { use self::Component::*; let mut p = base.as_ref().to_path_buf().into_os_string(); for c in self.components() { match c { CurDir => continue, ParentDir => { let mut temp = path::PathBuf::from(std::mem::take(&mut p)); temp.pop(); p = temp.into_os_string(); } Normal(c) => { p.push(path::MAIN_SEPARATOR.encode_utf8(&mut [0u8, 
0u8, 0u8, 0u8])); p.push(c); } } } path::PathBuf::from(p) } /// Returns a relative path, without its final [Component] if there is one. /// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// /// assert_eq!(Some(RelativePath::new("foo")), RelativePath::new("foo/bar").parent()); /// assert_eq!(Some(RelativePath::new("")), RelativePath::new("foo").parent()); /// assert_eq!(None, RelativePath::new("").parent()); /// ``` pub fn parent(&self) -> Option<&RelativePath> { use self::Component::*; if self.inner.is_empty() { return None; } let mut it = self.components(); while let Some(CurDir) = it.next_back() {} Some(it.as_relative_path()) } /// Returns the final component of the `RelativePath`, if there is one. /// /// If the path is a normal file, this is the file name. If it's the path of a directory, this /// is the directory name. /// /// Returns [None] If the path terminates in `..`. /// /// [None]: https://doc.rust-lang.org/std/option/enum.Option.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// assert_eq!(Some("bin"), RelativePath::new("usr/bin/").file_name()); /// assert_eq!(Some("foo.txt"), RelativePath::new("tmp/foo.txt").file_name()); /// assert_eq!(Some("foo.txt"), RelativePath::new("tmp/foo.txt/").file_name()); /// assert_eq!(Some("foo.txt"), RelativePath::new("foo.txt/.").file_name()); /// assert_eq!(Some("foo.txt"), RelativePath::new("foo.txt/.//").file_name()); /// assert_eq!(None, RelativePath::new("foo.txt/..").file_name()); /// assert_eq!(None, RelativePath::new("/").file_name()); /// ``` pub fn file_name(&self) -> Option<&str> { use self::Component::*; let mut it = self.components(); while let Some(c) = it.next_back() { return match c { CurDir => continue, Normal(name) => Some(name), _ => None, }; } None } /// Returns a relative path that, when joined onto `base`, yields `self`. /// /// # Errors /// /// If `base` is not a prefix of `self` (i.e. [starts_with] /// returns `false`), returns [Err]. 
/// /// [starts_with]: Self::starts_with /// [Err]: https://doc.rust-lang.org/std/result/enum.Result.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let path = RelativePath::new("test/haha/foo.txt"); /// /// assert_eq!(path.strip_prefix("test"), Ok(RelativePath::new("haha/foo.txt"))); /// assert_eq!(path.strip_prefix("test").is_ok(), true); /// assert_eq!(path.strip_prefix("haha").is_ok(), false); /// ``` pub fn strip_prefix<P: AsRef<RelativePath>>( &self, base: P, ) -> Result<&RelativePath, StripPrefixError> { iter_after(self.components(), base.as_ref().components()) .map(|c| c.as_relative_path()) .ok_or(StripPrefixError(())) } /// Determines whether `base` is a prefix of `self`. /// /// Only considers whole path components to match. /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let path = RelativePath::new("etc/passwd"); /// /// assert!(path.starts_with("etc")); /// /// assert!(!path.starts_with("e")); /// ``` pub fn starts_with<P: AsRef<RelativePath>>(&self, base: P) -> bool { iter_after(self.components(), base.as_ref().components()).is_some() } /// Determines whether `child` is a suffix of `self`. /// /// Only considers whole path components to match. /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let path = RelativePath::new("etc/passwd"); /// /// assert!(path.ends_with("passwd")); /// ``` pub fn ends_with<P: AsRef<RelativePath>>(&self, child: P) -> bool { iter_after(self.components().rev(), child.as_ref().components().rev()).is_some() } /// Creates an owned [RelativePathBuf] like `self` but with the given file name. /// /// See [set_file_name] for more details. 
/// /// [set_file_name]: RelativePathBuf::set_file_name /// /// # Examples /// /// ``` /// use relative_path::{RelativePath, RelativePathBuf}; /// /// let path = RelativePath::new("tmp/foo.txt"); /// assert_eq!(path.with_file_name("bar.txt"), RelativePathBuf::from("tmp/bar.txt")); /// /// let path = RelativePath::new("tmp"); /// assert_eq!(path.with_file_name("var"), RelativePathBuf::from("var")); /// ``` pub fn with_file_name<S: AsRef<str>>(&self, file_name: S) -> RelativePathBuf { let mut buf = self.to_relative_path_buf(); buf.set_file_name(file_name); buf } /// Extracts the stem (non-extension) portion of [file_name]. /// /// [file_name]: Self::file_name /// /// The stem is: /// /// * [None], if there is no file name; /// * The entire file name if there is no embedded `.`; /// * The entire file name if the file name begins with `.` and has no other `.`s within; /// * Otherwise, the portion of the file name before the final `.` /// /// [None]: https://doc.rust-lang.org/std/option/enum.Option.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// let path = RelativePath::new("foo.rs"); /// /// assert_eq!("foo", path.file_stem().unwrap()); /// ``` pub fn file_stem(&self) -> Option<&str> { self.file_name() .map(split_file_at_dot) .and_then(|(before, after)| before.or(after)) } /// Extracts the extension of [file_name], if possible. 
/// /// The extension is: /// /// * [None], if there is no file name; /// * [None], if there is no embedded `.`; /// * [None], if the file name begins with `.` and has no other `.`s within; /// * Otherwise, the portion of the file name after the final `.` /// /// [file_name]: Self::file_name /// [None]: https://doc.rust-lang.org/std/option/enum.Option.html /// /// # Examples /// /// ``` /// use relative_path::RelativePath; /// /// assert_eq!(Some("rs"), RelativePath::new("foo.rs").extension()); /// assert_eq!(None, RelativePath::new(".rs").extension()); /// assert_eq!(Some("rs"), RelativePath::new("foo.rs/.").extension()); /// ``` pub fn extension(&self) -> Option<&str> { self.file_name() .map(split_file_at_dot) .and_then(|(before, after)| before.and(after)) } /// Creates an owned [RelativePathBuf] like `self` but with the given extension. /// /// See [set_extension] for more details. /// /// [set_extension]: RelativePathBuf::set_extension /// /// # Examples /// /// ``` /// use relative_path::{RelativePath, RelativePathBuf}; /// /// let path = RelativePath::new("foo.rs"); /// assert_eq!(path.with_extension("txt"), RelativePathBuf::from("foo.txt")); /// ``` pub fn with_extension<S: AsRef<str>>(&self, extension: S) -> RelativePathBuf { let mut buf = self.to_relative_path_buf(); buf.set_extension(extension); buf } /// Build an owned [RelativePathBuf], joined with the given path and normalized. 
/// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// /// assert_eq!( /// RelativePath::new("foo/baz.txt"), /// RelativePath::new("foo/bar").join_normalized("../baz.txt").as_relative_path() /// ); /// /// assert_eq!( /// RelativePath::new("../foo/baz.txt"), /// RelativePath::new("../foo/bar").join_normalized("../baz.txt").as_relative_path() /// ); /// ``` pub fn join_normalized<P: AsRef<RelativePath>>(&self, path: P) -> RelativePathBuf { let mut stack = Vec::new(); relative_traversal(&mut stack, self.components()); relative_traversal(&mut stack, path.as_ref().components()); RelativePathBuf::from(stack.join("/")) } /// Return an owned [RelativePathBuf], with all non-normal components moved to the beginning of /// the path. /// /// This permits for a normalized representation of different relative components. /// /// Normalization is a _destructive_ operation if the path references an actual filesystem /// path. /// An example of this is symlinks under unix, a path like `foo/../bar` might reference a /// different location other than `./bar`. /// /// Normalization is a logical operation that is only valid if the relative path is part of /// some context which doesn't have semantics that causes it to break, like symbolic links. /// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// /// assert_eq!( /// "../foo/baz.txt", /// RelativePath::new("../foo/./bar/../baz.txt").normalize() /// ); /// /// assert_eq!( /// "", /// RelativePath::new(".").normalize() /// ); /// ``` pub fn normalize(&self) -> RelativePathBuf { let mut stack = Vec::new(); relative_traversal(&mut stack, self.components()); RelativePathBuf::from(stack.join("/")) } /// Constructs a relative path from the current path, to `path`. 
/// /// # Examples /// /// ```rust /// use relative_path::RelativePath; /// /// assert_eq!( /// "../../e/f", /// RelativePath::new("a/b/c/d").relative(RelativePath::new("a/b/e/f")) /// ); /// /// assert_eq!( /// "../bbb", /// RelativePath::new("a/../aaa").relative(RelativePath::new("b/../bbb")) /// ); /// /// let p = RelativePath::new("git/relative-path"); /// let r = RelativePath::new("git"); /// assert_eq!("relative-path", r.relative(p)); /// assert_eq!("..", p.relative(r)); /// /// let p = RelativePath::new("../../git/relative-path"); /// let r = RelativePath::new("git"); /// assert_eq!("../../../git/relative-path", r.relative(p)); /// assert_eq!("", p.relative(r)); /// /// let a = RelativePath::new("foo/bar/bap/foo.h"); /// let b = RelativePath::new("../arch/foo.h"); /// assert_eq!("../../../../../arch/foo.h", a.relative(b)); /// assert_eq!("", b.relative(a)); /// ``` pub fn relative<P: AsRef<RelativePath>>(&self, path: P) -> RelativePathBuf { let mut from = Vec::new(); let mut to = Vec::new(); relative_traversal(&mut from, self.components()); relative_traversal(&mut to, path.as_ref().components()); // Special case: The path we are traversing from can't contain unnamed // components. A relative path might be any path, like `/`, or // `/foo/bar/baz`, and these components cannot be named in the relative // traversal. // // Also note that `relative_traversal` guarantees that all ParentDir // components are at the head of the stack. if !from.is_empty() && from[0] == PARENT_STR { return RelativePathBuf::new(); } let mut from = from.into_iter(); let mut to = to.into_iter(); // keep track of the last component tracked in to, since we need to // append it after we've identified common components. 
let tail; let mut buffer = RelativePathBuf::new(); // strip common prefixes loop { match (from.next(), to.next()) { (Some(from), Some(to)) if from == to => continue, (from, to) => { if from.is_some() { buffer.push(PARENT_STR); } tail = to; break; } } } for c in from.map(|_| PARENT_STR).chain(tail).chain(to) { buffer.push(c); } buffer } /// Check if path starts with a path separator. fn starts_with_sep(&self) -> bool { self.inner.starts_with(SEP) } /// Check if path ends with a path separator. fn ends_with_sep(&self) -> bool { self.inner.ends_with(SEP) } } impl ToOwned for RelativePath { type Owned = RelativePathBuf; fn to_owned(&self) -> RelativePathBuf { self.to_relative_path_buf() } } impl fmt::Debug for RelativePath { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{:?}", &self.inner) } } impl AsRef<str> for RelativePathBuf { fn as_ref(&self) -> &str { &self.inner } } impl AsRef<RelativePath> for String { fn as_ref(&self) -> &RelativePath { RelativePath::new(self) } } impl AsRef<RelativePath> for str { fn as_ref(&self) -> &RelativePath { RelativePath::new(self) } } impl AsRef<RelativePath> for RelativePath { fn as_ref(&self) -> &RelativePath { self } } impl cmp::PartialEq for RelativePath { fn eq(&self, other: &RelativePath) -> bool { self.components() == other.components() } } impl cmp::Eq for RelativePath {} impl cmp::PartialOrd for RelativePath { fn partial_cmp(&self, other: &RelativePath) -> Option<cmp::Ordering> { self.components().partial_cmp(other.components()) } } impl cmp::Ord for RelativePath { fn cmp(&self, other: &RelativePath) -> cmp::Ordering { self.components().cmp(other.components()) } } impl Hash for RelativePath { fn hash<H: Hasher>(&self, h: &mut H) { for c in self.components() { c.hash(h); } } } impl fmt::Display for RelativePath { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.inner, f) } } impl fmt::Display for RelativePathBuf { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
fmt::Display::fmt(&self.inner, f) } } /// Helper struct for printing relative paths. /// /// This is not strictly necessary in the same sense as it is for [Display], /// because relative paths are guaranteed to be valid UTF-8. But the behavior /// is preserved to simplify the transition between [Path] and [RelativePath]. /// /// [Path]: https://doc.rust-lang.org/std/path/struct.Path.html /// [Display]: https://doc.rust-lang.org/std/fmt/trait.Display.html pub struct Display<'a> { path: &'a RelativePath, } impl<'a> fmt::Debug for Display<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.path, f) } } impl<'a> fmt::Display for Display<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.path, f) } } #[cfg(feature = "serde")] impl serde::ser::Serialize for RelativePathBuf { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer, { serializer.serialize_str(&self.inner) } } #[cfg(feature = "serde")] impl<'de> serde::de::Deserialize<'de> for RelativePathBuf { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::de::Deserializer<'de>, { struct RelativePathBufVisitor; impl<'de> serde::de::Visitor<'de> for RelativePathBufVisitor { type Value = RelativePathBuf; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a relative path") } fn visit_string<E>(self, input: String) -> Result<Self::Value, E> where E: serde::de::Error, { Ok(RelativePathBuf::from(input)) } fn visit_str<E>(self, input: &str) -> Result<Self::Value, E> where E: serde::de::Error, { Ok(RelativePathBuf::from(input.to_owned())) } } deserializer.deserialize_any(RelativePathBufVisitor) } } #[cfg(feature = "serde")] impl serde::ser::Serialize for RelativePath { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer, { serializer.serialize_str(&self.inner) } } macro_rules! 
impl_cmp { ($lhs:ty, $rhs:ty) => { impl<'a, 'b> PartialEq<$rhs> for $lhs { #[inline] fn eq(&self, other: &$rhs) -> bool { <RelativePath as PartialEq>::eq(self, other) } } impl<'a, 'b> PartialEq<$lhs> for $rhs { #[inline] fn eq(&self, other: &$lhs) -> bool { <RelativePath as PartialEq>::eq(self, other) } } impl<'a, 'b> PartialOrd<$rhs> for $lhs { #[inline] fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> { <RelativePath as PartialOrd>::partial_cmp(self, other) } } impl<'a, 'b> PartialOrd<$lhs> for $rhs { #[inline] fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> { <RelativePath as PartialOrd>::partial_cmp(self, other) } } }; } impl_cmp!(RelativePathBuf, RelativePath); impl_cmp!(RelativePathBuf, &'a RelativePath); impl_cmp!(Cow<'a, RelativePath>, RelativePath); impl_cmp!(Cow<'a, RelativePath>, &'b RelativePath); impl_cmp!(Cow<'a, RelativePath>, RelativePathBuf); macro_rules! impl_cmp_str { ($lhs:ty, $rhs:ty) => { impl<'a, 'b> PartialEq<$rhs> for $lhs { #[inline] fn eq(&self, other: &$rhs) -> bool { <RelativePath as PartialEq>::eq(self, other.as_ref()) } } impl<'a, 'b> PartialEq<$lhs> for $rhs { #[inline] fn eq(&self, other: &$lhs) -> bool { <RelativePath as PartialEq>::eq(self.as_ref(), other) } } impl<'a, 'b> PartialOrd<$rhs> for $lhs { #[inline] fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> { <RelativePath as PartialOrd>::partial_cmp(self, other.as_ref()) } } impl<'a, 'b> PartialOrd<$lhs> for $rhs { #[inline] fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> { <RelativePath as PartialOrd>::partial_cmp(self.as_ref(), other) } } }; } impl_cmp_str!(RelativePathBuf, str); impl_cmp_str!(RelativePathBuf, &'a str); impl_cmp_str!(RelativePathBuf, String); impl_cmp_str!(RelativePath, str); impl_cmp_str!(RelativePath, &'a str); impl_cmp_str!(RelativePath, String); impl_cmp_str!(&'a RelativePath, str); impl_cmp_str!(&'a RelativePath, String); #[cfg(test)] mod tests { use super::*; use std::path::Path; macro_rules! 
t( ($path:expr, iter: $iter:expr) => ( { let path = RelativePath::new($path); // Forward iteration let comps = path.iter().map(str::to_string).collect::<Vec<String>>(); let exp: &[&str] = &$iter; let exps = exp.iter().map(|s| s.to_string()).collect::<Vec<String>>(); assert!(comps == exps, "iter: Expected {:?}, found {:?}", exps, comps); // Reverse iteration let comps = RelativePath::new($path).iter().rev().map(str::to_string) .collect::<Vec<String>>(); let exps = exps.into_iter().rev().collect::<Vec<String>>(); assert!(comps == exps, "iter().rev(): Expected {:?}, found {:?}", exps, comps); } ); ($path:expr, parent: $parent:expr, file_name: $file:expr) => ( { let path = RelativePath::new($path); let parent = path.parent().map(|p| p.as_str()); let exp_parent: Option<&str> = $parent; assert!(parent == exp_parent, "parent: Expected {:?}, found {:?}", exp_parent, parent); let file = path.file_name(); let exp_file: Option<&str> = $file; assert!(file == exp_file, "file_name: Expected {:?}, found {:?}", exp_file, file); } ); ($path:expr, file_stem: $file_stem:expr, extension: $extension:expr) => ( { let path = RelativePath::new($path); let stem = path.file_stem(); let exp_stem: Option<&str> = $file_stem; assert!(stem == exp_stem, "file_stem: Expected {:?}, found {:?}", exp_stem, stem); let ext = path.extension(); let exp_ext: Option<&str> = $extension; assert!(ext == exp_ext, "extension: Expected {:?}, found {:?}", exp_ext, ext); } ); ($path:expr, iter: $iter:expr, parent: $parent:expr, file_name: $file:expr, file_stem: $file_stem:expr, extension: $extension:expr) => ( { t!($path, iter: $iter); t!($path, parent: $parent, file_name: $file); t!($path, file_stem: $file_stem, extension: $extension); } ); ); fn assert_components(components: &[&str], path: &RelativePath) { let components = components .iter() .cloned() .map(Component::Normal) .collect::<Vec<_>>(); let result: Vec<_> = path.components().collect(); assert_eq!(&components[..], &result[..]); } fn rp(input: &str) -> 
&RelativePath { RelativePath::new(input) } #[test] pub fn test_decompositions() { t!("", iter: [], parent: None, file_name: None, file_stem: None, extension: None ); t!("foo", iter: ["foo"], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("/", iter: [], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("/foo", iter: ["foo"], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("foo/", iter: ["foo"], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("/foo/", iter: ["foo"], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("foo/bar", iter: ["foo", "bar"], parent: Some("foo"), file_name: Some("bar"), file_stem: Some("bar"), extension: None ); t!("/foo/bar", iter: ["foo", "bar"], parent: Some("/foo"), file_name: Some("bar"), file_stem: Some("bar"), extension: None ); t!("///foo///", iter: ["foo"], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("///foo///bar", iter: ["foo", "bar"], parent: Some("///foo"), file_name: Some("bar"), file_stem: Some("bar"), extension: None ); t!("./.", iter: [".", "."], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("/..", iter: [".."], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("../", iter: [".."], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("foo/.", iter: ["foo", "."], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("foo/..", iter: ["foo", ".."], parent: Some("foo"), file_name: None, file_stem: None, extension: None ); t!("foo/./", iter: ["foo", "."], parent: Some(""), file_name: Some("foo"), file_stem: Some("foo"), extension: None ); t!("foo/./bar", iter: ["foo", ".", "bar"], parent: Some("foo/."), file_name: Some("bar"), file_stem: Some("bar"), extension: None ); t!("foo/../", iter: ["foo", ".."], 
parent: Some("foo"), file_name: None, file_stem: None, extension: None ); t!("foo/../bar", iter: ["foo", "..", "bar"], parent: Some("foo/.."), file_name: Some("bar"), file_stem: Some("bar"), extension: None ); t!("./a", iter: [".", "a"], parent: Some("."), file_name: Some("a"), file_stem: Some("a"), extension: None ); t!(".", iter: ["."], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("./", iter: ["."], parent: Some(""), file_name: None, file_stem: None, extension: None ); t!("a/b", iter: ["a", "b"], parent: Some("a"), file_name: Some("b"), file_stem: Some("b"), extension: None ); t!("a//b", iter: ["a", "b"], parent: Some("a"), file_name: Some("b"), file_stem: Some("b"), extension: None ); t!("a/./b", iter: ["a", ".", "b"], parent: Some("a/."), file_name: Some("b"), file_stem: Some("b"), extension: None ); t!("a/b/c", iter: ["a", "b", "c"], parent: Some("a/b"), file_name: Some("c"), file_stem: Some("c"), extension: None ); t!(".foo", iter: [".foo"], parent: Some(""), file_name: Some(".foo"), file_stem: Some(".foo"), extension: None ); } #[test] pub fn test_stem_ext() { t!("foo", file_stem: Some("foo"), extension: None ); t!("foo.", file_stem: Some("foo"), extension: Some("") ); t!(".foo", file_stem: Some(".foo"), extension: None ); t!("foo.txt", file_stem: Some("foo"), extension: Some("txt") ); t!("foo.bar.txt", file_stem: Some("foo.bar"), extension: Some("txt") ); t!("foo.bar.", file_stem: Some("foo.bar"), extension: Some("") ); t!(".", file_stem: None, extension: None); t!("..", file_stem: None, extension: None); t!("", file_stem: None, extension: None); } #[test] pub fn test_set_file_name() { macro_rules! 
tfn( ($path:expr, $file:expr, $expected:expr) => ( { let mut p = RelativePathBuf::from($path); p.set_file_name($file); assert!(p.as_str() == $expected, "setting file name of {:?} to {:?}: Expected {:?}, got {:?}", $path, $file, $expected, p.as_str()); }); ); tfn!("foo", "foo", "foo"); tfn!("foo", "bar", "bar"); tfn!("foo", "", ""); tfn!("", "foo", "foo"); tfn!(".", "foo", "./foo"); tfn!("foo/", "bar", "bar"); tfn!("foo/.", "bar", "bar"); tfn!("..", "foo", "../foo"); tfn!("foo/..", "bar", "foo/../bar"); tfn!("/", "foo", "/foo"); } #[test] pub fn test_set_extension() { macro_rules! tse( ($path:expr, $ext:expr, $expected:expr, $output:expr) => ( { let mut p = RelativePathBuf::from($path); let output = p.set_extension($ext); assert!(p.as_str() == $expected && output == $output, "setting extension of {:?} to {:?}: Expected {:?}/{:?}, got {:?}/{:?}", $path, $ext, $expected, $output, p.as_str(), output); }); ); tse!("foo", "txt", "foo.txt", true); tse!("foo.bar", "txt", "foo.txt", true); tse!("foo.bar.baz", "txt", "foo.bar.txt", true); tse!(".test", "txt", ".test.txt", true); tse!("foo.txt", "", "foo", true); tse!("foo", "", "foo", true); tse!("", "foo", "", false); tse!(".", "foo", ".", false); tse!("foo/", "bar", "foo.bar", true); tse!("foo/.", "bar", "foo.bar", true); tse!("..", "foo", "..", false); tse!("foo/..", "bar", "foo/..", false); tse!("/", "foo", "/", false); } #[test] fn test_eq_recievers() { use std::borrow::Cow; let borrowed: &RelativePath = RelativePath::new("foo/bar"); let mut owned: RelativePathBuf = RelativePathBuf::new(); owned.push("foo"); owned.push("bar"); let borrowed_cow: Cow<RelativePath> = borrowed.into(); let owned_cow: Cow<RelativePath> = owned.clone().into(); macro_rules! 
t { ($($current:expr),+) => { $( assert_eq!($current, borrowed); assert_eq!($current, owned); assert_eq!($current, borrowed_cow); assert_eq!($current, owned_cow); )+ } } t!(borrowed, owned, borrowed_cow, owned_cow); } #[test] pub fn test_compare() { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; fn hash<T: Hash>(t: T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } macro_rules! tc( ($path1:expr, $path2:expr, eq: $eq:expr, starts_with: $starts_with:expr, ends_with: $ends_with:expr, relative_from: $relative_from:expr) => ({ let path1 = RelativePath::new($path1); let path2 = RelativePath::new($path2); let eq = path1 == path2; assert!(eq == $eq, "{:?} == {:?}, expected {:?}, got {:?}", $path1, $path2, $eq, eq); assert!($eq == (hash(path1) == hash(path2)), "{:?} == {:?}, expected {:?}, got {} and {}", $path1, $path2, $eq, hash(path1), hash(path2)); let starts_with = path1.starts_with(path2); assert!(starts_with == $starts_with, "{:?}.starts_with({:?}), expected {:?}, got {:?}", $path1, $path2, $starts_with, starts_with); let ends_with = path1.ends_with(path2); assert!(ends_with == $ends_with, "{:?}.ends_with({:?}), expected {:?}, got {:?}", $path1, $path2, $ends_with, ends_with); let relative_from = path1.strip_prefix(path2) .map(|p| p.as_str()) .ok(); let exp: Option<&str> = $relative_from; assert!(relative_from == exp, "{:?}.strip_prefix({:?}), expected {:?}, got {:?}", $path1, $path2, exp, relative_from); }); ); tc!("", "", eq: true, starts_with: true, ends_with: true, relative_from: Some("") ); tc!("foo", "", eq: false, starts_with: true, ends_with: true, relative_from: Some("foo") ); tc!("", "foo", eq: false, starts_with: false, ends_with: false, relative_from: None ); tc!("foo", "foo", eq: true, starts_with: true, ends_with: true, relative_from: Some("") ); tc!("foo/", "foo", eq: true, starts_with: true, ends_with: true, relative_from: Some("") ); tc!("foo/bar", "foo", eq: false, starts_with: true, ends_with: 
false, relative_from: Some("bar") ); tc!("foo/bar/baz", "foo/bar", eq: false, starts_with: true, ends_with: false, relative_from: Some("baz") ); tc!("foo/bar", "foo/bar/baz", eq: false, starts_with: false, ends_with: false, relative_from: None ); } #[test] fn test_join() { assert_components(&["foo", "bar", "baz"], &rp("foo/bar").join("baz///")); assert_components( &["hello", "world", "foo", "bar", "baz"], &rp("hello/world").join("///foo/bar/baz"), ); assert_components(&["foo", "bar", "baz"], &rp("").join("foo/bar/baz")); } #[test] fn test_components_iterator() { use self::Component::*; assert_eq!( vec![Normal("hello"), Normal("world")], rp("/hello///world//").components().collect::<Vec<_>>() ); } #[test] fn test_to_path_buf() { let path = rp("/hello///world//"); let path_buf = path.to_path("."); let expected = Path::new(".").join("hello").join("world"); assert_eq!(expected, path_buf); } #[test] fn test_eq() { assert_eq!(rp("//foo///bar"), rp("/foo/bar")); assert_eq!(rp("foo///bar"), rp("foo/bar")); assert_eq!(rp("foo"), rp("foo")); assert_eq!(rp("foo"), rp("foo").to_relative_path_buf()); } #[test] fn test_next_back() { use self::Component::*; let mut it = rp("baz/bar///foo").components(); assert_eq!(Some(Normal("foo")), it.next_back()); assert_eq!(Some(Normal("bar")), it.next_back()); assert_eq!(Some(Normal("baz")), it.next_back()); assert_eq!(None, it.next_back()); } #[test] fn test_parent() { let path = rp("baz/./bar/foo//./."); assert_eq!(Some(rp("baz/./bar")), path.parent()); assert_eq!( Some(rp("baz/.")), path.parent().and_then(RelativePath::parent) ); assert_eq!( Some(rp("")), path.parent() .and_then(RelativePath::parent) .and_then(RelativePath::parent) ); assert_eq!( None, path.parent() .and_then(RelativePath::parent) .and_then(RelativePath::parent) .and_then(RelativePath::parent) ); } #[test] fn test_relative_path_buf() { assert_eq!( rp("hello/world/."), rp("/hello///world//").to_owned().join(".") ); } #[test] fn test_normalize() { assert_eq!(rp("c/d"), 
rp("a/.././b/../c/d").normalize()); } #[test] fn test_relative_to() { assert_eq!( rp("foo/foo/bar"), rp("foo/bar").join_normalized("../foo/bar") ); assert_eq!( rp("../c/e"), rp("x/y").join_normalized("../../a/b/../../../c/d/../e") ); } #[test] fn test_from() { assert_eq!( rp("foo/bar").to_owned(), RelativePathBuf::from(String::from("foo/bar")), ); assert_eq!(rp("foo/bar").to_owned(), RelativePathBuf::from("foo/bar"),); } #[test] fn test_default() { assert_eq!(RelativePathBuf::new(), RelativePathBuf::default(),); } #[test] pub fn test_push() { macro_rules! tp( ($path:expr, $push:expr, $expected:expr) => ( { let mut actual = RelativePathBuf::from($path); actual.push($push); assert!(actual.as_str() == $expected, "pushing {:?} onto {:?}: Expected {:?}, got {:?}", $push, $path, $expected, actual.as_str()); }); ); tp!("", "foo", "foo"); tp!("foo", "bar", "foo/bar"); tp!("foo/", "bar", "foo/bar"); tp!("foo//", "bar", "foo//bar"); tp!("foo/.", "bar", "foo/./bar"); tp!("foo./.", "bar", "foo././bar"); tp!("foo", "", "foo/"); tp!("foo", ".", "foo/."); tp!("foo", "..", "foo/.."); } #[test] pub fn test_pop() { macro_rules! tp( ($path:expr, $expected:expr, $output:expr) => ( { let mut actual = RelativePathBuf::from($path); let output = actual.pop(); assert!(actual.as_str() == $expected && output == $output, "popping from {:?}: Expected {:?}/{:?}, got {:?}/{:?}", $path, $expected, $output, actual.as_str(), output); }); ); tp!("", "", false); tp!("/", "", true); tp!("foo", "", true); tp!(".", "", true); tp!("/foo", "", true); tp!("/foo/bar", "/foo", true); tp!("/foo/bar/.", "/foo", true); tp!("foo/bar", "foo", true); tp!("foo/.", "", true); tp!("foo//bar", "foo", true); } #[test] pub fn test_display() { // NB: display delegated to the underlying string. 
assert_eq!(RelativePathBuf::from("foo/bar").to_string(), "foo/bar"); assert_eq!(RelativePath::new("foo/bar").to_string(), "foo/bar"); assert_eq!(format!("{}", RelativePathBuf::from("foo/bar")), "foo/bar"); assert_eq!(format!("{}", RelativePath::new("foo/bar")), "foo/bar"); } #[cfg(unix)] #[test] pub fn test_unix_from_path() { use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; assert_eq!( Err(FromPathErrorKind::NonRelative.into()), RelativePath::from_path("/foo/bar") ); // Continuation byte without continuation. let non_utf8 = OsStr::from_bytes(&[0x80u8]); assert_eq!( Err(FromPathErrorKind::NonUtf8.into()), RelativePath::from_path(non_utf8) ); } #[cfg(windows)] #[test] pub fn test_windows_from_path() { assert_eq!( Err(FromPathErrorKind::NonRelative.into()), RelativePath::from_path("c:\\foo\\bar") ); assert_eq!( Err(FromPathErrorKind::BadSeparator.into()), RelativePath::from_path("foo\\bar") ); } #[cfg(unix)] #[test] pub fn test_unix_owned_from_path() { use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; assert_eq!( Err(FromPathErrorKind::NonRelative.into()), RelativePathBuf::from_path(Path::new("/foo/bar")) ); // Continuation byte without continuation. let non_utf8 = OsStr::from_bytes(&[0x80u8]); assert_eq!( Err(FromPathErrorKind::NonUtf8.into()), RelativePathBuf::from_path(Path::new(non_utf8)) ); } #[cfg(windows)] #[test] pub fn test_windows_owned_from_path() { assert_eq!( Err(FromPathErrorKind::NonRelative.into()), RelativePathBuf::from_path(Path::new("c:\\foo\\bar")) ); } }
//! Hydroflow surface-syntax snippet exercising indexed ports on a shared
//! `union()`/`tee()` pivot operator.
//! NOTE(review): the `// Error:` marker inside the macro suggests this file is
//! a compile-fail / diagnostics test case rather than a runnable example —
//! confirm against the hydroflow test suite before treating it as an example.
use hydroflow::hydroflow_syntax;

fn main() {
    // Build the dataflow graph from the surface syntax below.
    let mut df = hydroflow_syntax! {
        // `pivot` merges its inputs (`union`) and duplicates its output (`tee`).
        pivot = union() -> tee();
        // Aliases bound to the pivot via indexed ports 0 and 1.
        x_0 = [0]pivot;
        x_1 = [1]pivot;
        // Wire the aliases back into indexed ports of themselves.
        x_0 -> [0]x_0;
        x_1[0] -> [1]x_1; // Error: `[1][1]pivot`
    };
    // Run the graph until no more work is immediately available.
    df.run_available();
}
// Register-access reader for INTERP1_POP_LANE0.
// NOTE(review): shape matches svd2rust-generated peripheral code — confirm
// this file is generated before hand-editing.
#[doc = "Reader of register INTERP1_POP_LANE0"]
// Alias over the crate's generic reader; the raw register value is a `u32`.
pub type R = crate::R<u32, super::INTERP1_POP_LANE0>;
// No field accessors are modeled for this register; the empty impl keeps the
// generated-code shape uniform across registers.
impl R {}
use std::convert::TryFrom;
use std::fmt;

/// A single Brainfuck instruction.
///
/// Each variant corresponds one-to-one to one of the eight Brainfuck command
/// characters; the mapping is defined by the [`fmt::Display`] and
/// [`TryFrom<char>`] impls below. The enum is fieldless, so it also derives
/// `Copy` (free to duplicate, avoids needless `.clone()` at call sites).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Code {
    /// `+` — increment the byte at the data pointer.
    MemInc,
    /// `-` — decrement the byte at the data pointer.
    MemDec,
    /// `>` — move the data pointer one cell to the right.
    PtrInc,
    /// `<` — move the data pointer one cell to the left.
    PtrDec,
    /// `.` — write the byte at the data pointer to output.
    SysWrite,
    /// `,` — read one byte of input into the current cell.
    SysRead,
    /// `[` — if the current cell is zero, jump past the matching `]`.
    LoopStart,
    /// `]` — if the current cell is non-zero, jump back to the matching `[`.
    LoopEnd,
}

impl fmt::Display for Code {
    /// Formats the instruction as its canonical single-character form.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let c = match self {
            Self::MemInc => '+',
            Self::MemDec => '-',
            Self::PtrInc => '>',
            Self::PtrDec => '<',
            Self::SysWrite => '.',
            Self::SysRead => ',',
            Self::LoopStart => '[',
            Self::LoopEnd => ']',
        };
        write!(f, "{}", c)
    }
}

impl TryFrom<char> for Code {
    type Error = ();

    /// Parses one Brainfuck command character.
    ///
    /// Returns `Err(())` for any other character — including whitespace and
    /// Brainfuck "comment" characters — so callers can simply skip them.
    fn try_from(from: char) -> Result<Self, ()> {
        match from {
            '+' => Ok(Self::MemInc),
            '-' => Ok(Self::MemDec),
            '>' => Ok(Self::PtrInc),
            '<' => Ok(Self::PtrDec),
            '.' => Ok(Self::SysWrite),
            ',' => Ok(Self::SysRead),
            '[' => Ok(Self::LoopStart),
            ']' => Ok(Self::LoopEnd),
            _ => Err(()),
        }
    }
}
use std::io;

/// Parses a comma-separated Intcode program (e.g. `"1,0,0,3,99"`) into memory.
///
/// Whitespace around each field (including the trailing newline from
/// `read_line`) is ignored.
///
/// # Panics
///
/// Panics if any comma-separated field does not parse as a non-negative
/// integer (this includes an empty input, matching the original behavior).
fn parse_ints(line: String) -> Vec<usize> {
    line.split(',')
        .map(|field| field.trim().parse().unwrap())
        .collect()
}

/// Executes a day-2 Intcode program in place.
///
/// Opcodes: `1` = add, `2` = multiply, `99` = halt. Each non-halt instruction
/// is 4 cells: opcode, two operand addresses, destination address.
///
/// # Panics
///
/// Panics on an unknown opcode or on an operand/destination address outside
/// of `mem`.
fn run(mem: &mut [usize]) {
    // The day-2 machine never jumps, so the program counter always advances
    // by a fixed instruction width of 4.
    for pc in (0..mem.len()).step_by(4) {
        if mem[pc] == 99 {
            return;
        }
        // Operands are position-mode: the cells hold *addresses* of the values.
        let a = mem[mem[pc + 1]];
        let b = mem[mem[pc + 2]];
        let dest = mem[pc + 3];
        let res = match mem[pc] {
            1 => a + b,
            2 => a * b,
            _ => panic!("invalid opcode"),
        };
        mem[dest] = res;
    }
}

/// Part one: reads a program from stdin, runs it, and prints final memory.
fn part_one() {
    let mut line = String::with_capacity(256);
    io::stdin().read_line(&mut line).unwrap();
    let mut ints = parse_ints(line);
    run(&mut ints);
    println!("\n{:?}", ints);
}

/// Part two: brute-forces the `noun`/`verb` inputs (cells 1 and 2) that make
/// the program leave the puzzle's target value in cell 0.
fn part_two() {
    let program = vec![1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,10,1,19,1,19,5,23,1,23,9,27,2,27,6,31,1,31,6,35,2,35,9,39,1,6,39,43,2,10,43,47,1,47,9,51,1,51,6,55,1,55,6,59,2,59,10,63,1,6,63,67,2,6,67,71,1,71,5,75,2,13,75,79,1,10,79,83,1,5,83,87,2,87,10,91,1,5,91,95,2,95,6,99,1,99,6,103,2,103,6,107,2,107,9,111,1,111,5,115,1,115,6,119,2,6,119,123,1,5,123,127,1,127,13,131,1,2,131,135,1,135,10,0,99,2,14,0,0];
    let target = 19690720;
    for noun in 0..100 {
        for verb in 0..100 {
            // Each candidate runs on a fresh copy: `run` mutates memory.
            let mut ints = program.clone();
            ints[1] = noun;
            ints[2] = verb;
            run(&mut ints);
            if ints[0] == target {
                println!("noun={}, verb={}", noun, verb);
                std::process::exit(0);
            }
        }
    }
}

fn main() {
    part_two();
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn day2_test1() {
        let mut ints = parse_ints("1,0,0,0,99".to_string());
        run(&mut ints);
        assert_eq!(ints, vec![2,0,0,0,99])
    }

    #[test]
    fn day2_test2() {
        let mut ints = parse_ints("2,3,0,3,99".to_string());
        run(&mut ints);
        assert_eq!(ints, vec![2,3,0,6,99])
    }

    #[test]
    fn day2_test3() {
        let mut ints = parse_ints("2,4,4,5,99,0".to_string());
        run(&mut ints);
        assert_eq!(ints, vec![2,4,4,5,99,9801])
    }

    #[test]
    fn day2_test4() {
        let mut ints = parse_ints("1,1,1,4,99,5,6,0,99".to_string());
        run(&mut ints);
        assert_eq!(ints, vec![30,1,1,4,2,5,6,0,99])
    }
}
//! Solution for ITP1_1_A.
//! [https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_1_A](https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_1_A)

/// Entry point for the ITP1_1_A solution.
/// Writes `"Hello World"` (plus a newline) to standard output.
#[allow(dead_code)]
pub fn main() {
    let greeting = "Hello World";
    println!("{}", greeting)
}
// Utility functions for corpus preprocessing and ndarray manipulation
// (word-id mapping, context windows, one-hot encoding, axis-wise row picking).
extern crate ndarray;
use crate::params::Update;
use crate::types::*;
use ndarray::{
    Array, Array1, Array2, Array3, ArrayView1, Axis, Dimension, Ix2, Ix3, RemoveAxis, Slice,
};
use ndarray_rand::rand_distr::{StandardNormal, Uniform};
use ndarray_rand::RandomExt;
use std::collections::HashMap;

/// Lowercases `text`, separates the final "." as its own token, and returns
/// `(corpus, word->id, id->word)` where `corpus` is the text as word ids.
/// Ids are assigned in first-appearance order starting from 0.
pub fn preprocess(text: &str) -> (Vec<usize>, HashMap<String, usize>, HashMap<usize, String>) {
    let words = text
        .to_lowercase()
        .replace(".", " .")
        .split(' ')
        .map(|s| s.to_string())
        .collect::<Vec<String>>();
    let mut word_id = HashMap::new();
    let mut id_word = HashMap::new();
    // Starts at -1 so the first increment inside the closure yields id 0.
    let mut id = -1;
    for word in &words {
        word_id.entry(word.clone()).or_insert_with(|| {
            id += 1;
            id_word.insert(id as usize, word.clone());
            id as usize
        });
    }
    let corpus = words.iter().map(|w| word_id[w]).collect::<Vec<usize>>();
    (corpus, word_id, id_word)
}

/// Takes a corpus (a sentence as a sequence of word ids) and returns the
/// context windows and the target words.
pub fn create_contexts_target(
    corpus: &Vec<usize>,
    window_size: usize,
) -> (Vec<Vec<usize>>, Vec<usize>) {
    let m = window_size * 2 + 1; // length of each word's context window (including the word itself)
    let n = match corpus.len().checked_sub(window_size * 2) {
        // number of words that have a full context
        Some(x) => x,
        None => {
            panic!("window_size too large for the corpus!");
        }
    };
    let contexts_target = (0..n)
        .map(|i| (&corpus[i..(i + m)]).to_vec())
        .collect::<Vec<Vec<usize>>>();
    // Target = the middle element of each window.
    let target = contexts_target
        .iter()
        .map(|v| v[window_size])
        .collect::<Vec<usize>>();
    // Context = the window with its middle element removed.
    let contexts = contexts_target
        .iter()
        .map(|v| ([&v[0..window_size], &v[window_size + 1..m]].concat()))
        .collect::<Vec<Vec<usize>>>();
    (contexts, target)
}

/// Array-based variant of [`create_contexts_target`]: returns the contexts as
/// an `Array2` and the targets as an `Array1`.
pub fn create_contexts_target_arr(
    corpus: &Vec<usize>,
    window_size: usize,
) -> (Array2<usize>, Array1<usize>) {
    let m = window_size * 2 + 1; // length of each word's context window (including the word itself)
    let n = match corpus.len().checked_sub(window_size * 2) {
        // number of words that have a full context
        Some(x) => x,
        None => {
            panic!("window_size too large for the corpus!");
        }
    };
    // Row i is the window starting at corpus[i].
    let contexts_target = Array2::from_shape_fn((n, m), |(i, j)| corpus[i + j]);
    let target = contexts_target.index_axis(Axis(1), window_size).to_owned();
    // All column positions except the middle (target) one.
    let context_window: Vec<usize> = (0..m).filter(|i| *i != window_size).collect();
    let contexts = pickup(&contexts_target, Axis(1), &context_window[..]);
    (contexts, target)
}

/// One-hot encodes a vector of word ids into nested `Vec`s of 0/1.
pub fn convert_one_hot_1v(corpus: &Vec<usize>, vocab_size: usize) -> Vec<Vec<i32>> {
    corpus
        .iter()
        .map(|i| {
            let mut v = vec![0; vocab_size];
            v[*i] = 1;
            v
        })
        .collect::<Vec<Vec<i32>>>()
}

/// One-hot encodes each inner id vector via [`convert_one_hot_1v`].
pub fn convert_one_hot_2v(corpus: &Vec<Vec<usize>>, vocab_size: usize) -> Vec<Vec<Vec<i32>>> {
    corpus
        .iter()
        .map(|v| convert_one_hot_1v(v, vocab_size))
        .collect::<Vec<Vec<Vec<i32>>>>()
}

/// One-hot encodes a vector of word ids into an `(len, vocab_size)` f32 array.
pub fn convert_one_hot_1(corpus: &Vec<usize>, vocab_size: usize) -> Array2<f32> {
    let text_len = corpus.len();
    let mut arr = Array2::zeros((text_len, vocab_size));
    for (n, &id) in corpus.iter().enumerate() {
        arr[[n, id]] = 1.0;
    }
    arr
}

/// One-hot encodes nested id vectors into a `(len, context_len, vocab_size)`
/// f32 array. Panics on an empty corpus (the context length is read from the
/// first element).
pub fn convert_one_hot_2(corpus: &Vec<Vec<usize>>, vocab_size: usize) -> Array3<f32> {
    let text_len = corpus.len();
    let context_len = match corpus.get(0) {
        Some(c) => c.len(),
        None => panic!("KAMO: corpus len is zero! therefor context len unknown!"),
    };
    let mut arr = Array3::zeros((text_len, context_len, vocab_size));
    for (mut v, word) in arr.axis_iter_mut(Axis(0)).zip(corpus.iter()) {
        v.assign(&convert_one_hot_1(word, vocab_size));
    }
    arr
}

use rand::seq::SliceRandom;
use rand::thread_rng;

/// Returns the indices `0..range` in a random order.
pub fn random_index(range: usize) -> Vec<usize> {
    let mut vec: Vec<usize> = (0..range).collect();
    vec.shuffle(&mut thread_rng());
    vec
}

// ↓ doesn't work...
// I feel like there ought to be a generic array type like [T; N] (N: usize), but there isn't.
// Trying to use a tuple like (T, T) instead means v[i][j]-style access is impossible.
// use std::ops::Index;
// pub fn vec_to_array<T, U>(v: Vec<U>, n: usize) -> Array2<T>
// where
//     U: Index<T> + Sized,
// {
//     Array2::from_shape_fn((v.len(), n), |(i, j)| v[i][j]);
// }

/// Random `(m, n)` f32 array drawn from the standard normal distribution.
pub fn randarr2d(m: usize, n: usize) -> Arr2d {
    Array::<f32, _>::random((m, n), StandardNormal)
}

/// Random length-`m` f32 vector drawn from the standard normal distribution.
pub fn randarr1d(m: usize) -> Arr1d {
    Array::<f32, _>::random((m,), StandardNormal)
}

/// Random f32 array of shape `dim` (converted to dimensionality `D`),
/// drawn from the standard normal distribution.
pub fn randarr<D: Dimension>(dim: &[usize]) -> Array<f32, D> {
    Array::<f32, _>::random(dim, StandardNormal)
        .into_dimensionality()
        .unwrap()
}

extern crate num_traits;
use num_traits::Zero;

/// Result is smaller than (or equal to) the source array along `axis`
/// (dimensionality unchanged): row `i` of the output is row `idx[i]` of `x`.
pub fn pickup<T: Zero + Copy, D: RemoveAxis>(
    x: &Array<T, D>,
    axis: Axis,
    idx: &[usize],
) -> Array<T, D> {
    assert!(
        x.shape()[axis.0] >= idx.len(),
        "KAMO: sorry, haven't implemented yet"
    );
    // Want the same type with only the outermost length set to idx.len();
    // currently this can only shrink the array.
    let mut a = x
        .slice_axis(axis, Slice::from(0..idx.len()))
        .mapv(|_| T::zero());
    // .to_owned(); // with this, T::zero() would not be needed
    for (i, j) in idx.iter().enumerate() {
        let mut row = a.index_axis_mut(axis, i); // destination
        let base = &x.index_axis(axis, *j); // source
        row.assign(base);
    }
    a
}

/// Same as [`pickup`], but takes the indices as an `ArrayView1`.
pub fn pickup1<T: Zero + Copy, D: RemoveAxis>(
    x: &Array<T, D>,
    axis: Axis,
    idx: ArrayView1<usize>,
) -> Array<T, D> {
    assert!(
        x.shape()[axis.0] >= idx.len(),
        "KAMO: sorry, haven't implemented yet"
    );
    let mut a = x
        .slice_axis(axis, Slice::from(0..idx.len()))
        .mapv(|_| T::zero());
    for (i, j) in idx.iter().enumerate() {
        let mut row = a.index_axis_mut(axis, i); // destination
        let base = &x.index_axis(axis, *j); // source
        row.assign(base);
    }
    a
}

// No longer used, but — perhaps because this overrides an existing method —
// the compiler keeps complaining about it.
// pub trait Len {
//     fn len(&self) -> usize;
// }
// impl Len for ArrayView1<'_, usize> {
//     fn len(&self) -> usize {
//         self.len()
//     }
// }
// impl Len for &[usize] {
//     fn len(&self) -> usize {
//         self.len()
//     }
// }
// Tried to make IntoIter a trait as well, but...
// pub trait LenIter<'a> {
//     type IntoIter: Iterator<Item = &'a usize>;
//     fn len(&self) -> usize;
//     fn iter(&'a self) -> Self::IntoIter;
// }
// impl<'a> LenIter<'a> for &[usize] {
//     type IntoIter = std::slice::Iter<'a, usize>;
//     fn len(&self) -> usize {
//         self.len()
//     }
//     fn iter(&'a self) -> Self::IntoIter {
//         self.iter()
//     }
// }
// impl<'a> LenIter<'a> for ArrayView1<'a, usize> {
//     type IntoIter = ndarray::iter::Iter<'a, usize, ndarray::Ix1>;
//     fn len(&self) -> usize {
//         self.len()
//     }
//     fn iter(&'a self) -> Self::IntoIter {
//         self.iter()
//     }
// }

/// Tried making this generic with a hand-rolled Len trait.
/// As idx, &[T] is accepted but &[T; N] (N=1,2,3...) is rejected.
/// With idx: &[T] it happily accepts &[T; 3] (e.g. idx = &[1,2,3]); why the difference?
/// Using this ended up reporting a stack overflow anyway. No idea why.
// pub fn pickup0<'a, T: Zero + Copy, D: RemoveAxis>(
//     x: &Array<T, D>,
//     axis: Axis,
//     idx: impl IntoIterator<Item = &'a usize> + Len,
//     // idx: LenIter<'a>,
// ) -> Array<T, D> {
//     assert!(
//         x.shape()[axis.0] >= idx.len(),
//         "KAMO: sorry, haven't implemented yet"
//     );
//     // Want the same type with only the outermost length set to idx.len();
//     // currently this can only shrink the array.
//     let mut a = x
//         .slice_axis(axis, Slice::from(0..idx.len()))
//         .mapv(|_| T::zero());
//     // .to_owned(); // with this, T::zero() would not be needed
//     for (i, j) in idx.into_iter().enumerate() {
//         let mut row = a.index_axis_mut(axis, i); // destination
//         let base = &x.index_axis(axis, *j); // source
//         row.assign(base);
//     }
//     a
// }
/// Can also grow beyond the original size along `axis`.
fn pickup2<T: Copy + Zero, D: RemoveAxis>(
    x: &Array<T, D>,
    axis: Axis,
    idx: &[usize],
) -> Array<T, D> {
    // assert!(true); // should probably validate the range of idx and so on.
    let mut s = x.shape().to_vec();
    s[axis.0] = idx.len();
    // Ideally the shape `s` would already be of type D before building the
    // array, but I don't know how to do that.
    // Note: the T: Zero bound exists only so `a` can be initialized; any
    // initial value would do, probably.
    let mut a = Array::zeros(s).into_dimensionality::<D>().expect("no way!");
    for (mut row, i) in a.axis_iter_mut(axis).zip(idx.iter()) {
        row.assign(&x.index_axis(axis, *i));
    }
    a
}

/// Older pickup: rebuilds the array element-wise; only supports 2- or
/// 3-dimensional input and always selects along Axis(0).
pub fn pickup_old<T: Copy, D: Dimension>(x: &Array<T, D>, idx: &[usize]) -> Array<T, D> {
    let x = x.view();
    let (data_len, input_dim) = match x.shape() {
        &[a, _, b] => (a, b),
        &[a, b] => (a, b),
        _ => panic!("KAMO: dimension of x must be 2 or 3 in model.fit!"),
    };
    let dim = x.slice_axis(Axis(0), Slice::from(0..idx.len())).dim();
    match x.ndim() {
        2 => x
            .into_dimensionality::<Ix2>()
            .map(|newx| {
                Array::from_shape_fn((idx.len(), input_dim), |(i, j)| newx[[idx[i], j]])
                    .into_shape(dim)
                    .expect("no way!")
            })
            .expect("no way!"),
        3 => {
            let channel_num = x.shape()[1];
            let newx = x.into_dimensionality::<Ix3>().expect("no way!");
            Array::from_shape_fn((idx.len(), channel_num, input_dim), |(i, j, k)| {
                newx[[idx[i], j, k]]
            })
            .into_shape(dim)
            .expect("no way!")
        }
        _ => panic!("dim must be 2 or 3, for now!"),
    }
}

/// Ad-hoc manual check of the pickup variants (prints via `putsl!`).
pub fn test_pickup() {
    let arr = Array::from_shape_fn((3, 4), |(i, j)| i * j);
    putsl!(arr);
    // putsl!(pickup0(&arr, Axis(0), &[1, 1, 1])); // => error!! what the heck
    let idx: &[_] = &[1, 2, 3];
    // putsl!(pickup0(&arr, Axis(0), idx)); // this works, since the type is specified
    // putsl!(pickup0(&arr, Axis(0), &[1, 1, 1][..])); // ok because it is explicitly a slice
    putsl!(pickup(&arr, Axis(0), &[1, 1, 1, 2, 2])); // the parameter is &[usize] to begin with, so it is inferred
}

/// Replaces every occurrence of `prev` in `v` with `new`.
pub fn replace_item<T: Eq + Clone>(mut v: Vec<T>, prev: T, new: T) -> Vec<T> {
    for i in v.iter_mut() {
        if *i == prev {
            *i = new.clone()
        }
    }
    v
}

/// Drops the leading axis.
pub fn remove_axis<T, D: RemoveAxis>(mut a: Array<T, D>) -> Array<T, D::Smaller> {
    // let mut d = a.shape().to_vec();
    // let f = d.remove(1);
    // d[0] *= f;
    // a.into_shape(d).unwrap().into_dimensionality().unwrap()
    // merge_axes can only be performed when it does not change the order of
    // the data. It works for adjacent axes, and the outer one ends up with
    // length 1. For non-adjacent axes it only works when every axis in
    // between has length 1.
    // a.dim() = (3, 1, 1, 1, 5) -> Axis(0) to Axis(4) is ok
    assert!(a.merge_axes(Axis(0), Axis(1)), "this must never happen!");
    a.index_axis_move(Axis(0), 0)
}

/// Randomly splits `(x, t)` into train/test partitions along Axis(0)
/// according to `ratio` (train : test).
pub fn test_train_split<T: Zero + Copy, D: RemoveAxis>(
    x: Array<T, D>,
    t: Array<T, D>,
    ratio: (usize, usize),
) -> ((Array<T, D>, Array<T, D>), (Array<T, D>, Array<T, D>)) {
    let data_len = x.shape()[0];
    assert_eq!(data_len, t.shape()[0], "x and t must have same length!");
    let idx = random_index(data_len);
    let split_here = data_len * ratio.0 / (ratio.0 + ratio.1);
    let x_train = pickup(&x, Axis(0), &idx[..split_here]);
    let t_train = pickup(&t, Axis(0), &idx[..split_here]);
    let x_test = pickup(&x, Axis(0), &idx[split_here..]);
    let t_test = pickup(&t, Axis(0), &idx[split_here..]);
    ((x_train, t_train), (x_test, t_test))
}

/// Returns `s` reversed (by `char`, not by byte).
pub fn rev_string(s: String) -> String {
    s.chars().rev().collect()
}

/// Inserts a new axis of length `num` at position `axis` and broadcasts
/// `arr` into every slice along it.
pub fn expand<T: Copy + Default, D: Dimension>(
    arr: Array<T, D>,
    axis: Axis,
    num: usize,
) -> Array<T, D::Larger>
where
    D::Larger: RemoveAxis,
{
    let mut s = arr.shape().to_vec();
    let a = axis.0;
    s.insert(a, num);
    let mut arr2 = Array::from_elem(s, T::default())
        .into_dimensionality::<D::Larger>()
        .unwrap();
    for mut sub in arr2.axis_iter_mut(axis) {
        sub.assign(&arr);
    }
    arr2
}

/// Splits `arr` along `axis` into `(first left_size elements, the rest)`.
pub fn split_arr<T: Copy + Default, D: Dimension>(
    arr: Array<T, D>,
    axis: Axis,
    left_size: usize,
) -> (Array<T, D>, Array<T, D>) {
    let arr1 = arr.slice_axis(axis, Slice::from(..left_size)).to_owned();
    let arr2 = arr.slice_axis(axis, Slice::from(left_size..)).to_owned();
    (arr1, arr2)
}
extern crate lru; use lru::LruCache; use crate::page::{Page, PageType, PAGE_SIZE, PageError}; use std::cell::{RefCell, RefMut}; use std::rc::Rc; use std::iter::Map; use crate::BTree; use std::fs::{File, OpenOptions}; use anyhow::{Result, Error}; use std::borrow::BorrowMut; use std::io::{Seek, SeekFrom, Read}; use std::ops::Deref; const LRU_CACHE_SIZE: usize = 50; #[derive(Debug)] pub struct Pager_manager{ lru:LruCache<u32,Rc<RefCell<Page>>>, pub db_meta_page: Rc<RefCell<Page>>, fd: Rc<RefCell<File>> } impl Pager_manager{ pub fn new(path: &str) -> Pager_manager{ let lru = LruCache::<u32,Rc<RefCell<Page>>>::new(LRU_CACHE_SIZE); let mut fd = OpenOptions::new() .create(true) .read(true) .write(true) .open(path).expect("could not open btree file"); let rc_fd = Rc::new(RefCell::new(fd)); let db_meta_page; if rc_fd.deref().borrow_mut().metadata().unwrap().len() == 0 { db_meta_page = Rc::new(RefCell::new(Pager_manager::init_as_empty(rc_fd.clone()))); } else { db_meta_page = Rc::new(RefCell::new(Pager_manager::init_load(rc_fd.clone()))); } Pager_manager{ lru, db_meta_page, fd: rc_fd } } pub fn init_as_empty(fd: Rc<RefCell<File>>) -> Page { println!("init empty db"); let mut db_meta_page = Page::new(fd, 0, PageType::DB_META).unwrap(); db_meta_page.sync(); db_meta_page } pub fn init_load(fd: Rc<RefCell<File>>) -> Page { println!("load empty db"); let mut page = Page::default(); { let mut _fd = fd.deref().borrow_mut(); _fd.seek(SeekFrom::Start((page.index as usize * PAGE_SIZE) as u64)); _fd.read_exact(page.buf.borrow_mut()); } page.page_type = page.get_page_type(); assert_eq!(page.page_type, PageType::DB_META); page.fd = Some(fd); // page.init_layout(); page } pub fn new_page(&mut self, pt: PageType) -> Result<Rc<RefCell<Page>>> { let mut meta_page = self.db_meta_page.deref().borrow_mut(); let max_index = meta_page.total_pages(); let res = Page::new(self.fd.clone(), max_index, pt)?; self.lru.put(max_index, Rc::new(RefCell::new(res))); meta_page.set_total_page(max_index + 1); 
Ok(self.lru.get(&max_index).unwrap().clone()) } fn load_page(&mut self, index: u32) -> Result<Page> { if index >= self.db_meta_page.deref().borrow().total_pages() { panic!("load wrong!!") } let mut page = Page::default(); { let mut _fd = self.fd.deref().borrow_mut(); page.index = index; _fd.seek(SeekFrom::Start((index as usize * PAGE_SIZE) as u64))?; _fd.read_exact(page.buf.borrow_mut())?; } page.page_type = page.get_page_type(); page.fd = Some(self.fd.clone()); // page.init_layout(); Ok(page) } pub fn get_page(&mut self, index: u32) -> Result<Rc<RefCell<Page>>>{ if index == 0 { return Ok(self.db_meta_page.clone()); } let res = self.lru.get(&index); match res { Some(res) => { Ok(res.clone()) } None => { match self.load_page(index){ Ok(res) => { let index = res.index; self.lru.put(res.index, Rc::new(RefCell::new(res))); Ok(self.lru.get(&index).unwrap().clone()) } Err(err) => {panic!("err:{}" , err)} } } } } }
use std::error::Error; use std::result::Result; //use crate::bknode::BkNode; pub trait NodeAllocator<'a> { type Key: Clone; type Node; // TODO: type AllocationError: Error; fn new_root(&'a self, key: Self::Key) -> Result<Self::Node, Box<dyn Error>>; fn new_child(&'a self, key: Self::Key) -> Result<Self::Node, Box<dyn Error>>; }
use crate::utils::read_lines; use std::collections::HashMap; use regex::Regex; pub(crate) fn main() { // Tests let filename_ex = "B:\\Dev\\Rust\\projects\\aoc2020\\input\\7_ex.txt"; let all_bags_tests = parse_file(filename_ex); assert!(can_hold(all_bags_tests.get("bright white").unwrap(), all_bags_tests.get("shiny gold").unwrap(), &all_bags_tests)); assert!(can_hold(all_bags_tests.get("muted yellow").unwrap(), all_bags_tests.get("shiny gold").unwrap(), &all_bags_tests)); assert!(can_hold(all_bags_tests.get("dark orange").unwrap(), all_bags_tests.get("shiny gold").unwrap(), &all_bags_tests)); assert!(can_hold(all_bags_tests.get("light red").unwrap(), all_bags_tests.get("shiny gold").unwrap(), &all_bags_tests)); assert_eq!(count_hold(all_bags_tests.get("shiny gold").unwrap(), &all_bags_tests), 4); // Tests p2 let filename_ex_2 = "B:\\Dev\\Rust\\projects\\aoc2020\\input\\7_ex_2.txt"; let all_bags_tests_2 = parse_file(filename_ex_2); println!("{:?}",all_bags_tests_2); assert_eq!(count_total_bags(all_bags_tests_2.get("shiny gold").unwrap(), &all_bags_tests_2),126); println!("All tests passed :)"); // Real puzzle let filename = "B:\\Dev\\Rust\\projects\\aoc2020\\input\\7.txt"; let all_bags = parse_file(filename); let c = count_hold(all_bags.get("shiny gold").unwrap(), &all_bags); println!("{} bags can contains at least one shiny gold bag",c); let ct = count_total_bags(all_bags.get("shiny gold").unwrap(), &all_bags); println!("{} individual bags are required inside my single shiny gold bag",ct); } #[derive(Eq, PartialEq, Hash, Debug)] struct BagContainRule{ bag: String, count: i32, } #[derive(Eq, PartialEq, Hash, Debug)] struct Bag{ bag_type: String, rules : Vec<BagContainRule>, total_contained_bags: i32 } impl Bag{ fn add_rule(&mut self, rule: BagContainRule) { self.total_contained_bags += rule.count; self.rules.push(rule); } } fn count_hold(held_bag: &Bag, all_bags: &HashMap<String, Bag>) -> i32{ let mut ret = 0; for x in all_bags.values() { if can_hold(x, held_bag, 
all_bags) { //println!("{} can hold a {} bag",x.bag_type,held_bag.bag_type); ret += 1; } } return ret } fn count_total_bags(held_bag: &Bag, all_bags: &HashMap<String, Bag>) -> i32{ let mut total = held_bag.total_contained_bags; for x in &held_bag.rules{ //println!("For {}, adding the bags from rule {}",held_bag.bag_type,x.bag); total += x.count * count_total_bags(all_bags.get(&x.bag).unwrap(),all_bags); } return total; } fn can_hold(holder_bag: &Bag, held_bag: &Bag, all_bags : &HashMap<String, Bag>) -> bool{ let mut any = false; for rule in &holder_bag.rules{ if rule.bag == held_bag.bag_type { return true; } else { any |= can_hold(all_bags.get(&rule.bag).unwrap(),held_bag, all_bags); } } return any; } fn parse_file(filepath : &str) -> HashMap<String,Bag>{ let mut bags = HashMap::new(); match read_lines(filepath) { Ok(lines) => { for line in lines { if let Ok(c) = line { parse_line(&c,&mut bags); } else { println!("Couldn't read line [{:?}]",line); } } } Err(..) => { println!("Couldn't read filepath [{}]",filepath); } } //println!("[{:?}]",bags); return bags; } fn parse_line(line: &str, existing_bags: &mut HashMap<String, Bag>){ // Initialize regex only once lazy_static!{ static ref RE_LINE :Regex = Regex::new(r"([a-zA-Z ]+) bags contain ([a-zA-Z0-9, ]*).").unwrap(); static ref RE_BAGRULE :Regex = Regex::new(r"(\d) ([a-zA-Z ]*) bags?").unwrap(); } if RE_LINE.is_match(line) { let caps = RE_LINE.captures(line).unwrap(); let part1 = caps.get(1).map_or("", |m| m.as_str()); let s_part1 = String::from(part1); let part2 = caps.get(2).map_or("", |m| m.as_str()); // Bag if ! 
existing_bags.contains_key(&s_part1) { existing_bags.insert(s_part1, Bag{ bag_type: String::from(part1), rules: Vec::new(), total_contained_bags: 0 }); } let split_part2 = part2.split(","); for rule in split_part2 { if RE_BAGRULE.is_match(rule) { let caps_bagrule = RE_BAGRULE.captures(rule).unwrap(); let number = caps_bagrule.get(1).map_or(0, |m| m.as_str().parse().unwrap()); let rule_bag_name_str = caps_bagrule.get(2).map_or("", |m| m.as_str()); let rule_bag_name = String::from(rule_bag_name_str); // Creating bag if needed if ! existing_bags.contains_key(&rule_bag_name) { existing_bags.insert(rule_bag_name, Bag{ bag_type: String::from(rule_bag_name_str), rules: Vec::new(), total_contained_bags: 0 }); } // Creating rule let r = BagContainRule{ bag: String::from(rule_bag_name_str), count:number }; let bag = existing_bags.get_mut(part1).unwrap(); bag.add_rule(r); } } } else { panic!("Can not parse line !") } }
// chapter 4 "structuring data and matching patterns"
// program section:

fn main() {
    // Newtype wrapper marking a weight measured in kilograms.
    struct Kilograms(u32);

    let weight = Kilograms(250);

    // extracting kgm: read the raw u32 back out via the tuple field.
    let kgm = weight.0;

    println!("weight is {} kilograms", kgm);
}

// output should be:
//
//   weight is 250 kilograms
//
// end of output
// NOTE(review): this register block follows the svd2rust R/W wrapper pattern
// (FieldReader/FieldWriter, RegisterSpec) and is presumably machine-generated
// from an SVD file — confirm provenance before editing by hand.
#[doc = "Register `ETH_MACL4A0R` reader"]
pub type R = crate::R<ETH_MACL4A0R_SPEC>;
#[doc = "Register `ETH_MACL4A0R` writer"]
pub type W = crate::W<ETH_MACL4A0R_SPEC>;
#[doc = "Field `L4SP0` reader - L4SP0"]
pub type L4SP0_R = crate::FieldReader<u16>;
#[doc = "Field `L4SP0` writer - L4SP0"]
pub type L4SP0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
#[doc = "Field `L4DP0` reader - L4DP0"]
pub type L4DP0_R = crate::FieldReader<u16>;
#[doc = "Field `L4DP0` writer - L4DP0"]
pub type L4DP0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
impl R {
    #[doc = "Bits 0:15 - L4SP0"]
    #[inline(always)]
    pub fn l4sp0(&self) -> L4SP0_R {
        // low 16 bits of the register
        L4SP0_R::new((self.bits & 0xffff) as u16)
    }
    #[doc = "Bits 16:31 - L4DP0"]
    #[inline(always)]
    pub fn l4dp0(&self) -> L4DP0_R {
        // high 16 bits of the register
        L4DP0_R::new(((self.bits >> 16) & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - L4SP0"]
    #[inline(always)]
    #[must_use]
    pub fn l4sp0(&mut self) -> L4SP0_W<ETH_MACL4A0R_SPEC, 0> {
        L4SP0_W::new(self)
    }
    #[doc = "Bits 16:31 - L4DP0"]
    #[inline(always)]
    #[must_use]
    pub fn l4dp0(&mut self) -> L4DP0_W<ETH_MACL4A0R_SPEC, 16> {
        L4DP0_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Layer4 address filter 0 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_macl4a0r::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`eth_macl4a0r::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MACL4A0R_SPEC;
impl crate::RegisterSpec for ETH_MACL4A0R_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`eth_macl4a0r::R`](R) reader structure"]
impl crate::Readable for ETH_MACL4A0R_SPEC {}
#[doc = "`write(|w| ..)` method takes [`eth_macl4a0r::W`](W) writer structure"]
impl crate::Writable for ETH_MACL4A0R_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ETH_MACL4A0R to value 0"]
impl crate::Resettable for ETH_MACL4A0R_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
use serde::de::{DeserializeSeed, Deserializer, EnumAccess, SeqAccess, VariantAccess, Visitor};
use std::error::Error;
use std::marker::PhantomData;
use std::ops::Deref;
use std::vec::IntoIter;

use headers::HeadersDeserializationError;

/// A string-like value (owned or borrowed) that knows how to present itself
/// to a serde [`Visitor`] via the appropriate `visit_*` call.
pub(super) trait VisitableString<'de>: Deref<Target = str> {
    fn be_visited<V>(self, visitor: V) -> Result<V::Value, HeadersDeserializationError>
    where
        V: Visitor<'de>;
}

impl<'de> VisitableString<'de> for String {
    fn be_visited<V>(self, visitor: V) -> Result<V::Value, HeadersDeserializationError>
    where
        V: Visitor<'de>,
    {
        // Owned string: hand ownership to the visitor.
        visitor.visit_string(self)
    }
}

impl<'de, 'a: 'de> VisitableString<'de> for &'a str {
    fn be_visited<V>(self, visitor: V) -> Result<V::Value, HeadersDeserializationError>
    where
        V: Visitor<'de>,
    {
        // Borrowed string that outlives 'de: the visitor may keep the borrow.
        visitor.visit_borrowed_str(self)
    }
}

/// serde [`Deserializer`] over a single header value string.
pub(super) struct DeserializeValue<'de, S>
where
    S: VisitableString<'de>,
{
    value: S,
    // Ties the 'de lifetime to this struct without storing a reference.
    phantom: PhantomData<&'de str>,
}

impl<'de, S> DeserializeValue<'de, S>
where
    S: VisitableString<'de>,
{
    pub(super) fn new(value: S) -> Self {
        DeserializeValue {
            value,
            phantom: PhantomData,
        }
    }
}

/// Wraps a parse error (from `str::parse`) into the crate's error type,
/// recording which deserialize_* method it came from.
fn translate_parse_error<E>(source: &'static str, e: E) -> HeadersDeserializationError
where
    E: Error,
{
    let msg = format!("{}", e);
    HeadersDeserializationError::ParseError { source, msg }
}

// Generates a deserialize_* method that parses the string value and forwards
// the parsed primitive to the matching visit_* method.
macro_rules! primitive {
    ($fn:ident, $visit_fn:ident) => {
        fn $fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
        where
            V: Visitor<'de>,
        {
            match self.value.parse() {
                Ok(v) => visitor.$visit_fn(v),
                Err(e) => Err(translate_parse_error(stringify!($fn), e)),
            }
        }
    };
}

// Generates a deserialize_* method that always rejects with InvalidValueType;
// the second arm handles methods taking extra leading arguments.
macro_rules! reject {
    {$fn:ident, $msg:expr} => {
        fn $fn<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
        where
            V: Visitor<'de>
        {
            Err(HeadersDeserializationError::InvalidValueType { msg: $msg })
        }
    };
    {$fn:ident, $msg:expr, ($($arg_i:ident : $arg_t:ty),*)} => {
        fn $fn<V>(self, $($arg_i : $arg_t),*, _visitor: V) -> Result<V::Value, Self::Error>
        where
            V: Visitor<'de>
        {
            Err(HeadersDeserializationError::InvalidValueType { msg: $msg })
        }
    }
}

impl<'de, S> Deserializer<'de> for DeserializeValue<'de, S>
where
    S: VisitableString<'de>,
{
    type Error = HeadersDeserializationError;

    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.value.be_visited(visitor)
    }

    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_str(visitor)
    }

    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_str(visitor)
    }

    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        visitor.visit_unit()
    }

    fn deserialize_enum<V>(
        self,
        _name: &'static str,
        _variants: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        visitor.visit_enum(ValueEnum::new(self.value))
    }

    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Sequences come from multi-valued headers; see MultiValued::new.
        visitor.visit_seq(MultiValued::new(self.value))
    }

    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // A value is present by construction, so always Some.
        visitor.visit_some(self)
    }

    primitive!(deserialize_bool, visit_bool);
    primitive!(deserialize_i8, visit_i8);
    primitive!(deserialize_i16, visit_i16);
    primitive!(deserialize_i32, visit_i32);
    primitive!(deserialize_i64, visit_i64);
    primitive!(deserialize_u8, visit_u8);
    primitive!(deserialize_u16, visit_u16);
    primitive!(deserialize_u32, visit_u32);
    primitive!(deserialize_u64, visit_u64);
    primitive!(deserialize_f32, visit_f32);
    primitive!(deserialize_f64, visit_f64);

    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Uses the first char of the value; empty values are an error.
        match self.value.chars().next() {
            Some(c) => visitor.visit_char(c),
            None => Err(HeadersDeserializationError::InvalidState {
                msg: "empty string provided for HTTP header, unable to extract char value",
            }),
        }
    }

    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        visitor.visit_bytes(self.value.as_bytes())
    }

    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_bytes(visitor)
    }

    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        visitor.visit_unit()
    }

    fn deserialize_unit_struct<V>(
        self,
        _name: &'static str,
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_unit(visitor)
    }

    fn deserialize_newtype_struct<V>(
        self,
        _name: &'static str,
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        visitor.visit_newtype_struct(self)
    }

    reject!(
        deserialize_tuple,
        "unsuitable type (tuple) for attribute value",
        (_len: usize)
    );
    reject!(
        deserialize_tuple_struct,
        "unsuitable type (tuple struct) for attribute value",
        (_name: &'static str, _len: usize)
    );
    reject!(deserialize_map, "unsuitable type (map) for attribute value");
    reject!(
        deserialize_struct,
        "unsuitable type (struct) for attribute value",
        (_name: &'static str, _fields: &'static [&'static str])
    );
    reject!(deserialize_any, "unsuitable type (any) for attribute value");
}

/// [`SeqAccess`] over the individual values of a multi-valued header.
struct MultiValued {
    value_iter: IntoIter<String>,
}

impl MultiValued {
    /// Splits `value` on unescaped `;` separators and unescapes `\;` within
    /// each resulting element.
    fn new<'de, S>(value: S) -> Self
    where
        S: VisitableString<'de>,
    {
        let mut curr = None;
        // For an attribute which has these three values:
        //
        // value1\
        // value2\
        // value3\
        //
        // ... the multi-valued attribute string is represented as:
        //
        // value1\;value2\;value3\
        //
        // This is impossible to distinguish from a single attribute value of:
        //
        // value1;value2;value3\
        //
        // This is deliberate behaviour in shib-gotham to correctly handle what we get from
        // `mod_shib`. This exact example has a test case.
        let iter = str::split(&value, |c| {
            // Stateful split predicate: remembers the previous char so a ';'
            // preceded by '\' is treated as escaped, not as a separator.
            let prev = curr;
            curr = Some(c);
            match prev {
                Some('\\') => false,
                _ => c == ';',
            }
        });
        let values: Vec<String> = iter.map(|s| s.replace(r"\;", ";")).collect();
        MultiValued {
            value_iter: values.into_iter(),
        }
    }
}

impl<'de> SeqAccess<'de> for MultiValued {
    type Error = HeadersDeserializationError;

    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        // Each element is itself deserialized through DeserializeValue.
        match self.value_iter.next() {
            Some(v) => {
                let de = DeserializeValue::new(v);
                Ok(Some(seed.deserialize(de)?))
            }
            None => Ok(None),
        }
    }
}

/// [`EnumAccess`] that maps a header value string onto a unit enum variant.
struct ValueEnum<'de, S>
where
    S: VisitableString<'de>,
{
    value: S,
    // Ties the 'de lifetime to this struct without storing a reference.
    phantom: PhantomData<&'de str>,
}

impl<'de, S> ValueEnum<'de, S>
where
    S: VisitableString<'de>,
{
    fn new(value: S) -> Self {
        ValueEnum {
            value,
            phantom: PhantomData,
        }
    }
}

impl<'de, S> EnumAccess<'de> for ValueEnum<'de, S>
where
    S: VisitableString<'de>,
{
    type Error = HeadersDeserializationError;
    type Variant = UnitVariant;

    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
    where
        V: DeserializeSeed<'de>,
    {
        // The value string is the variant identifier; only unit variants are
        // supported (see UnitVariant below).
        Ok((
            seed.deserialize(DeserializeValue::new(self.value))?,
            UnitVariant,
        ))
    }
}

/// [`VariantAccess`] allowing only unit variants; all data-carrying variant
/// forms are rejected.
struct UnitVariant;

impl<'de> VariantAccess<'de> for UnitVariant {
    type Error = HeadersDeserializationError;

    fn unit_variant(self) -> Result<(), Self::Error> {
        Ok(())
    }

    fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
    where
        T: DeserializeSeed<'de>,
    {
        Err(HeadersDeserializationError::InvalidValueType {
            msg: "enum variant requires unsuitable type (newtype), expected only unit variants",
        })
    }

    fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        Err(HeadersDeserializationError::InvalidValueType {
            msg: "enum variant requires unsuitable type (tuple), expected only unit variants",
        })
    }

    fn struct_variant<V>(
        self,
        _fields: &'static [&'static str],
        _visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        Err(HeadersDeserializationError::InvalidValueType {
            msg: "enum variant requires unsuitable type (struct), expected only unit variants",
        })
    }
}
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

use std::borrow::Cow;
use std::hash::Hash;

use im::HashMap;

use crate::datatype::bitvec::BitVec;
use crate::datatype::AbstractDomain;
use crate::datatype::PatriciaTreeMap;

/*
 * A partition is a mapping from a set of labels to elements in an abstract
 * domain. It denotes a union of properties. A partition is Bottom iff all its
 * bindings are set to Bottom, and it is Top iff all its bindings are set to
 * Top.
 *
 * All lattice operations are applied componentwise.
 */
pub trait AbstractPartition<L, D: AbstractDomain>: AbstractDomain {
    /// Backing container for the label -> domain bindings.
    type ContainerType;

    /// Borrows the bindings; `None` when the partition is Top.
    fn bindings(&self) -> Option<&Self::ContainerType>;
    /// Consumes the partition and returns the bindings; `None` when Top.
    fn into_bindings(self) -> Option<Self::ContainerType>;
    /// Number of explicit bindings.
    fn len(&self) -> usize;
    fn is_empty(&self) -> bool;
    /// Value bound to `label`: Bottom when unbound, Top when the whole
    /// partition is Top; returned as `Cow` so bound values can be borrowed.
    fn get(&self, label: &L) -> Cow<'_, D>;
    /// Binds `label` to `domain`.
    fn set(&mut self, label: L, domain: D);
    /// Applies `op` in place to the value bound to `label`.
    fn update(&mut self, label: &L, op: impl FnOnce(&mut D));
}

/*
 * In order to minimize the size of the hashtable, we do not explicitly
 * represent bindings to Bottom.
 *
 * This implementation differs slightly from the textbook definition of a
 * partition: our Top partition cannot have its labels re-bound to anything
 * other than Top. I.e. for all labels L and domains D,
 *
 * HashMapAbstractPartition::top().set(L, D) == HashMapAbstractPartition::top()
 *
 * This makes for a much simpler implementation. 
*/

/// An abstract partition backed by `std::collections::HashMap`.
///
/// `Top` is represented explicitly; Bottom is encoded as the empty map, and
/// bindings to `D::bottom()` are never stored explicitly (see `set`).
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum HashMapAbstractPartition<L: Clone + Eq + Hash, D: AbstractDomain> {
    Top,
    Value(HashMap<L, D>), // Use empty map value as bottom
}

impl<L, D> AbstractPartition<L, D> for HashMapAbstractPartition<L, D>
where
    L: Clone + Eq + Hash,
    D: AbstractDomain,
{
    type ContainerType = HashMap<L, D>;

    /// Borrows the label -> domain bindings; `None` for `Top`.
    fn bindings(&self) -> Option<&Self::ContainerType> {
        match self {
            Self::Value(ref map) => Some(map),
            _ => None,
        }
    }

    /// Consumes `self` and returns the bindings; `None` for `Top`.
    fn into_bindings(self) -> Option<Self::ContainerType> {
        match self {
            Self::Value(map) => Some(map),
            _ => None,
        }
    }

    /// Number of explicitly stored (non-bottom) bindings.
    ///
    /// Panics when called on `Top`.
    fn len(&self) -> usize {
        self.bindings()
            .expect("Top abstract domain doesn't have a length!")
            .len()
    }

    /// `Top` is never "empty"; a `Value` is empty iff it has no bindings
    /// (i.e. it is Bottom).
    fn is_empty(&self) -> bool {
        match self {
            Self::Top => false,
            Self::Value(map) => map.is_empty(),
        }
    }

    /// Looks up `label`. Labels absent from the map are implicitly bound to
    /// bottom; every label of a `Top` partition maps to top.
    fn get(&self, label: &L) -> Cow<'_, D> {
        let map = match self {
            Self::Top => return Cow::Owned(D::top()),
            Self::Value(map) => map,
        };
        match map.get(label) {
            Some(domain) => Cow::Borrowed(domain),
            None => Cow::Owned(D::bottom()),
        }
    }

    /// Binds `label` to `domain`. Setting on `Top` is a silent no-op.
    fn set(&mut self, label: L, domain: D) {
        let map = match self {
            Self::Top => return,
            Self::Value(ref mut map) => map,
        };
        // Save some memory by implicitly storing bottom.
        if domain.is_bottom() {
            map.remove(&label);
        } else {
            map.insert(label, domain);
        }
    }

    /// Applies `op` in place to the binding of `label` (defaulting to bottom
    /// when absent). Updating a `Top` partition is a silent no-op.
    fn update(&mut self, label: &L, op: impl FnOnce(&mut D)) {
        let map = match self {
            Self::Top => return,
            Self::Value(ref mut map) => map,
        };
        match map.get_mut(label) {
            // NOTE(review): if `op` turns an existing binding into bottom it
            // stays stored explicitly, unlike `set` — confirm this is intended.
            Some(domain) => op(domain),
            None => {
                let mut temp = D::bottom();
                op(&mut temp);
                if !temp.is_bottom() {
                    map.insert(label.clone(), temp);
                }
            }
        }
    }
}

impl<L, D> AbstractDomain for HashMapAbstractPartition<L, D>
where
    L: Clone + Eq + Hash,
    D: AbstractDomain,
{
    /// Bottom is the empty map.
    fn bottom() -> Self {
        Self::Value(HashMap::new())
    }

    fn top() -> Self {
        Self::Top
    }

    fn is_bottom(&self) -> bool {
        match self {
            Self::Value(map) => map.is_empty(),
            _ => false,
        }
    }

    fn is_top(&self) -> bool {
        matches!(self, Self::Top)
    }

    /// Pointwise partial order: `self <= rhs` iff every binding of `self`
    /// is <= the corresponding binding of `rhs` (absent bindings are bottom).
    fn leq(&self, rhs: &Self) -> bool {
        use HashMapAbstractPartition::*;
        match (self, rhs) {
            (Top, _) => rhs.is_top(),
            (_, Top) => true,
            (Value(self_map), Value(other_map)) => {
                if self_map.len() > other_map.len() {
                    // Perf optimization: more explicit (non-bottom) bindings on
                    // the left means some label cannot be dominated.
                    false
                } else {
                    for (k, v) in self_map.iter() {
                        match other_map.get(k) {
                            Some(rd) => {
                                if !v.leq(rd) {
                                    return false;
                                }
                            }
                            None => return false,
                        }
                    }
                    true
                }
            }
        }
    }

    fn join_with(&mut self, rhs: Self) {
        Self::join_like_operation(self, rhs, |d1, d2| d1.join_with(d2));
    }

    fn meet_with(&mut self, rhs: Self) {
        Self::meet_like_operation(self, rhs, |d1, d2| d1.meet_with(d2));
    }

    fn widen_with(&mut self, rhs: Self) {
        Self::join_like_operation(self, rhs, |d1, d2| d1.widen_with(d2));
    }

    fn narrow_with(&mut self, rhs: Self) {
        Self::meet_like_operation(self, rhs, |d1, d2| d1.narrow_with(d2));
    }
}

impl<L, D> HashMapAbstractPartition<L, D>
where
    L: Clone + Eq + Hash,
    D: AbstractDomain,
{
    /// Shared skeleton for join/widen: pointwise `operation`, with Top
    /// absorbing and Bottom (absent binding) acting as identity.
    fn join_like_operation(lhs: &mut Self, rhs: Self, operation: impl Fn(&mut D, D)) {
        use HashMapAbstractPartition::*;
        match (lhs, rhs) {
            (Top, _) => {}
            (lhs, Top) => {
                *lhs = Top;
            }
            (Value(l_map), Value(r_map)) => {
                for (r_k, r_v) in r_map.into_iter() {
                    if let Some(l_v) = l_map.get_mut(&r_k) {
                        operation(l_v, r_v);
                        // l_v wasn't bottom. A join-like operation should not
                        // make it bottom.
                        assert!(!l_v.is_bottom());
                    } else {
                        // The value is Bottom, we just insert the other value
                        // (Bottom is the identity for join-like operations).
                        l_map.insert(r_k, r_v);
                    }
                }
            }
        }
    }

    /// Shared skeleton for meet/narrow: pointwise `operation`, with Top acting
    /// as identity; bindings missing on either side (i.e. bottom) are dropped,
    /// as are bindings that become bottom.
    fn meet_like_operation(lhs: &mut Self, rhs: Self, operation: impl Fn(&mut D, D)) {
        use HashMapAbstractPartition::*;
        match (lhs, rhs) {
            (lhs @ Top, rhs) => {
                *lhs = rhs;
            }
            (_, Top) => {}
            (Value(l_map), Value(mut r_map)) => {
                // Keys absent on the right are bottom: drop them first.
                l_map.retain(|l_k, _| r_map.contains_key(l_k));
                for (l_k, l_v) in l_map.iter_mut() {
                    let r_v = r_map.remove(l_k).unwrap();
                    operation(l_v, r_v);
                }
                // Restore the "no explicit bottom" invariant.
                l_map.retain(|_, l_v| !l_v.is_bottom());
            }
        }
    }
}

/// An abstract partition backed by a Patricia-tree map (efficient structural
/// sharing on clone); same Top/Bottom encoding as the HashMap variant.
#[derive(Debug)]
pub enum PatriciaTreeMapAbstractPartition<L: Into<BitVec> + Clone, D: Sized + Eq + AbstractDomain> {
    Top,
    Value(PatriciaTreeMap<L, D>), // Use empty map value as bottom
}

impl<L, D> Clone for PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
    fn clone(&self) -> Self {
        use PatriciaTreeMapAbstractPartition::*;
        match self {
            Top => Top,
            Value(map) => Value(map.clone()),
        }
    }
}

impl<L, D> PartialEq for PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
    fn eq(&self, rhs: &Self) -> bool {
        use PatriciaTreeMapAbstractPartition::*;
        match (self, rhs) {
            (Top, Top) => true,
            (Value(l_map), Value(r_map)) => l_map == r_map,
            (_, _) => false,
        }
    }
}

impl<L, D> Eq for PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
}

impl<L, D> AbstractPartition<L, D> for PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
    type ContainerType = PatriciaTreeMap<L, D>;

    fn bindings(&self) -> Option<&Self::ContainerType> {
        match self {
            Self::Value(ref map) => Some(map),
            _ => None,
        }
    }

    fn into_bindings(self) -> Option<Self::ContainerType> {
        match self {
            Self::Value(map) => Some(map),
            _ => None,
        }
    }

    /// Panics when called on `Top`, mirroring the HashMap variant.
    fn len(&self) -> usize {
        self.bindings()
            .expect("Top abstract domain doesn't have a length!")
            .len()
    }

    fn is_empty(&self) -> bool {
        match self {
            Self::Top => false,
            Self::Value(map) => map.is_empty(),
        }
    }

    fn get(&self, label: &L) -> Cow<'_, D> {
        let map = match self {
            Self::Top => return Cow::Owned(D::top()),
            Self::Value(map) => map,
        };
        // PatriciaTreeMap looks keys up by value, hence the clone.
        match map.get(label.clone()) {
            Some(domain) => Cow::Borrowed(domain),
            None => Cow::Owned(D::bottom()),
        }
    }

    fn set(&mut self, label: L, domain: D) {
        let map = match self {
            Self::Top => return,
            Self::Value(ref mut map) => map,
        };
        // Save some memory by implicitly storing bottom.
        if domain.is_bottom() {
            map.remove(label);
        } else {
            map.upsert(label, domain);
        }
    }

    /// Unlike the HashMap variant, this clones the current binding, mutates
    /// the copy, and re-inserts via `set` (which keeps the bottom invariant).
    fn update(&mut self, label: &L, op: impl FnOnce(&mut D)) {
        let map = match self {
            Self::Top => return,
            Self::Value(ref mut map) => map,
        };
        let mut update_domain = match map.get(label.clone()) {
            Some(domain) => domain.clone(),
            None => D::bottom(),
        };
        op(&mut update_domain);
        self.set(label.clone(), update_domain);
    }
}

impl<L, D> AbstractDomain for PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
    fn bottom() -> Self {
        Self::Value(PatriciaTreeMap::new())
    }

    fn top() -> Self {
        Self::Top
    }

    fn is_bottom(&self) -> bool {
        match self {
            Self::Value(map) => map.is_empty(),
            _ => false,
        }
    }

    fn is_top(&self) -> bool {
        matches!(self, Self::Top)
    }

    fn leq(&self, rhs: &Self) -> bool {
        use PatriciaTreeMapAbstractPartition::*;
        match (self, rhs) {
            (Top, _) => rhs.is_top(),
            (_, Top) => true,
            // Delegate the pointwise comparison to the tree, treating absent
            // keys as `D::bottom()`.
            (Value(self_map), Value(other_map)) => self_map.leq(other_map, &D::bottom()),
        }
    }

    fn join_with(&mut self, rhs: Self) {
        Self::join_like_operation(self, rhs, |d1, d2| d1.join_with(d2));
    }

    fn meet_with(&mut self, rhs: Self) {
        Self::meet_like_operation(self, rhs, |d1, d2| d1.meet_with(d2));
    }

    fn widen_with(&mut self, rhs: Self) {
        Self::join_like_operation(self, rhs, |d1, d2| d1.widen_with(d2));
    }

    fn narrow_with(&mut self, rhs: Self) {
        Self::meet_like_operation(self, rhs, |d1, d2| d1.narrow_with(d2));
    }
}

impl<L, D> PatriciaTreeMapAbstractPartition<L, D>
where
    L: Into<BitVec> + Clone,
    D: Sized + AbstractDomain,
{
    /// Join/widen skeleton: union of bindings, combining values present on
    /// both sides with `operation`.
    fn join_like_operation(lhs: &mut Self, rhs: Self, operation: impl Fn(&mut D, D)) {
        use PatriciaTreeMapAbstractPartition::*;
        match (lhs, rhs) {
            (Top, _) => {}
            (lhs, rhs @ Top) => {
                *lhs = rhs;
            }
            (Value(lmap), Value(rmap)) => {
                lmap.union_with(&rmap, |s, t| {
                    let mut s = s.clone();
                    operation(&mut s, t.clone());
                    s
                });
            }
        }
    }

    /// Meet/narrow skeleton: intersection of bindings, combining shared
    /// values with `operation`.
    fn meet_like_operation(lhs: &mut Self, rhs: Self, operation: impl Fn(&mut D, D)) {
        use PatriciaTreeMapAbstractPartition::*;
        match (lhs, rhs) {
            (lhs @ Top, rhs) => {
                *lhs = rhs;
            }
            (_, Top) => {}
            (Value(lmap), Value(rmap)) => {
                lmap.intersect_with(&rmap, |s, t| {
                    let mut s = s.clone();
                    operation(&mut s, t.clone());
                    s
                });
            }
        }
    }
}
#![forbid(unsafe_code, missing_docs, missing_debug_implementations, warnings)] #![doc(html_root_url = "https://docs.rs/rsa-der/0.2.0")] //! A simple crate to encode and decode DER-formatted public RSA keys. //! //! Public keys are passed to and returned from functions simply using the `n` and `e` //! components, so any RSA library can be used in conjunction with this crate. //! //! # Examples //! Convert an RSA public key to DER bytes: //! ```no_run //! # use rsa::{RSAPrivateKey, PublicKey}; //! use rand::rngs::OsRng; //! # fn generate_key() -> impl PublicKey { //! # let mut rng = OsRng::new().unwrap(); //! # let key = RSAPrivateKey::new(&mut rng, 2048).unwrap(); //! # key //! # } //! //! let key = generate_key(); //! let der_bytes = rsa_der::public_key_to_der(&key.n().to_bytes_be(), &key.e().to_bytes_be()); //! ``` use simple_asn1::{oid, ASN1Block, BigInt}; use std::fmt; use std::fmt::{Display, Formatter}; /// Encodes an RSA public key to DER bytes, as specified /// by the PKCS#8 format. /// /// The `n` and `e` parameters are the big-endian modulus /// and exponent of the public key, respectively. Simple /// `u8` slices are used to allow usage of this function /// in conjunction with any crypto library. /// /// # Examples /// Encoding an RSA public key generated using the [`rsa`](https://docs.rs/rsa) /// crate: /// ``` /// use rand::rngs::OsRng; /// use rsa_der::public_key_to_der; /// use rsa::{RSAPrivateKey, PublicKey}; /// /// let mut rng = OsRng::new().unwrap(); /// let key = RSAPrivateKey::new(&mut rng, 2048).unwrap(); /// /// let der_bytes = public_key_to_der(&key.n().to_bytes_be(), &key.e().to_bytes_be()); /// ``` pub fn public_key_to_der(n: &[u8], e: &[u8]) -> Vec<u8> { let mut root_sequence = vec![]; // Weird magic number - I have no idea what this is supposed to mean. 
let oid = oid!(1, 2, 840, 113_549, 1, 1, 1); root_sequence.push(ASN1Block::Sequence( 0, vec![ASN1Block::ObjectIdentifier(0, oid), ASN1Block::Null(0)], )); let n_block = ASN1Block::Integer(0, BigInt::from_signed_bytes_be(n)); let e_block = ASN1Block::Integer(0, BigInt::from_signed_bytes_be(e)); let rsa_key_bits = simple_asn1::to_der(&ASN1Block::Sequence(0, vec![n_block, e_block])).unwrap(); root_sequence.push(ASN1Block::BitString( 0, rsa_key_bits.len() * 8, rsa_key_bits, )); simple_asn1::to_der(&ASN1Block::Sequence(0, root_sequence)).unwrap() } /// Error type for `rsa-der`. #[derive(Debug, Clone, PartialEq)] pub enum Error { /// Indicates that a DER decoding error occurred. InvalidDer(simple_asn1::ASN1DecodeErr), /// Indicates that the RSA bitstring was not found. BitStringNotFound, /// Indicates that the RSA ASN.1 sequence was not found. SequenceNotFound, /// Indicates that the RSA modulus value was not found. ModulusNotFound, /// Indicates that the RSA exponent value was not found. ExponentNotFound, /// Indicates that the RSA ASN.1 sequence did not contain exactly two values (one /// for `n` and one for `e`). InvalidSequenceLength, } type StdResult<T, E> = std::result::Result<T, E>; /// Result type for `rsa-der`. This type /// is equivalent to `std::result::Result<T, rsa_der::Error>`. pub type Result<T> = StdResult<T, Error>; impl Display for Error { fn fmt(&self, f: &mut Formatter) -> StdResult<(), fmt::Error> { match self { Error::InvalidDer(e) => e.fmt(f)?, Error::BitStringNotFound => f.write_str("RSA bit string not found in ASN.1 blocks")?, Error::SequenceNotFound => f.write_str("ASN.1 sequence not found")?, Error::ModulusNotFound => f.write_str("ASN.1 public key modulus not found")?, Error::ExponentNotFound => f.write_str("ASN.1 public key exponent not found")?, Error::InvalidSequenceLength => { f.write_str("ASN.1 sequence did not contain exactly two values")? 
} } Ok(()) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::InvalidDer(e) => Some(e), _ => None, } } } /// Decodes a DER-encoded public key into the raw /// `n` and `e` components. /// /// The returned tuple is in the form `(n, e)`, where `n` and `e` /// are both big-endian big integers representing the key modulus /// and exponent, respectively. /// /// # Examples /// Parsing DER bytes into a public RSA key usable with the /// [`rsa`](https://docs.rs/rsa/) crate: /// ```no_run /// # fn main() -> Result<(), rsa_der::Error> { /// use rsa::RSAPublicKey; /// use num_bigint_dig::BigUint; /// /// # fn get_der_bytes() -> &'static [u8] { &[0] } /// let bytes: &[u8] = get_der_bytes(); /// /// let (n, e) = rsa_der::public_key_from_der(bytes)?; /// /// let key = RSAPublicKey::new(BigUint::from_bytes_be(&n), BigUint::from_bytes_be(&e)); /// # Ok(()) /// # } /// ``` pub fn public_key_from_der(der: &[u8]) -> Result<(Vec<u8>, Vec<u8>)> { let blocks = simple_asn1::from_der(der).map_err(Error::InvalidDer)?; let mut bit_strings = Vec::with_capacity(1); find_bit_string(&blocks, &mut bit_strings); if bit_strings.is_empty() { return Err(Error::BitStringNotFound); } let bit_string = &bit_strings[0]; let inner_asn = simple_asn1::from_der(bit_string).map_err(Error::InvalidDer)?; let (n, e) = match &inner_asn[0] { ASN1Block::Sequence(_, blocks) => { if blocks.len() != 2 { return Err(Error::InvalidSequenceLength); } let n = match &blocks[0] { ASN1Block::Integer(_, n) => n, _ => return Err(Error::ModulusNotFound), }; let e = match &blocks[1] { ASN1Block::Integer(_, e) => e, _ => return Err(Error::ExponentNotFound), }; (n, e) } _ => return Err(Error::SequenceNotFound), }; Ok((n.to_bytes_be().1, e.to_bytes_be().1)) } /// Recursively through ASN1 blocks, attempting /// to find a BitString value. 
fn find_bit_string(blocks: &[ASN1Block], mut result: &mut Vec<Vec<u8>>) { for block in blocks.iter() { match block { ASN1Block::BitString(_, _, bytes) => result.push(bytes.to_vec()), ASN1Block::Sequence(_, blocks) => find_bit_string(&blocks[..], &mut result), _ => (), } } } #[cfg(test)] mod tests { use super::*; use openssl::rsa::Rsa; #[test] fn test_public_key_to_der() { let key = Rsa::generate(2048).unwrap(); let bytes = public_key_to_der(&key.n().to_vec(), &key.e().to_vec()); // Confirm that converting back works correctly let new_key = Rsa::public_key_from_der(&bytes).unwrap(); assert_eq!(key.n(), new_key.n()); assert_eq!(key.e(), new_key.e()); } #[test] fn test_public_key_from_der() { let key = Rsa::generate(2048).unwrap(); let der = key.public_key_to_der().unwrap(); let (n, e) = public_key_from_der(&der).unwrap(); assert_eq!(key.n().to_vec(), n); assert_eq!(key.e().to_vec(), e); } }
use std::ops::RangeInclusive; use std::os::raw::c_void; use std::ptr; use crate::internal::DataTypeKind; use crate::string::ImStr; use crate::sys; use crate::Ui; /// Builder for a slider widget. #[derive(Copy, Clone, Debug)] #[must_use] pub struct Slider<'a, T: DataTypeKind> { label: &'a ImStr, min: T, max: T, display_format: Option<&'a ImStr>, power: f32, } impl<'a, T: DataTypeKind> Slider<'a, T> { /// Constructs a new slider builder with the given range. pub fn new(label: &ImStr, range: RangeInclusive<T>) -> Slider<T> { Slider { label, min: *range.start(), max: *range.end(), display_format: None, power: 1.0, } } /// Sets the display format using *a C-style printf string* #[inline] pub fn display_format(mut self, display_format: &'a ImStr) -> Self { self.display_format = Some(display_format); self } /// Sets the power (exponent) of the slider values #[inline] pub fn power(mut self, power: f32) -> Self { self.power = power; self } /// Builds a slider that is bound to the given value. /// /// Returns true if the slider value was changed. pub fn build(self, _: &Ui, value: &mut T) -> bool { unsafe { sys::igSliderScalar( self.label.as_ptr(), T::KIND as i32, value as *mut T as *mut c_void, &self.min as *const T as *const c_void, &self.max as *const T as *const c_void, self.display_format .map(ImStr::as_ptr) .unwrap_or(ptr::null()), self.power, ) } } /// Builds a horizontal array of multiple sliders attached to the given slice. /// /// Returns true if any slider value was changed. pub fn build_array(self, _: &Ui, values: &mut [T]) -> bool { unsafe { sys::igSliderScalarN( self.label.as_ptr(), T::KIND as i32, values.as_mut_ptr() as *mut c_void, values.len() as i32, &self.min as *const T as *const c_void, &self.max as *const T as *const c_void, self.display_format .map(ImStr::as_ptr) .unwrap_or(ptr::null()), self.power, ) } } } /// Builder for a vertical slider widget. 
#[derive(Clone, Debug)]
#[must_use]
// NOTE(review): unlike `Slider`, this struct carries an explicit `+ Copy`
// bound and derives only `Clone` — confirm whether the asymmetry is intended.
pub struct VerticalSlider<'a, T: DataTypeKind + Copy> {
    label: &'a ImStr,
    // On-screen widget size in pixels: [width, height].
    size: [f32; 2],
    min: T,
    max: T,
    display_format: Option<&'a ImStr>,
    // Exponent forwarded to igVSliderScalar; 1.0 means a linear slider.
    power: f32,
}

impl<'a, T: DataTypeKind> VerticalSlider<'a, T> {
    /// Constructs a new vertical slider builder with the given size and range.
    pub fn new(label: &ImStr, size: [f32; 2], range: RangeInclusive<T>) -> VerticalSlider<T> {
        VerticalSlider {
            label,
            size,
            min: *range.start(),
            max: *range.end(),
            display_format: None,
            power: 1.0,
        }
    }
    /// Sets the display format using *a C-style printf string*
    #[inline]
    pub fn display_format(mut self, display_format: &'a ImStr) -> Self {
        self.display_format = Some(display_format);
        self
    }
    /// Sets the power (exponent) of the slider values
    #[inline]
    pub fn power(mut self, power: f32) -> Self {
        self.power = power;
        self
    }
    /// Builds a vertical slider that is bound to the given value.
    ///
    /// Returns true if the slider value was changed.
    pub fn build(self, _: &Ui, value: &mut T) -> bool {
        unsafe {
            // Raw pointers borrow `value`/`self`, which outlive this call;
            // a null format pointer selects ImGui's default formatting.
            sys::igVSliderScalar(
                self.label.as_ptr(),
                self.size.into(),
                T::KIND as i32,
                value as *mut T as *mut c_void,
                &self.min as *const T as *const c_void,
                &self.max as *const T as *const c_void,
                self.display_format
                    .map(ImStr::as_ptr)
                    .unwrap_or(ptr::null()),
                self.power,
            )
        }
    }
}

/// Builder for an angle slider widget.
#[derive(Copy, Clone, Debug)]
#[must_use]
pub struct AngleSlider<'a> {
    label: &'a ImStr,
    // Bounds are in degrees even though the bound value is in radians.
    min_degrees: f32,
    max_degrees: f32,
    display_format: &'a ImStr,
}

impl<'a> AngleSlider<'a> {
    /// Constructs a new angle slider builder.
    // Defaults: full -360..=360 degree range, "%.0f deg" display format.
    pub fn new(label: &ImStr) -> AngleSlider {
        use crate::im_str;
        AngleSlider {
            label,
            min_degrees: -360.0,
            max_degrees: 360.0,
            display_format: im_str!("%.0f deg"),
        }
    }
    /// Sets the minimum value (in degrees)
    #[inline]
    pub fn min_degrees(mut self, min_degrees: f32) -> Self {
        self.min_degrees = min_degrees;
        self
    }
    /// Sets the maximum value (in degrees)
    #[inline]
    pub fn max_degrees(mut self, max_degrees: f32) -> Self {
        self.max_degrees = max_degrees;
        self
    }
    /// Sets the display format using *a C-style printf string*
    #[inline]
    pub fn display_format(mut self, display_format: &'a ImStr) -> Self {
        self.display_format = display_format;
        self
    }
    /// Builds an angle slider that is bound to the given value (in radians).
    ///
    /// Returns true if the slider value was changed.
    pub fn build(self, _: &Ui, value_rad: &mut f32) -> bool {
        unsafe {
            // `value_rad` is borrowed for the duration of the call only.
            sys::igSliderAngle(
                self.label.as_ptr(),
                value_rad as *mut _,
                self.min_degrees,
                self.max_degrees,
                self.display_format.as_ptr(),
            )
        }
    }
}
use std::fmt; use super::{ ChannelId, Channel, Emoji, Member, RoleId, Role, UserId, User, IncidentStatus }; use ::internal::prelude::*; /// Allows something - such as a channel or role - to be mentioned in a message. pub trait Mentionable { fn mention(&self) -> String; } impl Mentionable for ChannelId { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for Channel { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for Emoji { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for Member { fn mention(&self) -> String { format!("{}", self.user) } } impl Mentionable for RoleId { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for Role { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for UserId { fn mention(&self) -> String { format!("{}", self) } } impl Mentionable for User { fn mention(&self) -> String { format!("{}", self) } } /// A mention targeted at a certain model. /// /// A mention can be created by calling `.mention()` on anything that is /// mentionable - or an item's Id - and can be formatted into a string using /// [`format!`]: /// /// ```rust,ignore /// let message = format!("Mentioning {}", user.mention()); /// ``` /// /// If a `String` is required, call `mention.to_string()`. pub struct Mention { pub prefix: &'static str, pub id: u64, } impl fmt::Display for Mention { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(f.write_str(self.prefix)); try!(fmt::Display::fmt(&self.id, f)); fmt::Write::write_char(f, '>') } } impl IncidentStatus { #[doc(hidden)] pub fn decode(value: Value) -> Result<Self> { Self::decode_str(value) } }
extern crate serde_json; use failure; use failure::Error; use serde_json::Value; use std::fs; use std::io::Read; use std::path::Path; use util; #[derive(Deserialize, Debug)] pub struct PublicMessage { what: String, data: Option<serde_json::Value>, } #[derive(Deserialize, Debug)] pub struct Message { pub uid: String, pwd: String, what: String, data: Option<serde_json::Value>, } #[derive(Serialize, Deserialize, Debug)] pub struct SaveScript { name: String, script: String, } #[derive(Serialize, Deserialize)] pub struct ServerResponse { pub what: String, pub content: Value, } #[derive(Serialize, Deserialize)] pub struct ServerError { pub errortext: String, } fn load_script(name: &str) -> Result<ServerResponse, Error> { Ok(ServerResponse { what: "script".to_string(), content: serde_json::to_value(SaveScript { name: name.to_string(), script: util::load_string(format!("scripts/{}", name).as_str())?, })?, }) } fn save_script(name: &str, script: &str) -> Result<ServerResponse, Error> { info!("save_script {} {}", name, script); // how many scripts have we? let tbdir = Path::new("scripts/"); let fc = fs::read_dir(tbdir)?.count(); if fc > 500 { Err(failure::err_msg("too many scripts, can't save!")) } else if script.len() > 100000 { Err(failure::err_msg("script too long, can't save!")) } else { let allowed = "abcdefghijklmnopqrstuvwxyz1234567890_"; let mut edname = String::new(); name.chars().for_each(|c| { if allowed.contains(c) { edname.push(c) } else if c == ' ' { edname.push('_') } }); if edname.len() > 80 { Err(failure::err_msg("script name too long, can't save!")) } else if edname.len() == 0 { Err(failure::err_msg("empty script, can't save!")) } else { util::write_string(format!("scripts/{}", edname).as_str(), script)?; Ok(ServerResponse { what: "script written!".to_string(), content: serde_json::to_value(edname)?, }) } } } // public json msgs don't require login. 
pub fn process_public_json( ip: &Option<&str>, msg: PublicMessage, ) -> Result<Option<ServerResponse>, Error> { match msg.what.as_str() { "getscript" => match msg.data { None => Ok(Some(ServerResponse { what: "no script specified!".to_string(), content: serde_json::Value::Null, })), Some(data) => { info!("public getscript:{}", data); let name: String = serde_json::from_value(data)?; (load_script(name.as_str())).map(Some) } }, "savescript" => match msg.data { None => Ok(Some(ServerResponse { what: "no script specified!".to_string(), content: serde_json::Value::Null, })), Some(data) => { info!("public savescript:{}", data); let blah: SaveScript = serde_json::from_value(data)?; save_script(blah.name.as_str(), &blah.script).map(Some) } }, "getscriptlist" => Ok(Some(ServerResponse { what: "scriptlist".to_string(), content: serde_json::to_value(script_list()?)?, })), wat => Err(failure::err_msg(format!("invalid 'what' code:'{}'", wat))), } } pub fn script_list() -> Result<Vec<String>, Error> { // find all script files. let tbdir = Path::new("scripts/"); let mut scriptnames = Vec::new(); if tbdir.is_dir() { fs::read_dir(tbdir)?.for_each(|b| match b { Ok(c) => { c.path().file_stem().map(|os| { os.to_str().map(|s| scriptnames.push(s.to_string())); }); } Err(_) => (), }); Ok(scriptnames) } else { Err(failure::err_msg("scripts/ is not a directory!")) } }
extern crate rand; extern crate flame; use std::fs::File; fn main() { let mut data = vec![0; 1000]; for di in 0..data.len() { data[di] = rand::random::<u64>(); } flame::start("sort n=1000"); data.sort(); flame::end("sort n=1000"); flame::start("binary search n=1000 100 times"); for _ in 0..100 { let c = rand::random::<u64>(); data.binary_search(&c).ok(); } flame::end("binary search n=1000 100 times"); let mut data = vec![0; 10000]; for di in 0..data.len() { data[di] = rand::random::<u64>(); } flame::start("sort n=10000"); data.sort(); flame::end("sort n=10000"); flame::start("binary search n=10000 100 times"); for _ in 0..100 { let c = rand::random::<u64>(); data.binary_search(&c).ok(); } flame::end("binary search n=10000 100 times"); flame::dump_html(&mut File::create("flame-graph.html").unwrap()).unwrap(); }
table! { accounts (id) { id -> Int4, url -> Varchar, } } table! { posts (id) { id -> Int4, src -> Int4, privacy -> Int4, content_warning -> Nullable<Varchar>, text -> Nullable<Varchar>, image_data -> Nullable<Json>, time -> Timestamptz, } } table! { posts_dests (id) { id -> Int4, post_id -> Int4, dest_id -> Int4, } } joinable!(posts -> accounts (src)); joinable!(posts_dests -> posts (post_id)); joinable!(posts_dests -> accounts (dest_id)); allow_tables_to_appear_in_same_query!( accounts, posts, posts_dests, );
#![no_std] #![no_main] #![feature(trait_alias)] #![feature(min_type_alias_impl_trait)] #![feature(impl_trait_in_bindings)] #![feature(type_alias_impl_trait)] #![allow(incomplete_features)] #[path = "../example_common.rs"] mod example_common; use embassy_stm32::{ gpio::{Level, Output, Speed}, rcc::*, }; use embedded_hal::digital::v2::OutputPin; use example_common::*; use cortex_m_rt::entry; #[entry] fn main() -> ! { info!("Hello World!"); let mut p = embassy_stm32::init(Default::default()); Rcc::new(p.RCC).enable_debug_wfe(&mut p.DBGMCU, true); let mut led = Output::new(p.PB5, Level::High, Speed::Low); loop { info!("high"); led.set_high().unwrap(); cortex_m::asm::delay(1_000_000); info!("low"); led.set_low().unwrap(); cortex_m::asm::delay(1_000_000); } }
#![warn(clippy::pedantic)]
// This is the canonical "Hello World" example for RLTK.
// It's like example 01, but we implement a sparse second terminal using a nicer VGA font
// for the FPS and frame time portions. This illustrates how you can combine multiple fonts
// on a single layered console.
//////////////////////////////////////////////////////////////

// We're utilizing functionality from RLTK, so we need to tell it to use the crate.
rltk::add_wasm_support!();
extern crate rltk;

// We're using Rltk (the main context) and GameState (a trait defining what our callback
// looks like), so we need to use that, too.
use rltk::prelude::*;

// This is the structure that will store our game state, typically a state machine pointing to
// other structures. This demo is really simple, so we'll just put the minimum to make it work
// in here.
struct State {
    y: i32,              // current row of the bouncing "Hello" text
    going_down: bool,    // direction of the bounce
}

// We have to implement the "trait" GameState for our state object. This gives it a callback
// point for the main loop.
impl GameState for State {
    // This is called every time the screen refreshes (a "tick") by RLTK's main loop. Since GUIs
    // require that you process events every turn - rather than just sequentially like a good old text
    // console, you have to run your game as something of a state machine. This will be fleshed out in
    // later tutorials. For now, it just shows you the frame rate and says "Hello World".
    fn tick(&mut self, ctx: &mut Rltk) {
        let mut draw_batch = DrawBatch::new();
        let col1 = RGB::named(rltk::CYAN);
        let col2 = RGB::named(rltk::YELLOW);
        // Blend the two colors based on vertical position (0.0 at top, ~1.0 at bottom).
        let percent: f32 = self.y as f32 / 50.0;
        let fg = col1.lerp(col2, percent);

        // The first console created (8x8) is always console 0. This makes it the recipient
        // of draw calls sent to ctx. You can also do ctx.consoles[0] - but that's more typing.
        draw_batch.target(0);
        draw_batch.cls();
        draw_batch.print_color(
            Point::new(1, self.y),
            "Hello RLTK World",
            ColorPair::new(fg, RGB::named(rltk::BLACK)),
        );

        // Lets make the hello bounce up and down
        if self.going_down {
            self.y += 1;
            if self.y > 48 {
                self.going_down = false;
            }
        } else {
            self.y -= 1;
            if self.y < 2 {
                self.going_down = true;
            }
        }

        // We'll also show the frame rate, since we generally care about keeping that high.
        // We want to show this in VGA 8x16 font, so we'll set to console 1 - the one we added.
        // Again, this could be ctx.consoles[1] - but the short-hand is easier.
        draw_batch.target(1);
        draw_batch.cls();
        draw_batch.draw_double_box(
            Rect::with_size(39, 0, 20, 3),
            ColorPair::new(RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)),
        );
        draw_batch.print_color(
            Point::new(40, 1),
            &format!("FPS: {}", ctx.fps),
            ColorPair::new(RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK)),
        );
        draw_batch.print_color(
            Point::new(40, 2),
            &format!("Frame Time: {} ms", ctx.frame_time_ms),
            ColorPair::new(RGB::named(rltk::CYAN), RGB::named(rltk::BLACK)),
        );
        // Queue the batch (priority 0) and flush all queued draws to the screen.
        draw_batch.submit(0);
        render_draw_buffer(ctx);
    }
}

// Every program needs a main() function!
fn main() {
    // RLTK provides a simple initializer for a simple 8x8 font window of a given number of
    // characters. Since that's all we need here, we'll use it!
    // We're specifying that we want an 80x50 console, with a title, and a relative path
    // to where it can find the font files and shader files.
    // These would normally be "resources" rather than "../../resources" - but to make it
    // work in the repo without duplicating, they are a relative path.
    let mut context = Rltk::init_simple8x8(80, 50, "Hello RLTK World", "resources");

    // We want to add a second layer, using an 8x16 VGA font. It looks nicer, and shows how
    // RLTK can have layers.
    //
    // We start by loading the font.
    let font = context.register_font(rltk::Font::load("resources/vga8x16.png", (8, 16)));

    // Then we initialize it; notice 80x25 (half the height, since 8x16 is twice as tall).
    // This actually returns the console number, but it's always going to be 1.
    context.register_console(rltk::SparseConsole::init(80, 25, &context.backend), font);

    // Now we create an empty state object.
    let gs = State {
        y: 1,
        going_down: true,
    };

    // Call into RLTK to run the main loop. This handles rendering, and calls back into State's tick
    // function every cycle.
    rltk::main_loop(context, gs);
}
/* * Copyright 2018 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ------------------------------------------------------------------------------ */ /// Structure to read PoET configuration from toml file #[derive(Debug, Deserialize, Clone)] pub struct PoetConfig { spid: String, ias_url: String, spid_cert_file: String, password: String, rest_api: String, ias_report_key_file: String, poet_client_private_key_file: String, is_genesis_node: bool, genesis_batch_path: String, validator_pub_key: String, log_dir: String, } impl PoetConfig { /// Getters fot the members pub fn get_spid(&self) -> String { self.spid.clone() } pub fn get_ias_url(&self) -> String { self.ias_url.clone() } pub fn get_spid_cert_file(&self) -> String { self.spid_cert_file.clone() } pub fn get_password(&self) -> String { self.password.clone() } pub fn get_rest_api(&self) -> String { self.rest_api.clone() } pub fn get_ias_report_key_file(&self) -> String { self.ias_report_key_file.clone() } pub fn get_poet_client_private_key_file(&self) -> String { self.poet_client_private_key_file.clone() } pub fn is_genesis(&self) -> bool { self.is_genesis_node } pub fn get_genesis_batch_path(&self) -> String { self.genesis_batch_path.clone() } pub fn get_validator_pub_key(&self) -> String { self.validator_pub_key.clone() } pub fn set_is_genesis(&mut self, is_genesis: bool) { self.is_genesis_node = is_genesis; } pub fn set_genesis_batch_path(&mut self, path: String) { self.genesis_batch_path = path; } pub fn 
get_log_dir(&mut self) -> String { return self.log_dir.clone(); } }
use nix::unistd::getppid; use std::env; fn main() { let ppid = getppid(); let pwd = env::current_dir().unwrap(); let is_current = true || false; if (is_current) { print!("*"); } println!("{}: {}",ppid,pwd.display()); }
// One submodule per VM instruction; each defines the struct for that opcode.
mod add;
mod array;
mod bang;
mod call;
mod closure;
mod constant;
mod current_closure;
mod div;
mod equal;
mod get_builtin;
mod get_free;
mod get_global;
mod get_local;
mod greater_than;
mod hash;
mod index;
mod jump;
mod jump_not_truthy;
mod minus;
mod mreturn;
mod mul;
mod not_equal;
mod null;
mod ofalse;
mod otrue;
mod pop;
mod return_value;
mod set_global;
mod set_local;
mod sub;

// Re-export every opcode struct at this module's root.
// (`mreturn`/`ofalse`/`otrue` exist because `return`/`false`/`true` are keywords.)
pub use add::Add;
pub use array::Array;
pub use bang::Bang;
pub use call::Call;
pub use closure::Closure;
pub use constant::Constant;
pub use current_closure::CurrentClosure;
pub use div::Div;
pub use equal::Equal;
pub use get_builtin::GetBuiltin;
pub use get_free::GetFree;
pub use get_global::GetGlobal;
pub use get_local::GetLocal;
pub use greater_than::GreaterThan;
pub use hash::Hash;
pub use index::Index;
pub use jump::Jump;
pub use jump_not_truthy::JumpNotTruthy;
pub use minus::Minus;
pub use mreturn::Return;
pub use mul::Mul;
pub use not_equal::NotEqual;
pub use null::Null;
pub use ofalse::False;
pub use otrue::True;
pub use pop::Pop;
pub use return_value::ReturnValue;
pub use set_global::SetGlobal;
pub use set_local::SetLocal;
pub use sub::Sub;

// NOTE(review): these two imports mix `crate::vm::…` and bare `vm::…` paths —
// presumably both resolve to the same module; confirm and unify.
use crate::vm::convert::ToBytes;
use vm::convert::{Read, TryRead};

// Shared imports for this module and its submodules.
mod preludes {
    pub use super::super::preludes::*;
    pub use crate::vm::bytecode::{Instruction, Instructions};
    pub use crate::vm::opcode::{OperandCode, OperandType};
}

use preludes::*;

/// Numeric discriminant for each opcode — the first byte of an encoded
/// instruction. Values are stable wire-format ids (0..=29).
pub enum OperandType {
    Constant = 0,
    Add = 1,
    Pop = 2,
    Sub = 3,
    Mul = 4,
    Div = 5,
    True = 6,
    False = 7,
    Equal = 8,
    NotEqual = 9,
    GreaterThan = 10,
    Minus = 11,
    Bang = 12,
    JumpNotTruthy = 13,
    Jump = 14,
    Null = 15,
    GetGlobal = 16,
    SetGlobal = 17,
    Array = 18,
    Hash = 19,
    Index = 20,
    Call = 21,
    ReturnValue = 22,
    Return = 23,
    GetLocal = 24,
    SetLocal = 25,
    GetBuiltin = 26,
    Closure = 27,
    GetFree = 28,
    CurrentClosure = 29,
}

impl TryFrom<u8> for OperandType {
    type Error = anyhow::Error;

    /// Decodes a raw instruction byte into its `OperandType`;
    /// errors on any id outside 0..=29.
    fn try_from(value: u8) -> Result<Self> {
        Ok(match value {
            0 => Self::Constant,
            1 => Self::Add,
            2 => Self::Pop,
            3 => Self::Sub,
            4 => Self::Mul,
            5 => Self::Div,
            6 => Self::True,
            7 => Self::False,
            8 => Self::Equal,
            9 => Self::NotEqual,
            10 => Self::GreaterThan,
            11 => Self::Minus,
            12 => Self::Bang,
            13 => Self::JumpNotTruthy,
            14 => Self::Jump,
            15 => Self::Null,
            16 => Self::GetGlobal,
            17 => Self::SetGlobal,
            18 => Self::Array,
            19 => Self::Hash,
            20 => Self::Index,
            21 => Self::Call,
            22 => Self::ReturnValue,
            23 => Self::Return,
            24 => Self::GetLocal,
            25 => Self::SetLocal,
            26 => Self::GetBuiltin,
            27 => Self::Closure,
            28 => Self::GetFree,
            29 => Self::CurrentClosure,
            bad => return Err(anyhow::format_err!("Unsupported id {}", bad)),
        })
    }
}

impl From<OperandType> for u8 {
    // The enum discriminants ARE the wire ids, so a plain cast suffices.
    fn from(value: OperandType) -> Self {
        value as u8
    }
}

/// A fully decoded instruction: one variant per opcode, carrying that
/// opcode's operand payload (if any).
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Opcode {
    Constant(Constant),
    Add(Add),
    Pop(Pop),
    Sub(Sub),
    Mul(Mul),
    Div(Div),
    True(True),
    False(False),
    Equal(Equal),
    NotEqual(NotEqual),
    GreaterThan(GreaterThan),
    Minus(Minus),
    Bang(Bang),
    JumpNotTruthy(JumpNotTruthy),
    Jump(Jump),
    Null(Null),
    GetGlobal(GetGlobal),
    SetGlobal(SetGlobal),
    Array(Array),
    Hash(Hash),
    Index(Index),
    Call(Call),
    ReturnValue(ReturnValue),
    Return(Return),
    GetLocal(GetLocal),
    SetLocal(SetLocal),
    GetBuiltin(GetBuiltin),
    Closure(Closure),
    GetFree(GetFree),
    CurrentClosure(CurrentClosure),
}

impl Opcode {
    /// Serializes this instruction (discriminant byte plus operands) by
    /// delegating to the variant's own `ToBytes` impl.
    pub fn to_bytes(&self) -> Vec<u8> {
        match self {
            Opcode::Constant(o) => o.to_bytes().to_vec(),
            Opcode::Add(o) => o.to_bytes().to_vec(),
            Opcode::Pop(o) => o.to_bytes().to_vec(),
            Opcode::Sub(o) => o.to_bytes().to_vec(),
            Opcode::Mul(o) => o.to_bytes().to_vec(),
            Opcode::Div(o) => o.to_bytes().to_vec(),
            Opcode::True(o) => o.to_bytes().to_vec(),
            Opcode::False(o) => o.to_bytes().to_vec(),
            Opcode::Equal(o) => o.to_bytes().to_vec(),
            Opcode::NotEqual(o) => o.to_bytes().to_vec(),
            Opcode::GreaterThan(o) => o.to_bytes().to_vec(),
            Opcode::Minus(o) => o.to_bytes().to_vec(),
            Opcode::Bang(o) => o.to_bytes().to_vec(),
            Opcode::JumpNotTruthy(o) => o.to_bytes().to_vec(),
            Opcode::Jump(o) => o.to_bytes().to_vec(),
            Opcode::Null(o) => o.to_bytes().to_vec(),
            Opcode::GetGlobal(o) => o.to_bytes().to_vec(),
            Opcode::SetGlobal(o) => o.to_bytes().to_vec(),
            Opcode::Array(o) => o.to_bytes().to_vec(),
            Opcode::Hash(o) => o.to_bytes().to_vec(),
            Opcode::Index(o) => o.to_bytes().to_vec(),
            Opcode::Call(o) => o.to_bytes().to_vec(),
            Opcode::ReturnValue(o) => o.to_bytes().to_vec(),
            Opcode::Return(o) => o.to_bytes().to_vec(),
            Opcode::GetLocal(o) => o.to_bytes().to_vec(),
            Opcode::SetLocal(o) => o.to_bytes().to_vec(),
            Opcode::GetBuiltin(o) => o.to_bytes().to_vec(),
            Opcode::Closure(o) => o.to_bytes().to_vec(),
            Opcode::GetFree(o) => o.to_bytes().to_vec(),
            Opcode::CurrentClosure(o) => o.to_bytes().to_vec(),
        }
    }

    /// Number of operand bytes that follow this opcode's discriminant byte,
    /// as reported by the variant's own `readsize`.
    pub fn readsize(&self) -> usize {
        match self {
            Opcode::Constant(o) => o.readsize(),
            Opcode::Add(o) => o.readsize(),
            Opcode::Pop(o) => o.readsize(),
            Opcode::Sub(o) => o.readsize(),
            Opcode::Mul(o) => o.readsize(),
            Opcode::Div(o) => o.readsize(),
            Opcode::True(o) => o.readsize(),
            Opcode::False(o) => o.readsize(),
            Opcode::Equal(o) => o.readsize(),
            Opcode::NotEqual(o) => o.readsize(),
            Opcode::GreaterThan(o) => o.readsize(),
            Opcode::Minus(o) => o.readsize(),
            Opcode::Bang(o) => o.readsize(),
            Opcode::JumpNotTruthy(o) => o.readsize(),
            Opcode::Jump(o) => o.readsize(),
            Opcode::Null(o) => o.readsize(),
            Opcode::GetGlobal(o) => o.readsize(),
            Opcode::SetGlobal(o) => o.readsize(),
            Opcode::Array(o) => o.readsize(),
            Opcode::Hash(o) => o.readsize(),
            Opcode::Index(o) => o.readsize(),
            Opcode::Call(o) => o.readsize(),
            Opcode::ReturnValue(o) => o.readsize(),
            Opcode::Return(o) => o.readsize(),
            Opcode::GetLocal(o) => o.readsize(),
            Opcode::SetLocal(o) => o.readsize(),
            Opcode::GetBuiltin(o) => o.readsize(),
            Opcode::Closure(o) => o.readsize(),
            Opcode::GetFree(o) => o.readsize(),
            Opcode::CurrentClosure(o) => o.readsize(),
        }
    }
}

// Boilerplate `From` impls so each concrete opcode converts into the
// `Opcode` enum with `.into()`.
impl From<Constant> for Opcode {
    fn from(value: Constant) -> Self {
        Opcode::Constant(value)
    }
}

impl From<Add> for Opcode {
    fn from(value: Add) -> Self {
        Opcode::Add(value)
    }
}

impl From<Pop> for Opcode {
    fn from(value: Pop) -> Self {
        Opcode::Pop(value)
    }
}

impl From<Sub> for Opcode {
    fn from(value: Sub) -> Self {
        Opcode::Sub(value)
    }
}

impl From<Mul> for Opcode {
    fn from(value: Mul) -> Self {
        Opcode::Mul(value)
    }
}

impl From<Div> for Opcode {
    fn from(value: Div) -> Self {
        Opcode::Div(value)
    }
}

impl From<True> for Opcode {
    fn from(value: True) -> Self {
        Opcode::True(value)
    }
}

impl From<False> for Opcode {
    fn from(value: False) -> Self {
        Opcode::False(value)
    }
}

impl From<Equal> for Opcode {
    fn from(value: Equal) -> Self {
        Opcode::Equal(value)
    }
}

impl From<NotEqual> for Opcode {
    fn from(value: NotEqual) -> Self {
        Opcode::NotEqual(value)
    }
}

impl From<GreaterThan> for Opcode {
    fn from(value: GreaterThan) -> Self {
        Opcode::GreaterThan(value)
    }
}

impl From<Minus> for Opcode {
    fn from(value: Minus) -> Self {
        Opcode::Minus(value)
    }
}

impl From<Bang> for Opcode {
    fn from(value: Bang) -> Self {
        Opcode::Bang(value)
    }
}

impl From<JumpNotTruthy> for Opcode {
    fn from(value: JumpNotTruthy) -> Self {
        Opcode::JumpNotTruthy(value)
    }
}

impl From<Jump> for Opcode {
    fn from(value: Jump) -> Self {
        Opcode::Jump(value)
    }
}

impl From<Null> for Opcode {
    fn from(value: Null) -> Self {
        Opcode::Null(value)
    }
}

impl From<GetGlobal> for Opcode {
    fn from(value: GetGlobal) -> Self {
        Opcode::GetGlobal(value)
    }
}

impl From<SetGlobal> for Opcode {
    fn from(value: SetGlobal) -> Self {
        Opcode::SetGlobal(value)
    }
}

impl From<Array> for Opcode {
    fn from(value: Array) -> Self {
        Opcode::Array(value)
    }
}

impl From<Hash> for Opcode {
    fn from(value: Hash) -> Self {
        Opcode::Hash(value)
    }
}

impl From<Index> for Opcode {
    fn from(value: Index) -> Self {
        Opcode::Index(value)
    }
}

impl From<Call> for Opcode {
    fn from(value: Call) -> Self {
        Opcode::Call(value)
    }
}

impl From<ReturnValue> for Opcode {
    fn from(value: ReturnValue) -> Self {
        Opcode::ReturnValue(value)
    }
}

impl From<Return> for Opcode {
    fn from(value: Return) -> Self {
        Opcode::Return(value)
    }
}

impl From<GetLocal> for Opcode {
    fn from(value: GetLocal) -> Self {
        Opcode::GetLocal(value)
    }
}

impl From<SetLocal> for Opcode {
    fn from(value: SetLocal) -> Self {
        Opcode::SetLocal(value)
    }
}

impl From<GetBuiltin> for Opcode {
    fn from(value: GetBuiltin) -> Self {
        Opcode::GetBuiltin(value)
    }
}

impl From<Closure> for Opcode {
    fn from(value: Closure) -> Self {
        Opcode::Closure(value)
    }
}

impl From<GetFree> for Opcode {
    fn from(value: GetFree) -> Self {
        Opcode::GetFree(value)
    }
}

impl From<CurrentClosure> for Opcode {
    fn from(value: CurrentClosure) -> Self {
        Opcode::CurrentClosure(value)
    }
}

impl TryFrom<&[Instruction]> for Opcode {
    type Error = anyhow::Error;

    /// Decodes one instruction from a byte stream: `value[0]` selects the
    /// opcode; operand-carrying opcodes then `try_read` their payload from
    /// `value[1..]`. Opcodes without operands are built directly.
    fn try_from(value: &[Instruction]) -> Result<Self> {
        let ope_type = OperandType::try_from(value[0])?;
        match ope_type {
            OperandType::Constant => Ok(Constant(Constant::try_read(&value[1..])?).into()),
            OperandType::Add => Ok(Add.into()),
            OperandType::Pop => Ok(Pop.into()),
            OperandType::Sub => Ok(Sub.into()),
            OperandType::Mul => Ok(Mul.into()),
            OperandType::Div => Ok(Div.into()),
            OperandType::True => Ok(True.into()),
            OperandType::False => Ok(False.into()),
            OperandType::Equal => Ok(Equal.into()),
            OperandType::NotEqual => Ok(NotEqual.into()),
            OperandType::GreaterThan => Ok(GreaterThan.into()),
            OperandType::Minus => Ok(Minus.into()),
            OperandType::Bang => Ok(Bang.into()),
            OperandType::JumpNotTruthy => {
                Ok(JumpNotTruthy(JumpNotTruthy::try_read(&value[1..])?).into())
            }
            OperandType::Jump => Ok(Jump(Jump::try_read(&value[1..])?).into()),
            OperandType::Null => Ok(Null.into()),
            OperandType::GetGlobal => Ok(GetGlobal(GetGlobal::try_read(&value[1..])?).into()),
            OperandType::SetGlobal => Ok(SetGlobal(SetGlobal::try_read(&value[1..])?).into()),
            OperandType::Array => Ok(Array(Array::try_read(&value[1..])?).into()),
            OperandType::Hash => Ok(Hash(Hash::try_read(&value[1..])?).into()),
            OperandType::Index => Ok(Index.into()),
            OperandType::Call => Ok(Call(Call::try_read(&value[1..])?).into()),
            OperandType::ReturnValue => Ok(ReturnValue.into()),
            OperandType::Return => Ok(Return.into()),
            OperandType::GetLocal => Ok(GetLocal(GetLocal::try_read(&value[1..])?).into()),
            OperandType::SetLocal => Ok(SetLocal(SetLocal::try_read(&value[1..])?).into()),
            OperandType::GetBuiltin => Ok(GetBuiltin(GetBuiltin::try_read(&value[1..])?).into()),
            OperandType::Closure => {
                // Closure carries two operands, so `try_read` yields a pair.
                let res = Closure::try_read(&value[1..])?;
                Ok(Closure(res.0, res.1).into())
            }
            OperandType::GetFree => Ok(GetFree(GetFree::try_read(&value[1..])?).into()),
            OperandType::CurrentClosure => Ok(CurrentClosure.into()),
        }
    }
}

impl Display for Opcode {
    /// Delegates to the wrapped opcode's own `Display` impl.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Self::Constant(o) => write!(f, "{}", o),
            Self::Add(o) => write!(f, "{}", o),
            Self::Pop(o) => write!(f, "{}", o),
            Self::Sub(o) => write!(f, "{}", o),
            Self::Mul(o) => write!(f, "{}", o),
            Self::Div(o) => write!(f, "{}", o),
            Self::True(o) => write!(f, "{}", o),
            Self::False(o) => write!(f, "{}", o),
            Self::Equal(o) => write!(f, "{}", o),
            Self::NotEqual(o) => write!(f, "{}", o),
            Self::GreaterThan(o) => write!(f, "{}", o),
            Self::Minus(o) => write!(f, "{}", o),
            Self::Bang(o) => write!(f, "{}", o),
            Self::JumpNotTruthy(o) => write!(f, "{}", o),
            Self::Jump(o) => write!(f, "{}", o),
            Self::Null(o) => write!(f, "{}", o),
            Self::GetGlobal(o) => write!(f, "{}", o),
            Self::SetGlobal(o) => write!(f, "{}", o),
            Self::Array(o) => write!(f, "{}", o),
            Self::Hash(o) => write!(f, "{}", o),
            Self::Index(o) => write!(f, "{}", o),
            Self::Call(o) => write!(f, "{}", o),
            Self::ReturnValue(o) => write!(f, "{}", o),
            Self::Return(o) => write!(f, "{}", o),
            Self::GetLocal(o) => write!(f, "{}", o),
            Self::SetLocal(o) => write!(f, "{}", o),
            Self::GetBuiltin(o) => write!(f, "{}", o),
            Self::Closure(o) => write!(f, "{}", o),
            Self::GetFree(o) => write!(f, "{}", o),
            Self::CurrentClosure(o) => write!(f, "{}", o),
        }
    }
}

/// Implemented by every opcode struct: ties the struct to its wire-format
/// `OperandType` and a human-readable mnemonic.
pub trait OperandCode {
    const TYPE: OperandType;
    fn ope_type(&self) -> OperandType {
        Self::TYPE
    }
    const NAME: &'static str;
    fn name(&self) -> &'static str {
        Self::NAME
    }
}
/* * Datadog API V1 Collection * * Collection of all Datadog Public endpoints. * * The version of the OpenAPI document: 1.0 * Contact: support@datadoghq.com * Generated by: https://openapi-generator.tech */ /// SyntheticsCiTestBody : Object describing the synthetics tests to trigger. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SyntheticsCiTestBody { /// Individual synthetics test. #[serde(rename = "tests", skip_serializing_if = "Option::is_none")] pub tests: Option<Vec<crate::models::SyntheticsCiTest>>, } impl SyntheticsCiTestBody { /// Object describing the synthetics tests to trigger. pub fn new() -> SyntheticsCiTestBody { SyntheticsCiTestBody { tests: None, } } }
use num_derive::FromPrimitive; use num_enum::IntoPrimitive; use num_traits::FromPrimitive; use bytes::{ Bytes, Buf, BytesMut, BufMut }; #[derive(FromPrimitive, IntoPrimitive, Debug, PartialEq, Copy, Clone)] #[repr(u8)] pub enum Result { Ok = 0x0, Waiting = 0x3, Done = 0x4, Failed = 0x5 } #[derive(Debug, PartialEq, Clone)] pub struct ReplaceFailedNodeResponse { pub result : Result, } impl ReplaceFailedNodeResponse { pub fn encode(&self, dst: &mut BytesMut) { dst.put_u8(self.result.into()); // dst.put_u8(self.result); } pub fn decode(src: &mut Bytes) -> ReplaceFailedNodeResponse { let result : Result = FromPrimitive::from_u8(src.get_u8()).unwrap(); // let result = src.get_u8(); ReplaceFailedNodeResponse { result } } }
use rand; use std::collections::HashMap; type Point = (f64, f64); type Centers = Vec<Point>; type Labels = Vec<usize>; // Can be moved to its own module fn calculate_euclidian_distance(p: Point, q: Point) -> f64 { let (x1, y1) = p; let (x2, y2) = q; let x = x1 - x2; let y = y1 - y2; return (x * x + y * y).sqrt(); } fn init_random(data: &Vec<Point>, count: usize) -> Centers { let mut rng = rand::thread_rng(); let rnd_indexes = rand::seq::index::sample(&mut rng, data.len(), count); return rnd_indexes.iter().map(|i| data[i]).collect(); } enum Init { // Could add more, like PlusPlus Random, Fixed(Centers), // Fixed centers for the initial pass } // Config keys based on sklearn interface struct Config { n_clusters: usize, init: Init, } impl Config { pub fn default() -> Config { let config = Config { n_clusters: 8, init: Init::Random, }; return config; } pub fn n_clusters(mut self, n_clusters: usize) -> Self { self.n_clusters = n_clusters; return self; } pub fn init(mut self, init: Init) -> Self { self.init = init; return self; } } struct KMeans { config: Config, } impl KMeans { pub fn new(config: Config) -> KMeans { return KMeans { config: config }; } // Deviating a bit from the sklearn interface and returning the labels // and centers instead of saving them internally in the KMeans object. 
    /// Runs Lloyd's-style k-means on `data`, returning `(labels, centers)`:
    /// `labels[i]` is the cluster index of `data[i]`, `centers` the final
    /// cluster centers. Consumes `self`.
    pub fn fit(self, data: Vec<Point>) -> (Labels, Centers) {
        // TODO: Check data length is more than 0
        // TODO: Check data length is less than the desired clusters
        let mut centers = match self.config.init {
            Init::Random => init_random(&data, self.config.n_clusters),
            Init::Fixed(centers) => centers,
        };
        let mut labels: Labels = vec![0; data.len()];
        let mut distances: Vec<f64> = vec![0.; data.len()];
        // Calculate distances and assign labels
        for (i, point) in data.iter().enumerate() {
            let mut closest_distance: Option<f64> = None;
            let mut closest_index: usize = 0;
            for (y, center) in centers.iter().enumerate() {
                let distance = calculate_euclidian_distance(*point, *center);
                if closest_distance == None || distance < closest_distance.unwrap() {
                    closest_distance = Some(distance);
                    closest_index = y
                }
            }
            labels[i] = closest_index;
            distances[i] = closest_distance.unwrap();
        }
        // NOTE(review): on the first loop iteration `centers` is overwritten
        // with this all-zero vector, so the initial centers only influence the
        // first labeling above; the loop then converges on recomputed means.
        let mut new_centers = vec![(0., 0.); centers.len()];
        // Iterate until the recomputed centers stop changing.
        while new_centers != centers {
            centers = new_centers;
            new_centers = vec![(0., 0.); centers.len()];
            // calculate new centers based on previously assigned clusters
            // map to sum all points by cluster index and calculate new center points
            let mut points_accumulator: HashMap<usize, (usize, f64, f64)> = HashMap::new();
            for (i, (p1, p2)) in data.iter().enumerate() {
                let cluster_index = labels[i];
                // retrieve entry for the cluster accumulator or add default
                let acc = points_accumulator
                    .entry(cluster_index)
                    .or_insert((0, 0., 0.));
                // add values to entry: (point count, sum x, sum y)
                *acc = (acc.0 + 1, acc.1 + p1, acc.2 + p2);
            }
            // calculate and assign new centers in same place in Vec
            // NOTE(review): clusters that received no points keep (0., 0.) —
            // confirm this is the intended empty-cluster behavior.
            for (i, (count, x, y)) in points_accumulator.iter() {
                new_centers[*i] = (*x / (*count) as f64, *y / (*count) as f64);
            }
            // Calculate distances again and change labels if required.
            // NOTE(review): `closest_distance` starts from the *stale*
            // distance to the point's previous (now-moved) center, so a label
            // only changes when strictly beating that old distance — verify
            // this matches the intended update rule.
            for (i, point) in data.iter().enumerate() {
                let mut closest_distance: Option<f64> = Some(distances[i]);
                let mut closest_index: usize = labels[i];
                for (y, center) in new_centers.iter().enumerate() {
                    let distance = calculate_euclidian_distance(*point, *center);
                    if closest_distance == None || distance < closest_distance.unwrap() {
                        closest_distance = Some(distance);
                        closest_index = y
                    }
                }
                labels[i] = closest_index;
                distances[i] = closest_distance.unwrap();
            }
        }
        return (labels, centers);
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn fit_fixed_init() {
        let config = Config::default()
            .n_clusters(2)
            .init(Init::Fixed(vec![(1., 2.), (4., 2.)]));
        let kmeans = KMeans::new(config);
        let data = vec![(1., 2.), (1., 4.), (1., 0.), (4., 2.), (4., 4.), (4., 0.)];
        let (labels, centers) = kmeans.fit(data);
        assert_eq!(labels, vec![0, 0, 0, 1, 1, 1]);
        assert_eq!(centers, vec![(1., 2.), (4., 2.)]);
    }
}
/// A binary min-heap stored as an implicit tree in a `Vec`:
/// `queue[0]` is the minimum, and the children of node `i` live at
/// `2*i + 1` and `2*i + 2` (see `get_first_child_index`).
#[derive(Debug, Clone)]
struct Heap<T: PartialOrd> {
    queue: Vec<T>,
}

impl<T: PartialOrd + Clone> Heap<T> {
    /// Builds a heap by inserting each item of `items` in turn.
    fn new(items: &[T]) -> Self {
        let mut new_heap = Heap { queue: vec![] };
        for i in items {
            new_heap.add(i.clone());
        }
        new_heap
    }

    /// Inserts `item` and restores the heap invariant by sifting it up.
    fn add(&mut self, item: T) {
        self.queue.push(item);
        self.bubble_up(self.queue.len() - 1);
    }

    /// Sifts the element at `index` up while it is smaller than its parent.
    fn bubble_up(&mut self, index: usize) {
        if let Some(parent_index) = get_parent_index(index) {
            if self.queue[parent_index] > self.queue[index] {
                self.queue.swap(index, parent_index);
                self.bubble_up(parent_index);
            }
        }
    }

    /// Sifts the element at `index` down by swapping it with the smaller of
    /// its (up to two) children and recursing, until it is no larger than
    /// both children.
    ///
    /// Fix: the previous version swapped with the first child and then
    /// possibly the second as well, performing redundant swaps and recursing
    /// into both subtrees; the standard sift-down picks the minimum child
    /// first and does at most one swap (and one recursion) per level.
    fn bubble_down(&mut self, index: usize) {
        let first_child_index = get_first_child_index(index);
        let mut smallest = index;
        // Either child may be absent near the end of the vec; `get` handles that.
        for &child in &[first_child_index, first_child_index + 1] {
            if let Some(value) = self.queue.get(child) {
                if *value < self.queue[smallest] {
                    smallest = child;
                }
            }
        }
        if smallest != index {
            self.queue.swap(index, smallest);
            self.bubble_down(smallest);
        }
    }

    /// Removes and returns the smallest element, or `None` when empty.
    pub fn extract_min(&mut self) -> Option<T> {
        if self.queue.is_empty() {
            return None;
        }
        // Swap the root with the last element, pop the old root off the end,
        // then sift the new root down to restore the invariant.
        let last_index = self.queue.len() - 1;
        self.queue.swap(0, last_index);
        let min = self.queue.pop();
        self.bubble_down(0);
        min
    }
}

// Iterating a heap drains it in ascending order.
impl<T: PartialOrd + Clone> Iterator for Heap<T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.extract_min()
    }
}

/// Parent of node `n` in the implicit tree; the root (0) has no parent.
fn get_parent_index(n: usize) -> Option<usize> {
    if n == 0 {
        return None;
    }
    Some((n - 1) / 2)
}

/// Index of the first (left) child of node `n`; the second child is `+1`.
fn get_first_child_index(n: usize) -> usize {
    if n == 0 {
        return 1;
    }
    2 * n + 1
}

/// Sorts `items` ascending by pushing everything through a min-heap.
pub fn heapsort<T: PartialOrd + Clone>(items: &[T]) -> Vec<T> {
    let new_heap = Heap::new(items);
    let mut new_vec = Vec::new();
    for i in new_heap {
        new_vec.push(i);
    }
    new_vec
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn creating() {
        let new_heap = Heap::new(&[9]);
        assert_eq!(new_heap.queue.len(), 1);
        assert_eq!(new_heap.queue[0], 9);
        check_correctness(&new_heap);
        let new_heap = Heap::new(&[1, 9, 2, 8, 3, 7, 4, 6, 5, 0]);
        assert_eq!(new_heap.queue.len(), 10);
        assert_eq!(new_heap.queue[0], 0);
        check_correctness(&new_heap);
    }

    #[test]
    fn getting_parent() {
        assert_eq!(get_parent_index(0), None);
        assert_eq!(get_parent_index(1), Some(0));
        assert_eq!(get_parent_index(2), Some(0));
        assert_eq!(get_parent_index(3), Some(1));
        assert_eq!(get_parent_index(4), Some(1));
        assert_eq!(get_parent_index(5), Some(2));
    }

    #[test]
    fn getting_first_child() {
        assert_eq!(get_first_child_index(0), 1);
        assert_eq!(get_first_child_index(1), 3);
        assert_eq!(get_first_child_index(2), 5);
        assert_eq!(get_first_child_index(3), 7);
    }

    // Every node must be strictly greater than its parent (distinct inputs).
    fn check_correctness(heap: &Heap<usize>) {
        for i in 0..heap.queue.len() {
            if let Some(parent_index) = get_parent_index(i) {
                assert!(heap.queue[i] > heap.queue[parent_index]);
            }
        }
    }

    #[test]
    fn adding() {
        let mut new_heap = Heap::new(&[9]);
        new_heap.add(1);
        assert_eq!(new_heap.queue.len(), 2);
        assert_eq!(new_heap.queue[0], 1);
        assert_eq!(new_heap.queue[1], 9);
        for i in 2..8 {
            new_heap.add(i);
        }
        check_correctness(&new_heap);
    }

    #[test]
    fn extracting_min() {
        let mut new_heap = Heap::new(&[1, 9, 2, 8, 3, 7, 4, 6, 5, 0]);
        println!("new heap: {:?}", &new_heap);
        for i in 0..10 {
            let min = new_heap.extract_min();
            assert_eq!(min, Some(i));
            check_correctness(&new_heap);
        }
    }

    #[test]
    fn iterating() {
        let new_heap = Heap::new(&[1, 9, 2, 8, 3, 7, 4, 6, 5, 0]);
        for (i, j) in new_heap.enumerate() {
            assert_eq!(i, j);
        }
    }

    #[test]
    fn sorting() {
        let test1 = &[9, 8, 7, 6, 5, 4, 3, 2, 1, 0];
        let sorted = heapsort(test1);
        assert_eq!(sorted, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
    }
}
use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};
use palette::convert::FromColorUnclamped;
use palette::encoding;
use palette::{Hsl, Hsv, Hwb, LinSrgb, Srgb};

// Reuse the integration-test data loader instead of duplicating fixtures.
#[path = "../tests/convert/data_color_mine.rs"]
#[allow(dead_code)]
mod data_color_mine;

use data_color_mine::{load_data, ColorMine};

/*
    Benches the following conversions:
        - rgb to linear
        - rgb to hsl
        - hsv to hsl
        - rgb to hsv
        - hsl to hsv
        - hwb to hsv
        - hsv to hwb
        - xyz to rgb
        - hsl to rgb
        - hsv to rgb
        - linsrgb to rgb
        - rgb_u8 to linsrgb_f32
        - linsrgb_f32 to rgb_u8
*/

/// Criterion benchmark group covering the sRGB-family conversions listed
/// above. Each iteration converts the whole data set; `black_box` prevents
/// the conversions from being optimized away.
fn rgb_conversion(c: &mut Criterion) {
    let mut group = c.benchmark_group("Rgb family");

    let colormine: Vec<ColorMine> = load_data();
    // Derived inputs for the benchmarks that start from sRGB / 8-bit sRGB.
    let rgb: Vec<Srgb> = colormine.iter().map(|x| Srgb::from(x.linear_rgb)).collect();
    let rgb_u8: Vec<Srgb<u8>> = rgb.iter().map(|x| x.into_format().into()).collect();

    // Report throughput as elements (colors) per second.
    group.throughput(Throughput::Elements(colormine.len() as u64));

    group.bench_with_input("rgb to linsrgb", &rgb, |b, rgb| {
        b.iter(|| {
            for c in rgb {
                black_box(c.into_linear());
            }
        })
    });
    group.bench_with_input("rgb to hsl", &rgb, |b, rgb| {
        b.iter(|| {
            for c in rgb {
                black_box(Hsl::from_color_unclamped(*c));
            }
        })
    });
    group.bench_with_input("hsv to hsl", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Hsl::from_color_unclamped(c.hsv));
            }
        })
    });
    group.bench_with_input("rgb to hsv", &rgb, |b, rgb| {
        b.iter(|| {
            for c in rgb {
                black_box(Hsv::from_color_unclamped(*c));
            }
        })
    });
    group.bench_with_input("hsl to hsv", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Hsv::from_color_unclamped(c.hsl));
            }
        })
    });
    group.bench_with_input("hwb to hsv", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                // Hwb -> Hsv needs the encoding spelled out to pick the impl.
                black_box(Hsv::<encoding::Srgb, _>::from_color_unclamped(c.hwb));
            }
        })
    });
    group.bench_with_input("hsv to hwb", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Hwb::from_color_unclamped(c.hsv));
            }
        })
    });
    group.bench_with_input("xyz to linsrgb", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(LinSrgb::from_color_unclamped(c.xyz));
            }
        })
    });
    group.bench_with_input("hsl to rgb", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Srgb::from_color_unclamped(c.hsl));
            }
        })
    });
    group.bench_with_input("hsv to rgb", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Srgb::from_color_unclamped(c.hsv));
            }
        })
    });
    group.bench_with_input("linsrgb to rgb", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Srgb::from_linear(c.linear_rgb));
            }
        })
    });
    group.bench_with_input("rgb_u8 to linsrgb_f32", &rgb_u8, |b, rgb_u8| {
        b.iter(|| {
            for c in rgb_u8 {
                black_box(c.into_format::<f32>().into_linear());
            }
        })
    });
    group.bench_with_input("linsrgb_f32 to rgb_u8", &colormine, |b, colormine| {
        b.iter(|| {
            for c in colormine {
                black_box(Srgb::from_linear(c.linear_rgb).into_format::<u8>());
            }
        })
    });
    group.finish();
}

criterion_group!(benches, rgb_conversion);
criterion_main!(benches);
/// Marker trait to indicate that borrowed references are stable, /// even when the owning object is moved. pub unsafe trait StableBorrow {} unsafe impl<'a> StableBorrow for &'a str {} unsafe impl<'a, T> StableBorrow for &'a [T] {} unsafe impl StableBorrow for String {} unsafe impl StableBorrow for std::path::PathBuf {} unsafe impl<T> StableBorrow for Vec<T> {} unsafe impl<T> StableBorrow for Box<T> {}
use core::ops::{Index, IndexMut}; use alloc::boxed::Box; use super::*; pub mod file; pub const TABLE_SIZE : usize = 480; pub const FT_ENTRY_ALIGN : usize = 32; pub const ENTRIES_PER_TABLE : usize = TABLE_SIZE / FT_ENTRY_ALIGN; #[repr(u8)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum FileType { Null = 0x00, File = 0x01, Directory = 0x02, Device = 0x03, } pub const FILE_TYPE : [FileType; 4] = [ FileType::Null, FileType::File, FileType::Directory, FileType::Device ]; impl Display for FileType { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { FileType::Null => write!(f, "Empty"), FileType::File => write!(f, "File"), FileType::Directory => write!(f, "Dir"), FileType::Device => write!(f, "Device"), } } } #[repr(C, align(512))] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct FileTable { disk : (u8, u8), sector_index : BlockIndex, entries : [FileTableEntry; ENTRIES_PER_TABLE] } impl Default for FileTable { fn default() -> Self { FileTable::new(0) } } impl FileTable { pub fn new(sector : BlockIndex) -> FileTable { FileTable { disk : (0,1), sector_index : sector, entries : [FileTableEntry::empty(); ENTRIES_PER_TABLE] } } pub fn entries(&self) -> [FileTableEntry; ENTRIES_PER_TABLE] { self.entries } pub fn sector_index(&self) -> BlockIndex { self.sector_index } pub unsafe fn raw_entries(&self) -> (*const FileTableEntry, usize) { (self.entries.as_ptr(), self.entries.len()) } pub fn list(&self) -> impl Iterator<Item = &FileTableEntry> { self.entries.iter().filter(|entry| -> bool { return entry.file_type != FileType::Null; }) } pub fn search(&self, name : &str) -> Option<FileTableEntry> { let cached_name : SmallString = SmallString::from_str(name); for entry in self.list() { if cached_name == entry.name() { return Some(*entry); } } None } pub fn create(&mut self, name : &str) -> Option<FileTableEntry> { let result = if let Some(mut entry) = self.get_first_mut_empty_ref() { entry.1.set_filetype(FileType::File); 
entry.1.set_filename(name); entry.1.set_table_index(entry.0); Some(entry.1) } else { panic!("Unable To File Empty File Slot, '{}'",name); }; result } fn get_first_mut_empty_ref(&mut self) -> Option<(usize, FileTableEntry)> { for (index, entry) in self.entries.iter().enumerate() { if entry.filetype() == FileType::Null { return Some((index, *entry)); }; } None } pub fn update_on_disk(&self) { let ptr = ConstPointer::from(self, size_of!(FileTable)); (*ptr.cast::<Block>()).save(self.disk.0, self.disk.1, self.sector_index) } pub fn load_root(disk : (u8, u8)) -> FileTable { let ptr = ConstPointer::from(&Block::load(disk.0, disk.1, 0), size_of!(FileTable)); let mut table = *ptr.cast::<FileTable>(); table.set_disk(disk); table } pub fn set_disk(&mut self, disk : (u8, u8)) { self.disk = disk; } } #[repr(C, align(32))] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct FileTableEntry { file_type : FileType, metanode_index : BlockIndex, bus : u8, disk : u8, table_index : usize, name : SmallString, } impl FileTableEntry { pub fn name(&self) -> SmallString { self.name } pub fn empty() -> Self { Self { table_index : 0, metanode_index : 0, bus : 0, disk : 1, file_type : FileType::Null, name : SmallString::from_str(""), } } pub fn new_file<'a>(table_index : usize, name : &'a str, disk : &Disk, index : BlockIndex) -> Self { Self { metanode_index : index, table_index, file_type : FileType::File, bus : disk.bus, disk : disk.drive, name : SmallString::from_str(name), } } pub fn index(&self) -> BlockIndex { self.metanode_index } pub fn table_index(&self) -> usize { self.table_index } pub fn is_file(&self) -> bool { self.file_type == FileType::File } pub fn is_device(&self) -> bool { self.file_type == FileType::Device } pub fn is_dir(&self) -> bool { self.file_type == FileType::Directory } pub fn update_index(&mut self, index : BlockIndex) { self.metanode_index = index; } pub fn set_filetype(&mut self, filetype : FileType) { self.file_type = filetype; } pub fn set_filename(&mut 
self, name : &str) { self.name = SmallString::from_str(name); } pub fn set_table_index(&mut self, index : usize) { self.table_index = index; } pub fn filetype(&self) -> FileType { self.file_type } pub fn disk_id(&self) -> u8 { self.disk } pub fn bus_id(&self) -> u8 { self.bus } pub fn get_fileinfo(&self) -> FileInfo { let mut ptr = ConstPointer::from(&Block::from_disk_b(self.metanode_index), size_of!(Block)); *ptr.cast::<FileInfo>() } } impl Into<FileTable> for FileTableEntry { fn into(self) -> FileTable { let disk = get_disk(self.bus,self.disk).unwrap(); let sector = get_sector(&disk, self.metanode_index); sector.into() } } impl<> Into<FileTable> for Sector { fn into(self) -> FileTable { FileTable::new(0) } } impl Into<Sector> for FileTable { fn into(self) -> Sector { let sector = Sector::new(self.sector_index()); sector } } impl Into<FileTableEntry> for [u8; FT_ENTRY_ALIGN] { fn into(self) -> FileTableEntry { let ptr = ConstPointer::from(&self, FT_ENTRY_ALIGN); *ptr.cast::<FileTableEntry>() } } impl Into<[u8; FT_ENTRY_ALIGN]> for FileTableEntry { fn into(self) -> [u8; FT_ENTRY_ALIGN] { let ptr = ConstPointer::from(&self, FT_ENTRY_ALIGN); let mut buffer = [0; FT_ENTRY_ALIGN]; ptr.copy_bytes(&mut buffer); buffer } } impl Index<usize> for FileTable { type Output = FileTableEntry; fn index(&self, index: usize) -> &Self::Output { &self.entries[index] } } impl IndexMut<usize> for FileTable { fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.entries[index] } } const SMALL_STR_LEN : usize = 8; #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct SmallString { data : [u8; SMALL_STR_LEN] } impl SmallString { pub fn from_str(text : &str) -> SmallString { assert!(text.len() <= SMALL_STR_LEN); let mut buffer : [u8; SMALL_STR_LEN] = [0;SMALL_STR_LEN]; for (index, byte) in text.bytes().enumerate() { buffer[index] = byte; } SmallString { data : buffer } } /// Converts this into a &[String] by appending the character data onto the end of 
the /// provided &[String] pub fn to_string(&self, text : &mut String) { for byte in self.data { if byte == 0 {return} text.push(byte as char); } } } impl Display for SmallString { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut s = String::new(); self.to_string(&mut s); write!(f, "{}", s) } }
//! This module contains a [`Padding`] setting of a cell on a [`Table`].
//!
//! # Example
//!
#![cfg_attr(feature = "std", doc = "```")]
#![cfg_attr(not(feature = "std"), doc = "```ignore")]
//! use tabled::{Table, settings::{Padding, Style, Modify, object::Cell}};
//!
//! let table = Table::new("2022".chars())
//!     .with(Style::modern())
//!     .with(Modify::new((2, 0)).with(Padding::new(1, 1, 2, 2)))
//!     .to_string();
//!
//! assert_eq!(
//!     table,
//!     concat!(
//!         "┌──────┐\n",
//!         "│ char │\n",
//!         "├──────┤\n",
//!         "│ 2 │\n",
//!         "├──────┤\n",
//!         "│ │\n",
//!         "│ │\n",
//!         "│ 0 │\n",
//!         "│ │\n",
//!         "│ │\n",
//!         "├──────┤\n",
//!         "│ 2 │\n",
//!         "├──────┤\n",
//!         "│ 2 │\n",
//!         "└──────┘",
//!     ),
//! );
//! ```
//!
//! [`Table`]: crate::Table

use crate::{
    grid::{
        color::StaticColor,
        config::{CompactConfig, CompactMultilineConfig},
        config::{Indent, Sides},
    },
    settings::TableOption,
};

#[cfg(feature = "std")]
use crate::grid::{color::AnsiColor, config::ColoredConfig, config::Entity};
#[cfg(feature = "std")]
use crate::settings::CellOption;

/// Padding is responsible for a left/right/top/bottom inner indent of a particular cell.
///
#[cfg_attr(feature = "std", doc = "```")]
#[cfg_attr(not(feature = "std"), doc = "```ignore")]
/// # use tabled::{settings::{Style, Padding, object::Rows, Modify}, Table};
/// # let data: Vec<&'static str> = Vec::new();
/// let table = Table::new(&data).with(Modify::new(Rows::single(0)).with(Padding::new(0, 0, 1, 1).fill('>', '<', '^', 'V')));
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Padding<C = StaticColor> {
    // Per-side indent sizes and fill characters.
    indent: Sides<Indent>,
    // Optional per-side padding colors; `None` leaves coloring untouched.
    colors: Option<Sides<C>>,
}

impl Padding {
    /// Constructs a `Padding` object.
    ///
    /// It uses space (`' '`) as the default fill character.
    /// To set a custom character you can use the [`Padding::fill`] function.
    pub const fn new(left: usize, right: usize, top: usize, bottom: usize) -> Self {
        Self {
            indent: Sides::new(
                Indent::spaced(left),
                Indent::spaced(right),
                Indent::spaced(top),
                Indent::spaced(bottom),
            ),
            colors: None,
        }
    }

    /// Constructs a `Padding` object with all sides set to 0.
    ///
    /// It uses space (`' '`) as the default fill character.
    /// To set a custom character you can use the [`Padding::fill`] function.
    pub const fn zero() -> Self {
        Self::new(0, 0, 0, 0)
    }
}

impl<Color> Padding<Color> {
    /// Sets a fill character for the padding on each side.
    pub const fn fill(mut self, left: char, right: char, top: char, bottom: char) -> Self {
        self.indent.left.fill = left;
        self.indent.right.fill = right;
        self.indent.top.fill = top;
        self.indent.bottom.fill = bottom;
        self
    }

    /// Sets a color for the padding on each side, re-parameterizing the
    /// `Padding` with the new color type.
    pub fn colorize<C>(self, left: C, right: C, top: C, bottom: C) -> Padding<C> {
        Padding {
            indent: self.indent,
            colors: Some(Sides::new(left, right, top, bottom)),
        }
    }
}

#[cfg(feature = "std")]
impl<R, C> CellOption<R, ColoredConfig> for Padding<C>
where
    C: Into<AnsiColor<'static>> + Clone,
{
    fn change(self, _: &mut R, cfg: &mut ColoredConfig, entity: Entity) {
        // Apply the indents first; colors are optional and applied separately.
        let indent = self.indent;
        let pad = Sides::new(indent.left, indent.right, indent.top, indent.bottom);
        cfg.set_padding(entity, pad);

        if let Some(colors) = &self.colors {
            let pad = Sides::new(
                Some(colors.left.clone().into()),
                Some(colors.right.clone().into()),
                Some(colors.top.clone().into()),
                Some(colors.bottom.clone().into()),
            );
            cfg.set_padding_color(entity, pad);
        }
    }
}

#[cfg(feature = "std")]
impl<R, D, C> TableOption<R, D, ColoredConfig> for Padding<C>
where
    C: Into<AnsiColor<'static>> + Clone,
{
    fn change(self, records: &mut R, cfg: &mut ColoredConfig, _: &mut D) {
        // Table-wide application is the cell-level change targeted at every cell.
        <Self as CellOption<R, ColoredConfig>>::change(self, records, cfg, Entity::Global)
    }
}

impl<R, D, C> TableOption<R, D, CompactConfig> for Padding<C>
where
    C: Into<StaticColor> + Clone,
{
    fn change(self, _: &mut R, cfg: &mut CompactConfig, _: &mut D) {
        *cfg = cfg.set_padding(self.indent);

        if let Some(c) = self.colors {
            let colors = Sides::new(c.left.into(), c.right.into(), c.top.into(), c.bottom.into());
            *cfg = cfg.set_padding_color(colors);
        }
    }
}

impl<R, D, C> TableOption<R, D, CompactMultilineConfig> for Padding<C>
where
    C: Into<StaticColor> + Clone,
{
    fn change(self, records: &mut R, cfg: &mut CompactMultilineConfig, dimension: &mut D) {
        // Delegate to the `CompactConfig` implementation on the inner config.
        self.change(records, cfg.as_mut(), dimension)
    }
}
#[macro_use] extern crate log; mod utils; mod rtc_crypto; use yew::{html, Callback, MouseEvent, Component, ComponentLink, Html, ShouldRender}; use wasm_bindgen::prelude::*; use yew_mdc::components::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] extern { fn alert(s: &str); } struct App { clicked: bool, click_count: u32, onclick: Callback<MouseEvent>, } enum Msg { Click, } impl Component for App { type Message = Msg; type Properties = (); fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self { App { clicked: false, click_count: 0, onclick: link.callback(|_| Msg::Click), } } fn update(&mut self, msg: Self::Message) -> ShouldRender { match msg { Msg::Click => { self.clicked = true; self.click_count += 1; true // Indicate that the Component should re-render } } } fn view(&self) -> Html { let button_text = if self.clicked { format!("Clicked {} times!", self.click_count) } else { "Click me!".to_owned() }; html! { <> <Button text="Log In" style=button::Style::Raised onclick=&self.onclick /> <button onclick=&self.onclick>{ button_text }</button> </> } } } #[wasm_bindgen] pub fn start_app() { wasm_logger::init(wasm_logger::Config::default()); rtc_crypto::example(); info!("Oh hi, how's it going? thing"); yew::start_app::<App>(); info!("We done now I guess"); }
use std::{ collections::HashMap, fmt, net::{IpAddr, SocketAddr}, }; #[derive(PartialEq, Hash, Eq, Clone, PartialOrd, Ord, Debug, Copy)] pub enum Protocol { Tcp, Udp, } impl Protocol { #[allow(dead_code)] pub fn from_str(string: &str) -> Option<Self> { match string { "TCP" => Some(Protocol::Tcp), "UDP" => Some(Protocol::Udp), _ => None, } } } impl fmt::Display for Protocol { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Protocol::Tcp => write!(f, "tcp"), Protocol::Udp => write!(f, "udp"), } } } #[derive(Clone, Ord, PartialOrd, PartialEq, Eq, Hash, Debug, Copy)] pub struct Socket { pub ip: IpAddr, pub port: u16, } #[derive(PartialEq, Hash, Eq, Clone, PartialOrd, Ord, Debug, Copy)] pub struct LocalSocket { pub ip: IpAddr, pub port: u16, pub protocol: Protocol, } #[derive(PartialEq, Hash, Eq, Clone, PartialOrd, Ord, Debug, Copy)] pub struct Connection { pub remote_socket: Socket, pub local_socket: LocalSocket, } pub fn display_ip_or_host(ip: IpAddr, ip_to_host: &HashMap<IpAddr, String>) -> String { match ip_to_host.get(&ip) { Some(host) => host.clone(), None => ip.to_string(), } } pub fn display_connection_string( connection: &Connection, ip_to_host: &HashMap<IpAddr, String>, interface_name: &str, ) -> String { format!( "<{interface_name}>:{} => {}:{} ({})", connection.local_socket.port, display_ip_or_host(connection.remote_socket.ip, ip_to_host), connection.remote_socket.port, connection.local_socket.protocol, ) } impl Connection { pub fn new( remote_socket: SocketAddr, local_ip: IpAddr, local_port: u16, protocol: Protocol, ) -> Self { Connection { remote_socket: Socket { ip: remote_socket.ip(), port: remote_socket.port(), }, local_socket: LocalSocket { ip: local_ip, port: local_port, protocol, }, } } }
pub fn gray_code(n: i32) -> Vec<i32> { if n == 0 { return vec![0] } let m = 2usize.pow(n as u32); let mut graycode = vec![0; m]; graycode[1] = 1; let mut delta = 2; for i in 1..n as usize { for j in 0..delta { graycode[delta*2-1-j] = graycode[j] + delta as i32; } delta *= 2; } graycode }
use crate::scene::Scene; use crate::spawns::*; pub trait Factory<E: Entity> { fn init(&mut self, group: Group); fn build(&self, spawn: &Spawn) -> E; } pub trait System<E: Entity> { fn requirements(&self, target: &E) -> bool; fn update(&mut self, spawn: &Spawn, scene: &mut Scene<E>); } pub trait Entity: Default + Clone {} pub trait Component: Default { fn set_active(&mut self, activate: bool); fn is_active(&self) -> &bool; fn active() -> Self { let mut instance = Self::default(); instance.set_active(true); instance } fn inactive() -> Self { let mut instance = Self::default(); instance.set_active(false); instance } }
use crate::adc::{AdcPin, Instance}; use core::convert::Infallible; use core::marker::PhantomData; use cortex_m::delay::Delay; use embassy::util::Unborrow; use embassy_extras::unborrow; use embedded_hal::blocking::delay::{DelayMs, DelayUs}; pub const VDDA_CALIB_MV: u32 = 3000; pub enum Resolution { TwelveBit, TenBit, EightBit, SixBit, } impl Default for Resolution { fn default() -> Self { Self::TwelveBit } } impl Resolution { fn res(&self) -> crate::pac::adc::vals::Res { match self { Resolution::TwelveBit => crate::pac::adc::vals::Res::TWELVEBIT, Resolution::TenBit => crate::pac::adc::vals::Res::TENBIT, Resolution::EightBit => crate::pac::adc::vals::Res::EIGHTBIT, Resolution::SixBit => crate::pac::adc::vals::Res::SIXBIT, } } fn to_max_count(&self) -> u32 { match self { Resolution::TwelveBit => (1 << 12) - 1, Resolution::TenBit => (1 << 10) - 1, Resolution::EightBit => (1 << 8) - 1, Resolution::SixBit => (1 << 6) - 1, } } } pub struct Vref; impl<T: Instance> AdcPin<T> for Vref {} impl<T: Instance> super::sealed::AdcPin<T> for Vref { fn channel(&self) -> u8 { 0 } } pub struct Temperature; impl<T: Instance> AdcPin<T> for Temperature {} impl<T: Instance> super::sealed::AdcPin<T> for Temperature { fn channel(&self) -> u8 { 17 } } pub struct Vbat; impl<T: Instance> AdcPin<T> for Vbat {} impl<T: Instance> super::sealed::AdcPin<T> for Vbat { fn channel(&self) -> u8 { 18 } } /// ADC sample time /// /// The default setting is 2.5 ADC clock cycles. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] pub enum SampleTime { /// 2.5 ADC clock cycles Cycles2_5 = 0b000, /// 6.5 ADC clock cycles Cycles6_5 = 0b001, /// 12.5 ADC clock cycles Cycles12_5 = 0b010, /// 24.5 ADC clock cycles Cycles24_5 = 0b011, /// 47.5 ADC clock cycles Cycles47_5 = 0b100, /// 92.5 ADC clock cycles Cycles92_5 = 0b101, /// 247.5 ADC clock cycles Cycles247_5 = 0b110, /// 640.5 ADC clock cycles Cycles640_5 = 0b111, } impl SampleTime { fn sample_time(&self) -> crate::pac::adc::vals::SampleTime { match self { SampleTime::Cycles2_5 => crate::pac::adc::vals::SampleTime::CYCLES2_5, SampleTime::Cycles6_5 => crate::pac::adc::vals::SampleTime::CYCLES6_5, SampleTime::Cycles12_5 => crate::pac::adc::vals::SampleTime::CYCLES12_5, SampleTime::Cycles24_5 => crate::pac::adc::vals::SampleTime::CYCLES24_5, SampleTime::Cycles47_5 => crate::pac::adc::vals::SampleTime::CYCLES47_5, SampleTime::Cycles92_5 => crate::pac::adc::vals::SampleTime::CYCLES92_5, SampleTime::Cycles247_5 => crate::pac::adc::vals::SampleTime::CYCLES247_5, SampleTime::Cycles640_5 => crate::pac::adc::vals::SampleTime::CYCLES640_5, } } } impl Default for SampleTime { fn default() -> Self { Self::Cycles2_5 } } pub struct Adc<'d, T: Instance> { sample_time: SampleTime, calibrated_vdda: u32, resolution: Resolution, phantom: PhantomData<&'d mut T>, } impl<'d, T: Instance> Adc<'d, T> { pub fn new(_peri: impl Unborrow<Target = T> + 'd, mut delay: Delay) -> (Self, Delay) { unborrow!(_peri); unsafe { T::regs().cr().modify(|reg| { reg.set_deeppwd(false); reg.set_advregen(true); }); } delay.delay_us(20); unsafe { while T::regs().cr().read().adcal() { // spin } } delay.delay_us(1); ( Self { sample_time: Default::default(), resolution: Resolution::default(), calibrated_vdda: VDDA_CALIB_MV, phantom: PhantomData, }, delay, ) } pub fn enable_vref(&self, mut delay: Delay) -> (Vref, Delay) { unsafe { T::common_regs().ccr().modify(|reg| { reg.set_vrefen(true); }); } // "Table 24. 
Embedded internal voltage reference" states that it takes a maximum of 12 us // to stabilize the internal voltage reference, we wait a little more. // TODO: delay 15us //cortex_m::asm::delay(20_000_000); delay.delay_us(15); (Vref {}, delay) } pub fn enable_temperature(&self) -> Temperature { unsafe { T::common_regs().ccr().modify(|reg| { reg.set_ch17sel(true); }); } Temperature {} } pub fn enable_vbat(&self) -> Vbat { unsafe { T::common_regs().ccr().modify(|reg| { reg.set_ch18sel(true); }); } Vbat {} } /// Calculates the system VDDA by sampling the internal VREF channel and comparing /// the result with the value stored at the factory. If the chip's VDDA is not stable, run /// this before each ADC conversion. fn calibrate(&mut self, vref: &mut Vref) { let vref_cal = unsafe { crate::pac::VREFINTCAL.data().read().value() }; let old_sample_time = self.sample_time; // "Table 24. Embedded internal voltage reference" states that the sample time needs to be // at a minimum 4 us. With 640.5 ADC cycles we have a minimum of 8 us at 80 MHz, leaving // some headroom. 
self.sample_time = SampleTime::Cycles640_5; // This can't actually fail, it's just in a result to satisfy hal trait let vref_samp = self.read(vref); self.sample_time = old_sample_time; self.calibrated_vdda = (VDDA_CALIB_MV * u32::from(vref_cal)) / u32::from(vref_samp); } pub fn set_sample_time(&mut self, sample_time: SampleTime) { self.sample_time = sample_time; } pub fn set_resolution(&mut self, resolution: Resolution) { self.resolution = resolution; } /// Convert a measurement to millivolts pub fn to_millivolts(&self, sample: u16) -> u16 { ((u32::from(sample) * self.calibrated_vdda) / self.resolution.to_max_count()) as u16 } /* /// Convert a raw sample from the `Temperature` to deg C pub fn to_degrees_centigrade(sample: u16) -> f32 { (130.0 - 30.0) / (VtempCal130::get().read() as f32 - VtempCal30::get().read() as f32) * (sample as f32 - VtempCal30::get().read() as f32) + 30.0 } */ pub fn read(&mut self, pin: &mut impl AdcPin<T>) -> u16 { let v = pin.channel(); unsafe { // Make sure bits are off while T::regs().cr().read().addis() { // spin } // Enable ADC T::regs().isr().modify(|reg| { reg.set_adrdy(true); }); T::regs().cr().modify(|reg| { reg.set_aden(true); }); while !T::regs().isr().read().adrdy() { // spin } // Configure ADC T::regs() .cfgr() .modify(|reg| reg.set_res(self.resolution.res())); // Configure channel Self::set_channel_sample_time(pin.channel(), self.sample_time); // Select channel T::regs().sqr1().write(|reg| reg.set_sq(0, pin.channel())); // Start conversion T::regs().isr().modify(|reg| { reg.set_eos(true); reg.set_eoc(true); }); T::regs().cr().modify(|reg| { reg.set_adstart(true); }); while !T::regs().isr().read().eos() { // spin } // Read ADC value first time and discard it, as per errata sheet. // The errata states that if we do conversions slower than 1 kHz, the // first read ADC value can be corrupted, so we discard it and measure again. 
let _ = T::regs().dr().read(); T::regs().isr().modify(|reg| { reg.set_eos(true); reg.set_eoc(true); }); T::regs().cr().modify(|reg| { reg.set_adstart(true); }); while !T::regs().isr().read().eos() { // spin } let val = T::regs().dr().read().0 as u16; T::regs().cr().modify(|reg| reg.set_addis(true)); val } } unsafe fn set_channel_sample_time(ch: u8, sample_time: SampleTime) { if ch >= 0 && ch <= 9 { T::regs() .smpr1() .modify(|reg| reg.set_smp(ch as _, sample_time.sample_time())); } else { T::regs() .smpr2() .modify(|reg| reg.set_smp((ch - 10) as _, sample_time.sample_time())); } } }
use actix_web::HttpResponse; pub use index3ds_common::*; pub trait ToHttpResponse { fn http(&self) -> HttpResponse; } impl ToHttpResponse for NcchInfoResponse { fn http(&self) -> HttpResponse { match self { NcchInfoResponse::Ok(_) => HttpResponse::Ok(), NcchInfoResponse::NotFound => HttpResponse::NotFound(), NcchInfoResponse::InternalServerError => HttpResponse::InternalServerError(), } .json(self) } } impl ToHttpResponse for PostNcchResponse { fn http(&self) -> HttpResponse { match self { PostNcchResponse::Finished(_) | PostNcchResponse::AppendNeeded(_) => HttpResponse::Ok(), PostNcchResponse::AlreadyFinished | PostNcchResponse::UnexpectedLength | PostNcchResponse::UnexpectedFormat | PostNcchResponse::VerificationFailed => HttpResponse::BadRequest(), PostNcchResponse::Busy => HttpResponse::ServiceUnavailable(), PostNcchResponse::Conflict(_) => HttpResponse::Conflict(), PostNcchResponse::InternalServerError => HttpResponse::InternalServerError(), PostNcchResponse::NotFound => HttpResponse::NotFound(), } .json(self) } } impl ToHttpResponse for NcchQueryResponse { fn http(&self) -> HttpResponse { match self { NcchQueryResponse::Ok(_) => HttpResponse::Ok(), NcchQueryResponse::InternalServerError => HttpResponse::InternalServerError(), } .json(self) } } impl ToHttpResponse for NcchQueryCountResponse { fn http(&self) -> HttpResponse { match self { NcchQueryCountResponse::Ok(_) => HttpResponse::Ok(), NcchQueryCountResponse::InternalServerError => HttpResponse::InternalServerError(), } .json(self) } }
use std::env; use std::io::Write; use env_logger; pub fn set_logger() { match env::var("RUST_LOG") { Ok(val) => { let log_level: &str = &val; match log_level { "error" => { /* noop */ } "warn" => { /* noop */ } "info" => { /* noop */ } "debug" => { /* noop */ } _ => env::set_var("RUST_LOG", "info"), } } Err(_e) => env::set_var("RUST_LOG", "info"), }; env_logger::Builder::from_default_env() .format(|buf, record| { let ts = buf.timestamp(); writeln!( buf, "[{} {} {} {}:{}] {}", ts, record.level(), record.target(), record.file().unwrap_or("unknown"), record.line().unwrap_or(0), record.args(), ) }) .init(); } pub fn set_http_logger() { match env::var("RUST_LOG") { Ok(val) => { let log_level: &str = &val; match log_level { "error" => { /* noop */ } "warn" => { /* noop */ } "info" => { /* noop */ } "debug" => { /* noop */ } _ => env::set_var("RUST_LOG", "info"), } } Err(_e) => env::set_var("RUST_LOG", "info"), }; env_logger::Builder::from_default_env() .format(|buf, record| { let ts = buf.timestamp(); writeln!(buf, "[{} {}] {}", ts, record.level(), record.args(),) }) .init(); }
//! Traits for testing code that uses [`Instant`] and [`SystemTime`]. //! //! [`Instant`]: std::time::Instant //! [`SystemTime`]: std::time::SystemTime use core::{ops::Add, time::Duration}; use std::{ error::Error, time::{Instant, SystemTime, SystemTimeError}, }; use thiserror::Error; pub trait InstantLike: Add<Duration, Output = Self> + Sized {} pub trait SystemTimeLike: Copy { type Error: Error + Send + Sync + 'static; const UNIX_EPOCH: Self; fn duration_since(&self, earlier: Self) -> Result<Duration, Self::Error>; } impl InstantLike for Instant {} impl SystemTimeLike for SystemTime { type Error = SystemTimeError; const UNIX_EPOCH: Self = Self::UNIX_EPOCH; fn duration_since(&self, earlier: Self) -> Result<Duration, Self::Error> { self.duration_since(earlier) } } /// Time as a [`Duration`] after the Unix epoch. /// /// Representing time this way lets us avoid reimplementing all the time arithmetic. /// We cannot represent times before the Unix epoch, but that is not needed in this project. 
pub type Timespec = Duration; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct FakeInstant(pub Timespec); #[allow(clippy::module_name_repetitions)] #[derive(Clone, Copy)] pub struct FakeSystemTime(pub Timespec); #[derive(Debug, Error)] #[error("{0:?}")] pub struct FakeSystemTimeError(pub Duration); impl Add<Duration> for FakeInstant { type Output = Self; fn add(self, rhs: Duration) -> Self::Output { Self(self.0 + rhs) } } impl InstantLike for FakeInstant {} impl SystemTimeLike for FakeSystemTime { type Error = FakeSystemTimeError; const UNIX_EPOCH: Self = Self(Duration::from_secs(0)); fn duration_since(&self, earlier: Self) -> Result<Duration, Self::Error> { let later = self.0; let earlier = earlier.0; later .checked_sub(earlier) .ok_or_else(|| FakeSystemTimeError(earlier - later)) } } #[cfg(test)] mod system_time_tests { use super::*; #[test] fn has_excellent_test_coverage() { let duration = Duration::from_secs(10000); let earlier = <SystemTime as SystemTimeLike>::UNIX_EPOCH; let later = <SystemTime as SystemTimeLike>::UNIX_EPOCH + duration; let difference = SystemTimeLike::duration_since(&later, earlier).expect("earlier < later"); assert_eq!(difference, duration); } }
#[doc = "Reader of register TRANSMIT_WINDOW_SIZE"] pub type R = crate::R<u32, super::TRANSMIT_WINDOW_SIZE>; #[doc = "Writer for register TRANSMIT_WINDOW_SIZE"] pub type W = crate::W<u32, super::TRANSMIT_WINDOW_SIZE>; #[doc = "Register TRANSMIT_WINDOW_SIZE `reset()`'s with value 0"] impl crate::ResetValue for super::TRANSMIT_WINDOW_SIZE { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `TX_WINDOW_SIZE`"] pub type TX_WINDOW_SIZE_R = crate::R<u8, u8>; #[doc = "Write proxy for field `TX_WINDOW_SIZE`"] pub struct TX_WINDOW_SIZE_W<'a> { w: &'a mut W, } impl<'a> TX_WINDOW_SIZE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } impl R { #[doc = "Bits 0:7 - window_size along with the window_offset is used to calculate the first connection point anchor point for the master. This shall be a multiple of 1.25 ms in the range of 1.25 ms to the lesser of 10 ms and (connInterval - 1.25 ms). Values range from 0 to 10 ms."] #[inline(always)] pub fn tx_window_size(&self) -> TX_WINDOW_SIZE_R { TX_WINDOW_SIZE_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - window_size along with the window_offset is used to calculate the first connection point anchor point for the master. This shall be a multiple of 1.25 ms in the range of 1.25 ms to the lesser of 10 ms and (connInterval - 1.25 ms). Values range from 0 to 10 ms."] #[inline(always)] pub fn tx_window_size(&mut self) -> TX_WINDOW_SIZE_W { TX_WINDOW_SIZE_W { w: self } } }
// NOTE(review): svd2rust-generated accessors for the Ethernet MAC frame filter
// register (MACFFR). Enum variants, bit positions and doc strings come from
// the device SVD — do not hand-edit values here. The trailing `SAF` field
// definition continues in the next part of the file.

#[doc = "Register `MACFFR` reader"]
pub type R = crate::R<MACFFR_SPEC>;
#[doc = "Register `MACFFR` writer"]
pub type W = crate::W<MACFFR_SPEC>;
#[doc = "Field `PM` reader - Promiscuous mode"]
pub type PM_R = crate::BitReader<PM_A>;
#[doc = "Promiscuous mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PM_A {
    #[doc = "0: Normal address filtering"]
    Disabled = 0,
    #[doc = "1: Address filters pass all incoming frames regardless of their destination or source address"]
    Enabled = 1,
}
impl From<PM_A> for bool {
    #[inline(always)]
    fn from(variant: PM_A) -> Self {
        variant as u8 != 0
    }
}
impl PM_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PM_A {
        match self.bits {
            false => PM_A::Disabled,
            true => PM_A::Enabled,
        }
    }
    #[doc = "Normal address filtering"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == PM_A::Disabled
    }
    #[doc = "Address filters pass all incoming frames regardless of their destination or source address"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == PM_A::Enabled
    }
}
#[doc = "Field `PM` writer - Promiscuous mode"]
pub type PM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, PM_A>;
impl<'a, REG, const O: u8> PM_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Normal address filtering"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(PM_A::Disabled)
    }
    #[doc = "Address filters pass all incoming frames regardless of their destination or source address"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(PM_A::Enabled)
    }
}
#[doc = "Field `HU` reader - Hash unicast"]
pub type HU_R = crate::BitReader<HU_A>;
#[doc = "Hash unicast\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HU_A {
    #[doc = "0: MAC performs a perfect destination address filtering for unicast frames"]
    Perfect = 0,
    #[doc = "1: MAC performs destination address filtering of received unicast frames according to the hash table"]
    Hash = 1,
}
impl From<HU_A> for bool {
    #[inline(always)]
    fn from(variant: HU_A) -> Self {
        variant as u8 != 0
    }
}
impl HU_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> HU_A {
        match self.bits {
            false => HU_A::Perfect,
            true => HU_A::Hash,
        }
    }
    #[doc = "MAC performs a perfect destination address filtering for unicast frames"]
    #[inline(always)]
    pub fn is_perfect(&self) -> bool {
        *self == HU_A::Perfect
    }
    #[doc = "MAC performs destination address filtering of received unicast frames according to the hash table"]
    #[inline(always)]
    pub fn is_hash(&self) -> bool {
        *self == HU_A::Hash
    }
}
#[doc = "Field `HU` writer - Hash unicast"]
pub type HU_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, HU_A>;
impl<'a, REG, const O: u8> HU_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "MAC performs a perfect destination address filtering for unicast frames"]
    #[inline(always)]
    pub fn perfect(self) -> &'a mut crate::W<REG> {
        self.variant(HU_A::Perfect)
    }
    #[doc = "MAC performs destination address filtering of received unicast frames according to the hash table"]
    #[inline(always)]
    pub fn hash(self) -> &'a mut crate::W<REG> {
        self.variant(HU_A::Hash)
    }
}
#[doc = "Field `HM` reader - Hash multicast"]
pub type HM_R = crate::BitReader<HM_A>;
#[doc = "Hash multicast\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HM_A {
    #[doc = "0: MAC performs a perfect destination address filtering for multicast frames"]
    Perfect = 0,
    #[doc = "1: MAC performs destination address filtering of received multicast frames according to the hash table"]
    Hash = 1,
}
impl From<HM_A> for bool {
    #[inline(always)]
    fn from(variant: HM_A) -> Self {
        variant as u8 != 0
    }
}
impl HM_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> HM_A {
        match self.bits {
            false => HM_A::Perfect,
            true => HM_A::Hash,
        }
    }
    #[doc = "MAC performs a perfect destination address filtering for multicast frames"]
    #[inline(always)]
    pub fn is_perfect(&self) -> bool {
        *self == HM_A::Perfect
    }
    #[doc = "MAC performs destination address filtering of received multicast frames according to the hash table"]
    #[inline(always)]
    pub fn is_hash(&self) -> bool {
        *self == HM_A::Hash
    }
}
#[doc = "Field `HM` writer - Hash multicast"]
pub type HM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, HM_A>;
impl<'a, REG, const O: u8> HM_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "MAC performs a perfect destination address filtering for multicast frames"]
    #[inline(always)]
    pub fn perfect(self) -> &'a mut crate::W<REG> {
        self.variant(HM_A::Perfect)
    }
    #[doc = "MAC performs destination address filtering of received multicast frames according to the hash table"]
    #[inline(always)]
    pub fn hash(self) -> &'a mut crate::W<REG> {
        self.variant(HM_A::Hash)
    }
}
#[doc = "Field `DAIF` reader - Destination address unique filtering"]
pub type DAIF_R = crate::BitReader<DAIF_A>;
#[doc = "Destination address unique filtering\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DAIF_A {
    #[doc = "0: Normal filtering of frames"]
    Normal = 0,
    #[doc = "1: Address check block operates in inverse filtering mode for the DA address comparison"]
    Invert = 1,
}
impl From<DAIF_A> for bool {
    #[inline(always)]
    fn from(variant: DAIF_A) -> Self {
        variant as u8 != 0
    }
}
impl DAIF_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DAIF_A {
        match self.bits {
            false => DAIF_A::Normal,
            true => DAIF_A::Invert,
        }
    }
    #[doc = "Normal filtering of frames"]
    #[inline(always)]
    pub fn is_normal(&self) -> bool {
        *self == DAIF_A::Normal
    }
    #[doc = "Address check block operates in inverse filtering mode for the DA address comparison"]
    #[inline(always)]
    pub fn is_invert(&self) -> bool {
        *self == DAIF_A::Invert
    }
}
#[doc = "Field `DAIF` writer - Destination address unique filtering"]
pub type DAIF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DAIF_A>;
impl<'a, REG, const O: u8> DAIF_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Normal filtering of frames"]
    #[inline(always)]
    pub fn normal(self) -> &'a mut crate::W<REG> {
        self.variant(DAIF_A::Normal)
    }
    #[doc = "Address check block operates in inverse filtering mode for the DA address comparison"]
    #[inline(always)]
    pub fn invert(self) -> &'a mut crate::W<REG> {
        self.variant(DAIF_A::Invert)
    }
}
#[doc = "Field `PAM` reader - Pass all multicast"]
pub type PAM_R = crate::BitReader<PAM_A>;
#[doc = "Pass all multicast\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PAM_A {
    #[doc = "0: Filtering of multicast frames depends on HM"]
    Disabled = 0,
    #[doc = "1: All received frames with a multicast destination address are passed"]
    Enabled = 1,
}
impl From<PAM_A> for bool {
    #[inline(always)]
    fn from(variant: PAM_A) -> Self {
        variant as u8 != 0
    }
}
impl PAM_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PAM_A {
        match self.bits {
            false => PAM_A::Disabled,
            true => PAM_A::Enabled,
        }
    }
    #[doc = "Filtering of multicast frames depends on HM"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == PAM_A::Disabled
    }
    #[doc = "All received frames with a multicast destination address are passed"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == PAM_A::Enabled
    }
}
#[doc = "Field `PAM` writer - Pass all multicast"]
pub type PAM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, PAM_A>;
impl<'a, REG, const O: u8> PAM_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Filtering of multicast frames depends on HM"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(PAM_A::Disabled)
    }
    #[doc = "All received frames with a multicast destination address are passed"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(PAM_A::Enabled)
    }
}
#[doc = "Field `BFD` reader - Broadcast frames disable"]
pub type BFD_R = crate::BitReader<BFD_A>;
#[doc = "Broadcast frames disable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BFD_A {
    #[doc = "0: Address filters pass all received broadcast frames"]
    Enabled = 0,
    #[doc = "1: Address filters filter all incoming broadcast frames"]
    Disabled = 1,
}
impl From<BFD_A> for bool {
    #[inline(always)]
    fn from(variant: BFD_A) -> Self {
        variant as u8 != 0
    }
}
impl BFD_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BFD_A {
        match self.bits {
            false => BFD_A::Enabled,
            true => BFD_A::Disabled,
        }
    }
    #[doc = "Address filters pass all received broadcast frames"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == BFD_A::Enabled
    }
    #[doc = "Address filters filter all incoming broadcast frames"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == BFD_A::Disabled
    }
}
#[doc = "Field `BFD` writer - Broadcast frames disable"]
pub type BFD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BFD_A>;
impl<'a, REG, const O: u8> BFD_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Address filters pass all received broadcast frames"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(BFD_A::Enabled)
    }
    #[doc = "Address filters filter all incoming broadcast frames"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(BFD_A::Disabled)
    }
}
#[doc = "Field `PCF` reader - Pass control frames"]
pub type PCF_R = crate::FieldReader<PCF_A>;
#[doc = "Pass control frames\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum PCF_A {
    #[doc = "0: MAC prevents all control frames from reaching the application"]
    PreventAll = 0,
    #[doc = "1: MAC forwards all control frames to application except Pause"]
    ForwardAllExceptPause = 1,
    #[doc = "2: MAC forwards all control frames to application even if they fail the address filter"]
    ForwardAll = 2,
    #[doc = "3: MAC forwards control frames that pass the address filter"]
    ForwardAllFiltered = 3,
}
impl From<PCF_A> for u8 {
    #[inline(always)]
    fn from(variant: PCF_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for PCF_A {
    type Ux = u8;
}
impl PCF_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PCF_A {
        match self.bits {
            0 => PCF_A::PreventAll,
            1 => PCF_A::ForwardAllExceptPause,
            2 => PCF_A::ForwardAll,
            3 => PCF_A::ForwardAllFiltered,
            // 2-bit field: values above 3 cannot occur.
            _ => unreachable!(),
        }
    }
    #[doc = "MAC prevents all control frames from reaching the application"]
    #[inline(always)]
    pub fn is_prevent_all(&self) -> bool {
        *self == PCF_A::PreventAll
    }
    #[doc = "MAC forwards all control frames to application except Pause"]
    #[inline(always)]
    pub fn is_forward_all_except_pause(&self) -> bool {
        *self == PCF_A::ForwardAllExceptPause
    }
    #[doc = "MAC forwards all control frames to application even if they fail the address filter"]
    #[inline(always)]
    pub fn is_forward_all(&self) -> bool {
        *self == PCF_A::ForwardAll
    }
    #[doc = "MAC forwards control frames that pass the address filter"]
    #[inline(always)]
    pub fn is_forward_all_filtered(&self) -> bool {
        *self == PCF_A::ForwardAllFiltered
    }
}
#[doc = "Field `PCF` writer - Pass control frames"]
pub type PCF_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, PCF_A>;
impl<'a, REG, const O: u8> PCF_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "MAC prevents all control frames from reaching the application"]
    #[inline(always)]
    pub fn prevent_all(self) -> &'a mut crate::W<REG> {
        self.variant(PCF_A::PreventAll)
    }
    #[doc = "MAC forwards all control frames to application except Pause"]
    #[inline(always)]
    pub fn forward_all_except_pause(self) -> &'a mut crate::W<REG> {
        self.variant(PCF_A::ForwardAllExceptPause)
    }
    #[doc = "MAC forwards all control frames to application even if they fail the address filter"]
    #[inline(always)]
    pub fn forward_all(self) -> &'a mut crate::W<REG> {
        self.variant(PCF_A::ForwardAll)
    }
    #[doc = "MAC forwards control frames that pass the address filter"]
    #[inline(always)]
    pub fn forward_all_filtered(self) -> &'a mut crate::W<REG> {
        self.variant(PCF_A::ForwardAllFiltered)
    }
}
#[doc = "Field `SAIF` reader - Source address inverse filtering"]
pub type SAIF_R = crate::BitReader<SAIF_A>;
#[doc = "Source address inverse filtering\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SAIF_A {
    #[doc = "0: Source address filter operates normally"]
    Normal = 0,
    #[doc = "1: Source address filter operation inverted"]
    Invert = 1,
}
impl From<SAIF_A> for bool {
    #[inline(always)]
    fn from(variant: SAIF_A) -> Self {
        variant as u8 != 0
    }
}
impl SAIF_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> SAIF_A {
        match self.bits {
            false => SAIF_A::Normal,
            true => SAIF_A::Invert,
        }
    }
    #[doc = "Source address filter operates normally"]
    #[inline(always)]
    pub fn is_normal(&self) -> bool {
        *self == SAIF_A::Normal
    }
    #[doc = "Source address filter operation inverted"]
    #[inline(always)]
    pub fn is_invert(&self) -> bool {
        *self == SAIF_A::Invert
    }
}
#[doc = "Field `SAIF` writer - Source address inverse filtering"]
pub type SAIF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, SAIF_A>;
impl<'a, REG, const O: u8> SAIF_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Source address filter operates normally"]
    #[inline(always)]
    pub fn normal(self) -> &'a mut crate::W<REG> {
        self.variant(SAIF_A::Normal)
    }
    #[doc = "Source address filter operation inverted"]
    #[inline(always)]
    pub fn invert(self) -> &'a mut crate::W<REG> {
        self.variant(SAIF_A::Invert)
    }
}
#[doc = "Field `SAF` reader - Source address filter"]
pub type SAF_R = crate::BitReader<SAF_A>;
#[doc = "Source address filter\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SAF_A {
    #[doc = "0: Source address ignored"]
    Disabled = 0,
    #[doc = "1: MAC drops frames that fail
the source address filter"] Enabled = 1, } impl From<SAF_A> for bool { #[inline(always)] fn from(variant: SAF_A) -> Self { variant as u8 != 0 } } impl SAF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SAF_A { match self.bits { false => SAF_A::Disabled, true => SAF_A::Enabled, } } #[doc = "Source address ignored"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == SAF_A::Disabled } #[doc = "MAC drops frames that fail the source address filter"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == SAF_A::Enabled } } #[doc = "Field `SAF` writer - Source address filter"] pub type SAF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, SAF_A>; impl<'a, REG, const O: u8> SAF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Source address ignored"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(SAF_A::Disabled) } #[doc = "MAC drops frames that fail the source address filter"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(SAF_A::Enabled) } } #[doc = "Field `HPF` reader - Hash or perfect filter"] pub type HPF_R = crate::BitReader<HPF_A>; #[doc = "Hash or perfect filter\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HPF_A { #[doc = "0: If HM or HU is set, only frames that match the Hash filter are passed"] HashOnly = 0, #[doc = "1: If HM or HU is set, frames that match either the perfect filter or the hash filter are passed"] HashOrPerfect = 1, } impl From<HPF_A> for bool { #[inline(always)] fn from(variant: HPF_A) -> Self { variant as u8 != 0 } } impl HPF_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> HPF_A { match self.bits { false => HPF_A::HashOnly, true => HPF_A::HashOrPerfect, } } #[doc = "If HM or HU is set, only frames that match the Hash filter are passed"] #[inline(always)] pub fn is_hash_only(&self) -> bool { *self == HPF_A::HashOnly } 
#[doc = "If HM or HU is set, frames that match either the perfect filter or the hash filter are passed"] #[inline(always)] pub fn is_hash_or_perfect(&self) -> bool { *self == HPF_A::HashOrPerfect } } #[doc = "Field `HPF` writer - Hash or perfect filter"] pub type HPF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, HPF_A>; impl<'a, REG, const O: u8> HPF_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "If HM or HU is set, only frames that match the Hash filter are passed"] #[inline(always)] pub fn hash_only(self) -> &'a mut crate::W<REG> { self.variant(HPF_A::HashOnly) } #[doc = "If HM or HU is set, frames that match either the perfect filter or the hash filter are passed"] #[inline(always)] pub fn hash_or_perfect(self) -> &'a mut crate::W<REG> { self.variant(HPF_A::HashOrPerfect) } } #[doc = "Field `RA` reader - Receive all"] pub type RA_R = crate::BitReader<RA_A>; #[doc = "Receive all\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum RA_A { #[doc = "0: MAC receiver passes on to the application only those frames that have passed the SA/DA address file"] Disabled = 0, #[doc = "1: MAC receiver passes oll received frames on to the application"] Enabled = 1, } impl From<RA_A> for bool { #[inline(always)] fn from(variant: RA_A) -> Self { variant as u8 != 0 } } impl RA_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RA_A { match self.bits { false => RA_A::Disabled, true => RA_A::Enabled, } } #[doc = "MAC receiver passes on to the application only those frames that have passed the SA/DA address file"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == RA_A::Disabled } #[doc = "MAC receiver passes oll received frames on to the application"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == RA_A::Enabled } } #[doc = "Field `RA` writer - Receive all"] pub type RA_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, RA_A>; impl<'a, REG, const O: u8> 
RA_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "MAC receiver passes on to the application only those frames that have passed the SA/DA address file"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(RA_A::Disabled) } #[doc = "MAC receiver passes oll received frames on to the application"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(RA_A::Enabled) } } impl R { #[doc = "Bit 0 - Promiscuous mode"] #[inline(always)] pub fn pm(&self) -> PM_R { PM_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - Hash unicast"] #[inline(always)] pub fn hu(&self) -> HU_R { HU_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - Hash multicast"] #[inline(always)] pub fn hm(&self) -> HM_R { HM_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - Destination address unique filtering"] #[inline(always)] pub fn daif(&self) -> DAIF_R { DAIF_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - Pass all multicast"] #[inline(always)] pub fn pam(&self) -> PAM_R { PAM_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 5 - Broadcast frames disable"] #[inline(always)] pub fn bfd(&self) -> BFD_R { BFD_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bits 6:7 - Pass control frames"] #[inline(always)] pub fn pcf(&self) -> PCF_R { PCF_R::new(((self.bits >> 6) & 3) as u8) } #[doc = "Bit 8 - Source address inverse filtering"] #[inline(always)] pub fn saif(&self) -> SAIF_R { SAIF_R::new(((self.bits >> 8) & 1) != 0) } #[doc = "Bit 9 - Source address filter"] #[inline(always)] pub fn saf(&self) -> SAF_R { SAF_R::new(((self.bits >> 9) & 1) != 0) } #[doc = "Bit 10 - Hash or perfect filter"] #[inline(always)] pub fn hpf(&self) -> HPF_R { HPF_R::new(((self.bits >> 10) & 1) != 0) } #[doc = "Bit 31 - Receive all"] #[inline(always)] pub fn ra(&self) -> RA_R { RA_R::new(((self.bits >> 31) & 1) != 0) } } impl W { #[doc = "Bit 0 - Promiscuous mode"] #[inline(always)] #[must_use] pub fn pm(&mut self) -> PM_W<MACFFR_SPEC, 0> 
{ PM_W::new(self) } #[doc = "Bit 1 - Hash unicast"] #[inline(always)] #[must_use] pub fn hu(&mut self) -> HU_W<MACFFR_SPEC, 1> { HU_W::new(self) } #[doc = "Bit 2 - Hash multicast"] #[inline(always)] #[must_use] pub fn hm(&mut self) -> HM_W<MACFFR_SPEC, 2> { HM_W::new(self) } #[doc = "Bit 3 - Destination address unique filtering"] #[inline(always)] #[must_use] pub fn daif(&mut self) -> DAIF_W<MACFFR_SPEC, 3> { DAIF_W::new(self) } #[doc = "Bit 4 - Pass all multicast"] #[inline(always)] #[must_use] pub fn pam(&mut self) -> PAM_W<MACFFR_SPEC, 4> { PAM_W::new(self) } #[doc = "Bit 5 - Broadcast frames disable"] #[inline(always)] #[must_use] pub fn bfd(&mut self) -> BFD_W<MACFFR_SPEC, 5> { BFD_W::new(self) } #[doc = "Bits 6:7 - Pass control frames"] #[inline(always)] #[must_use] pub fn pcf(&mut self) -> PCF_W<MACFFR_SPEC, 6> { PCF_W::new(self) } #[doc = "Bit 8 - Source address inverse filtering"] #[inline(always)] #[must_use] pub fn saif(&mut self) -> SAIF_W<MACFFR_SPEC, 8> { SAIF_W::new(self) } #[doc = "Bit 9 - Source address filter"] #[inline(always)] #[must_use] pub fn saf(&mut self) -> SAF_W<MACFFR_SPEC, 9> { SAF_W::new(self) } #[doc = "Bit 10 - Hash or perfect filter"] #[inline(always)] #[must_use] pub fn hpf(&mut self) -> HPF_W<MACFFR_SPEC, 10> { HPF_W::new(self) } #[doc = "Bit 31 - Receive all"] #[inline(always)] #[must_use] pub fn ra(&mut self) -> RA_W<MACFFR_SPEC, 31> { RA_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "Ethernet MAC frame filter register (ETH_MACCFFR)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`macffr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`macffr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. 
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct MACFFR_SPEC; impl crate::RegisterSpec for MACFFR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`macffr::R`](R) reader structure"] impl crate::Readable for MACFFR_SPEC {} #[doc = "`write(|w| ..)` method takes [`macffr::W`](W) writer structure"] impl crate::Writable for MACFFR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets MACFFR to value 0"] impl crate::Resettable for MACFFR_SPEC { const RESET_VALUE: Self::Ux = 0; }
mod math;

use math::ray::hitable::{HitRecord, Hitable, HitableList, Sphere};
use math::ray::Ray;
use math::vec::Vec3;
use std::f32;
use std::io::{self, Write};

/// Renders a simple two-sphere scene to stdout as a plain-text PPM (P3) image.
fn main() {
    let nx: i32 = 200;
    let ny: i32 = 100;

    // Lock stdout once and buffer it: `println!` per pixel re-locks stdout and
    // issues a write for every one of the nx*ny pixels.
    let stdout = io::stdout();
    let mut out = io::BufWriter::new(stdout.lock());

    write!(out, "P3\n{} {}\n255\n", nx, ny).expect("write PPM header");

    // Camera: viewport spans [-2, 2] x [-1, 1] on the z = -1 plane.
    let lower_left_corner = Vec3::new(-2.0, -1.0, -1.0);
    let horizontal = Vec3::new(4.0, 0.0, 0.0);
    let vertical = Vec3::new(0.0, 2.0, 0.0);
    let origin = Vec3::new(0.0, 0.0, 0.0);

    // Scene: a small sphere and a huge "ground" sphere below it.
    // `dyn` makes the trait-object type explicit (bare `Box<Hitable>` is deprecated).
    let hitable_list: Vec<Box<dyn Hitable>> = vec![
        Box::new(Sphere::new(0.0, 0.0, -1.0, 0.5)),
        Box::new(Sphere::new(0.0, -100.5, -1.0, 100.0)),
    ];
    let world = HitableList::new(hitable_list);

    // PPM rows run top to bottom, hence the reversed y loop.
    for j in (0..ny).rev() {
        for i in 0..nx {
            let u = i as f32 / nx as f32;
            let v = j as f32 / ny as f32;
            let r = Ray::new(
                &origin,
                &lower_left_corner + &horizontal * u + &vertical * v,
            );
            let col = color(&r, &world);
            // Scale [0, 1) channels to [0, 255] integers.
            let ir: i32 = (255.99 * col[0]) as i32;
            let ig: i32 = (255.99 * col[1]) as i32;
            let ib: i32 = (255.99 * col[2]) as i32;
            writeln!(out, "{} {} {}", ir, ig, ib).expect("write pixel");
        }
    }
}

/// Shades a ray: surface-normal coloring on a hit, otherwise a white-to-blue
/// vertical sky gradient.
fn color(r: &Ray, world: &dyn Hitable) -> Vec3 {
    if let Some(HitRecord { t: _, p: _, normal }) = world.hit(r, 0.0, f32::MAX) {
        // Map each normal component from [-1, 1] into [0, 1].
        &Vec3::new(normal.x() + 1.0, normal.y() + 1.0, normal.z() + 1.0) * 0.5
    } else {
        let unit_direction = r.direction().unit_vector();
        let t = 0.5 * (unit_direction.y() + 1.0f32);
        // Lerp between white and light blue based on ray height.
        &Vec3::new(1.0, 1.0, 1.0) * (1.0 - t) + &Vec3::new(0.5, 0.7, 1.0) * t
    }
}
use bit_field::BitArray;
// NOTE(review): `min` appears unused in this file — confirm before removing.
use bitflags::_core::cmp::min;

fn main() {
    // println!("Hello, world!");
    //
    // let mut t = SegmentTreeAllocator::new(8);
    // t.alloc();
    // t.alloc();
    // t.alloc();
    // t.alloc();
    // t.alloc();
    //
    // // t.dealloc(1);
    // let mut a = vec![1u8,2u8,3u8];
    // a.set_bit(4,true);
    // println!("{:?}",a);
    // println!("{:?}",a.get_bit(9));
    // println!("{:?}",a.get_bit(14));
    // println!("{:?}",a.get_bit(15));
    // println!("{:?}",a.get_bit(16));
    // let mut t = SegmentTree::new(6);
    // t.alloc();
    // t.alloc();
    // t.alloc();
    // t.alloc();
    // t.alloc();
    //
    // // t.dealloc(1);
}

/// A slot allocator backed by a complete binary tree stored as a bit array:
/// a node's bit is 1 when every leaf below it is allocated.
pub struct SegmentTreeAllocator {
    /// The tree itself. Node `i` lives at bit index `i`; leaves start at bit
    /// index `leaf_count` (= `tree.len() / 2`).
    // NOTE(review): `2 * leaf_count` BYTES are allocated but only
    // `2 * leaf_count` BITS are used — compare `SegmentTree::new` below, which
    // allocates `2 * leaf_count / 8` bytes. Confirm the oversizing is intended.
    tree: Vec<u8>,
}

impl SegmentTreeAllocator {
    /// Builds an allocator with `capacity` usable slots (at least 8).
    /// Leaves beyond `capacity` are pre-marked as used so they can never be
    /// handed out.
    fn new(capacity: usize) -> Self {
        assert!(capacity >= 8);
        // Number of leaves of the complete binary tree (capacity rounded up
        // to a power of two).
        println!("开始新建一棵树");
        let leaf_count = capacity.next_power_of_two();
        println!("leaf_count = {}", leaf_count);
        let mut tree = vec![0u8; 2 * leaf_count];
        println!("tree = {:?}, len = {}", tree, tree.len());
        // Mask off the tail slots that fall outside `capacity`: first whole
        // bytes at a time...
        println!("((capacity + 7) / 8) = {}, (leaf_count / 8) = {}.",((capacity + 7) / 8), (leaf_count / 8));
        for i in ((capacity + 7) / 8)..(leaf_count / 8) {
            tree[leaf_count / 8 + i] = 255u8;
        }
        println!("tree = {:?}, len = {}", tree, tree.len());
        // ...then bit by bit for the partial byte.
        // NOTE(review): this sets bits `capacity..capacity + 8`, which can run
        // past `leaf_count` into unused storage — confirm `capacity..leaf_count`
        // wasn't intended (cf. `SegmentTree::new` below).
        for i in capacity..(capacity + 8) {
            tree.set_bit(leaf_count + i, true);
        }
        println!("tree = {:?}, len = {}", tree, tree.len());
        // Propagate up the branches: a node is full iff both children are full.
        for i in (1..leaf_count).rev() {
            let v = tree.get_bit(i * 2) && tree.get_bit(i * 2 + 1);
            tree.set_bit(i, v);
        }
        println!("{:?}", tree);
        println!("树建立结束\n");
        Self { tree }
    }

    /// Allocates one slot and returns its index, or `None` when full.
    fn alloc(&mut self) -> Option<usize> {
        if self.tree.get_bit(1) {
            // Root bit set means every leaf is taken.
            None
        } else {
            println!("开始alloc");
            let mut node = 1;
            // Walk down until we reach a leaf whose bit is 0.
            while node < self.tree.len() / 2 {
                if !self.tree.get_bit(node * 2) {
                    node *= 2;
                } else if !self.tree.get_bit(node * 2 + 1) {
                    node = node * 2 + 1;
                } else {
                    panic!("tree is full or damaged");
                }
            }
            // Sanity check: the chosen leaf must still be free.
            assert!(!self.tree.get_bit(node), "tree is damaged");
            // Mark the leaf used and refresh its ancestors.
            self.update_node(node, true);
            println!("tree = {:?}, len = {}", self.tree,
self.tree.len());
            println!("alloc结束");
            Some(node - self.tree.len() / 2)
        }
    }

    /// Frees a previously allocated slot. Panics if `index` was not allocated.
    fn dealloc(&mut self, index: usize) {
        let node = index + self.tree.len() / 2;
        assert!(self.tree.get_bit(node));
        self.update_node(node, false);
        println!("tree = {:?}, len = {}", self.tree, self.tree.len());
    }
}

impl SegmentTreeAllocator {
    /// Updates one leaf of the segment tree, then recursively updates its
    /// ancestors (each parent = AND of its two children).
    fn update_node(&mut self, mut index: usize, value: bool) {
        println!("正在更新祖先");
        self.tree.set_bit(index, value);
        while index > 1 {
            index /= 2;
            let v = self.tree.get_bit(index * 2) && self.tree.get_bit(index * 2 + 1);
            self.tree.set_bit(index, v);
        }
        println!("祖先更新结束");
    }
}

/// Compact variant of the allocator above: same tree layout, but the byte
/// buffer is sized to exactly `2 * leaf_count` bits.
pub struct SegmentTree {
    /// Bit array holding the tree nodes.
    segment: Vec<u8>,
    /// Number of usable slots requested by the caller.
    capacity: usize,
    /// Leaves of the complete tree (`capacity` rounded up to a power of two).
    leaf_count: usize,
}

impl SegmentTree {
    /// Builds a tree with `capacity` usable slots; out-of-range leaves are
    /// pre-marked used.
    fn new(capacity: usize) -> Self {
        println!("开始新建一棵树");
        let leaf_count = capacity.next_power_of_two();
        let mut tree = vec![0u8; (2 * leaf_count / 8)];
        // Mark leaves beyond `capacity` as permanently allocated.
        for i in capacity..leaf_count {
            tree.set_bit(leaf_count + i, true);
        }
        // Bottom-up: parent = AND of children.
        for i in (1..leaf_count).rev() {
            let v = tree.get_bit(i * 2) && tree.get_bit(i * 2 + 1);
            tree.set_bit(i, v);
        }
        println!("{:?}", tree);
        println!("树建立结束");
        Self { segment: tree, capacity: capacity, leaf_count: leaf_count }
    }

    /// Allocates one slot and returns its index, or `None` when full.
    fn alloc(&mut self) -> Option<usize> {
        if self.segment.get_bit(1) {
            None
        } else {
            let mut node = 1;
            // Descend towards any leaf whose bit is still 0.
            while node < self.leaf_count {
                if !self.segment.get_bit(node * 2) {
                    node *= 2;
                } else if !self.segment.get_bit(node * 2 + 1) {
                    node = node * 2 + 1;
                } else {
                    panic!("tree damaged");
                }
            }
            self.update_node(node, true);
            println!("alloc开始");
            println!("{:?}", self.segment);
            println!("alloc结束");
            Some(node - self.leaf_count)
        }
    }

    /// Frees a previously allocated slot. Panics if `index` was not allocated.
    fn dealloc(&mut self, index: usize) {
        let node = index + self.leaf_count;
        assert!(self.segment.get_bit(node));
        self.update_node(node, false);
        println!("dealloc开始");
        println!("{:?}", self.segment);
        println!("dealloc结束");
    }

    /// Sets one leaf and refreshes its ancestors (parent = AND of children).
    fn update_node(&mut self, mut index: usize, value: bool) {
        self.segment.set_bit(index, value);
        while index > 1 {
            index /= 2;
            let v = self.segment.get_bit(index * 2) && self.segment.get_bit(index * 2 + 1);
            self.segment.set_bit(index, v);
        }
    }
}
use std::mem;
// NOTE(review): `BTreeMap` appears unused in this file — confirm before removing.
use std::collections::BTreeMap;
use lv2_raw::atom::*;
use lv2_raw::urid::LV2_URID as LV2_URID;

/// Iterator over the events stored inside an `LV2_Atom_Sequence`.
///
/// Works on raw pointers: the caller must keep the underlying atom buffer
/// alive and unmoved for the iterator's whole lifetime (not enforced here).
pub struct AtomSequenceIter {
    /// The sequence being iterated.
    pub seq: *const LV2_Atom_Sequence,
    /// Pointer to the next event to be yielded.
    pub next: *const LV2_Atom_Event,
    /// End address (one past the last byte) of the sequence body.
    pub total: usize,
}

/// One event pulled out of a sequence: its type, timestamp, and a raw view
/// of its body bytes.
pub struct SequenceData {
    /// URID identifying the type of the event body.
    pub data_type: LV2_URID,
    /// Event timestamp, in frames.
    pub time_frames: i64, // LV2_Atom_Event_Time,
    /// Body bytes, starting immediately after the event header.
    pub data: *const u8,
    /// Body size in bytes (unpadded).
    pub size: usize
}

impl AtomSequenceIter {
    /// Creates an iterator positioned at the first event of `seq`.
    pub fn new(seq: *const LV2_Atom_Sequence) -> AtomSequenceIter {
        unsafe {
            AtomSequenceIter {
                seq: seq,
                // The first event starts directly after the sequence header.
                next: seq.offset(1) as *const LV2_Atom_Event,
                // End address = base + atom header size + body size, with
                // address arithmetic overflow checked explicitly.
                total: (seq as usize)
                    .checked_add((*seq).atom.size as usize)
                    .unwrap()
                    .checked_add(mem::size_of::<LV2_Atom>())
                    .unwrap(),
            }
        }
    }

    /// Returns the URID of the time unit used by this sequence's timestamps.
    pub fn get_time_unit_urid(&self) -> LV2_URID {
        unsafe { (*self.seq).body.unit as LV2_URID }
    }
}

/// Rounds `size` up to the next multiple of 8 — the 64-bit padding applied
/// between events in an atom sequence.
pub fn pad_size(size: u32) -> usize {
    let seven: usize = 7;
    (size as usize + seven) & !seven
}

impl Iterator for AtomSequenceIter {
    type Item = SequenceData;

    /// Yields the next event, advancing `self.next` past the event header
    /// plus its padded body.
    fn next(&mut self) -> Option<SequenceData> {
        if self.next as usize >= self.total {
            // Reached the end of the sequence body.
            None
        } else {
            unsafe {
                let seqData = SequenceData {
                    data_type: (*self.next).body.atom_type,
                    time_frames: (*self.next).time_frames,
                    // Body bytes start right after the event header.
                    data: self.next.offset(1) as *const u8,
                    size: (*self.next).body.size as usize,
                };
                // Step size: event header + body rounded up to 8 bytes.
                let next_offset: usize = mem::size_of::<LV2_Atom_Event>() + pad_size((*self.next).body.size);
                self.next = ((self.next as usize).checked_add(next_offset as usize).unwrap()) as *const LV2_Atom_Event;
                Some(seqData)
            }
        }
    }
}

/// An LV2 object atom: its type URID plus its decoded properties.
pub struct AtomObject {
    pub otype: LV2_URID,
    pub items: Vec<AtomProperty>,
}

/// The payload of a decoded atom property (only float and long are handled).
pub enum AtomItem {
    AtomFloat(f32),
    AtomLong(i64),
}

/// One key/value property of an [`AtomObject`].
pub struct AtomProperty {
    pub key: LV2_URID,
    pub context: u32,
    pub item: AtomItem,
}
use hlist::*;
use ty::{ Ar1, Eval, Eval1, Infer, Tm, Ty, infer };
use ty::bit::{ _0, _1, };
use ty::nat::pos;

/// Type-level negative integers
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Zn<P> {}

/// Type-level positive integers
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Zp<P> {}

/// Type-level doubling for binary integers:
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Double {}

impl Infer for Double {
    // NOTE(review): the sibling ops below declare `type Ty = Ar1<Int, Int>`
    // while this impl declares `type Arity` — confirm the asymmetry is
    // intentional against the `Infer` trait definition.
    type Arity = HC<(), HN>;
    type Mode = infer::mode::Constant;
}

/// `double(0) ==> 0`
impl Eval<Double> for HC<_0, HN> {
    type Out = _0;
}

/// `double(-p) ==> -(p:0)`
// Appending a 0 bit to the magnitude doubles it; the sign is preserved.
impl<P> Eval<Double> for HC<Zn<P>, HN> {
    type Out = Zn<(P, _0)>;
}

/// `double(+p) ==> +(p:0)`
impl<P> Eval<Double> for HC<Zp<P>, HN> {
    type Out = Zp<(P, _0)>;
}

/// Type-level doubling with successor for binary integers:
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum SuccDouble {}

impl Infer for SuccDouble {
    type Mode = infer::mode::Constant;
    type Ty = Ar1<Int, Int>;
}

/// `succ_double(0) ==> 1`
impl Eval<SuccDouble> for HC<_0, HN> {
    type Out = Zp<_1>;
}

/// `succ_double[Int](-p) ==> -(pred_double[Pos](p))`
// 2·(-p) + 1 == -(2p - 1), so recurse via pred_double on the positive magnitude.
impl<P, Rec> Eval<SuccDouble> for HC<Zn<P>, HN>
where
    P: Eval1<pos::PredDouble, Out = Rec>,
{
    type Out = Zn<Rec>;
}

/// `succ_double(+p) ==> +(p:1)`
impl<P> Eval<SuccDouble> for HC<Zp<P>, HN> {
    type Out = Zp<(P, _1)>;
}

/// Type-level doubling with predecessor for binary integers:
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum PredDouble {}

impl Infer for PredDouble {
    type Mode = infer::mode::Constant;
    type Ty = Ar1<Int, Int>;
}

/// `pred_double(0) ==> -1`
impl Eval<PredDouble> for HC<_0, HN> {
    type Out = Zn<_1>;
}

/// `pred_double(-p) ==> -(p:1)`
// 2·(-p) - 1 == -(2p + 1): append a 1 bit to the magnitude.
impl<P> Eval<PredDouble> for HC<Zn<P>, HN> {
    type Out = Zn<(P, _1)>;
}

/// `pred_double<Int>(+p) ==> +(pred_double<Pos>(p))`
impl<P, Rec> Eval<PredDouble> for HC<Zp<P>, HN>
where
    P: Eval1<pos::PredDouble, Out = Rec>,
{
    type Out = Zp<Rec>;
}
use crate::packet::Packet;
use std::collections::HashMap;

/// The outcome of one processing step: either a decoded key/value record or a
/// raw [`Packet`] to be handled downstream.
// NOTE(review): purpose inferred from the variant payloads — confirm against callers.
pub enum OutputResult {
    /// A decoded record of string keys to string values.
    Entry(HashMap<String, String>),
    /// An unprocessed packet.
    Packet(Packet),
}
use std::collections::HashMap;

fn main() {
    part1();
    part2();
}

/// Part 1: find the 3x3 window with the largest total power.
fn part1() {
    let grid = get_populated_grid();
    let mut highest = 0;
    let mut highest_point = (0, 0);
    // 297 = 300 - 3: last top-left corner whose 3x3 window stays in bounds.
    for x in 0..=297 {
        for y in 0..=297 {
            let window_value = get_window_value((x, y), &grid, 3);
            if window_value > highest {
                highest = window_value;
                highest_point = (x, y);
            }
        }
    }
    dbg!(highest);
    dbg!(highest_point);
}

/// Part 2: best window of any size 1..=300. Each (x, y, n) window reuses the
/// memoized (x, y, n-1) total and only adds the new bottom row and right
/// column, keeping the search roughly O(300^3) instead of O(300^4).
fn part2() {
    let grid = get_populated_grid();
    let mut memo: HashMap<(i32, i32, i32), i32> = HashMap::new(); // (x, y, window size) => value
    let mut highest = 0;
    let mut highest_point = (0, 0, 1); // x, y, window_size
    for window_size in 1..=300 {
        for x in 0..=(300 - window_size) {
            for y in 0..=(300 - window_size) {
                let window_value = if window_size == 1 {
                    *grid.get(&(x, y)).unwrap()
                } else {
                    let mut value = *memo.get(&(x, y, window_size - 1)).unwrap();
                    // New bottom row of the grown window.
                    for add_x in x..x + window_size {
                        value += grid.get(&(add_x, y + window_size - 1)).unwrap();
                    }
                    // New right column; -1 so we don't double count the corner.
                    for add_y in y..y + window_size - 1 {
                        value += grid.get(&(x + window_size - 1, add_y)).unwrap();
                    }
                    value
                };
                memo.insert((x, y, window_size), window_value);
                if window_value > highest {
                    highest = window_value;
                    highest_point = (x, y, window_size);
                }
            }
        }
    }
    dbg!(highest);
    dbg!(highest_point);
}

/// Builds the 300x300 power grid.
// very sloppy types due to laziness
fn get_populated_grid() -> HashMap<(i32, i32), i32> {
    let serial_number = 6878;
    /*
    Find the fuel cell's rack ID, which is its X coordinate plus 10.
    Begin with a power level of the rack ID times the Y coordinate.
    Increase the power level by the value of the grid serial number (your puzzle input).
    Set the power level to itself multiplied by the rack ID.
    Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers with no hundreds digit become 0).
    Subtract 5 from the power level.
    */
    let mut grid: HashMap<(i32, i32), i32> = HashMap::with_capacity(300 * 300);
    for x in 0..300 {
        for y in 0..300 {
            let rack_id = x + 10;
            let mut power_level: i32 = rack_id * y;
            power_level += serial_number;
            power_level = rack_id * power_level;
            power_level = get_hundreds_digit(power_level);
            power_level -= 5;
            grid.insert((x, y), power_level);
        }
    }
    grid
}

/// Returns the hundreds digit of `number` (0 when |number| < 100).
///
/// Pure arithmetic instead of the previous `to_string()` round trip, which
/// allocated a `String` for every one of the 90 000 grid cells (and would
/// again inside any hot caller). `abs()` keeps the old behavior for negative
/// inputs, which picked the digit ignoring the sign.
fn get_hundreds_digit(number: i32) -> i32 {
    (number.abs() / 100) % 10
}

/// Sums the `window_size` x `window_size` window whose top-left corner is `point`.
fn get_window_value(point: (i32, i32), grid: &HashMap<(i32, i32), i32>, window_size: i32) -> i32 {
    let point_x = point.0;
    let point_y = point.1;
    let mut sum = 0;
    for x in point_x..point_x + window_size {
        for y in point_y..point_y + window_size {
            sum += grid.get(&(x, y)).unwrap();
        }
    }
    sum
}

#[cfg(test)]
mod test_get_hundreds_digit {
    use super::get_hundreds_digit;

    #[test]
    fn under_100() {
        assert_eq!(get_hundreds_digit(6), 0);
    }

    #[test]
    fn three_digit() {
        assert_eq!(get_hundreds_digit(683), 6);
    }

    #[test]
    fn many_digit() {
        assert_eq!(get_hundreds_digit(7813109), 1);
    }
}
// Copyright 2020 David Li // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Common utilities for the RouteGuide. use crate::route_guide::{Feature, FeatureDatabase, Point, Rectangle}; use std::path::PathBuf; const COORD_FACTOR: f64 = 1e7; /// Gets the latitude for the given point. #[inline] fn get_latitude(location: &Point) -> f64 { location.get_latitude() as f64 / COORD_FACTOR } /// Gets the longitude for the given point. #[inline] fn get_longitude(location: &Point) -> f64 { location.get_longitude() as f64 / COORD_FACTOR } /// Gets the default features file. #[inline] fn get_default_features_file() -> PathBuf { let dir = env!("CARGO_MANIFEST_DIR"); let path = PathBuf::from(dir).join("data/route_guide_db.json"); assert!(path.exists()); path } /// Parses the JSON input file containing the list of features. #[inline] pub fn load_database() -> FeatureDatabase { let file = get_default_features_file(); let file = std::fs::File::open(file).unwrap(); serde_json::from_reader(file).unwrap() } /// Indicates whether the given feature exists (i.e. has a valid name). #[inline] pub fn exists(feature: &Feature) -> bool { !feature.get_name().is_empty() } /// Indicates whether the given two points are equal. #[inline] pub fn point_eq(p1: &Point, p2: &Point) -> bool { if p1.get_latitude() == p2.get_latitude() && p1.get_longitude() == p2.get_longitude() { true } else { false } } /// Checks if the given point is in features. 
#[inline] pub fn check_feature(features: &[Feature], location: &Point) -> Option<Feature> { features.iter().find_map(|f| { if point_eq(f.get_location(), location) { Some(f.clone()) } else { None } }) } /// Indicates whether the given point is in the range of the given rectangle. #[inline] pub fn in_range(point: &Point, rect: &Rectangle) -> bool { use std::cmp::{max, min}; let lo = rect.get_lo(); let hi = rect.get_hi(); let left = min(lo.get_longitude(), hi.get_longitude()); let right = max(lo.get_longitude(), hi.get_longitude()); let top = max(lo.get_latitude(), hi.get_latitude()); let bottom = min(lo.get_latitude(), hi.get_latitude()); let lat = point.get_latitude(); let lon = point.get_longitude(); if lon >= left && lon <= right && lat >= bottom && lat <= top { true } else { false } } /// Calculates distance between two points. #[inline] pub fn calc_distance(start: &Point, end: &Point) -> i32 { const R: i32 = 6371000; // earth radius in meters let lat1 = get_latitude(start).to_radians(); let lat2 = get_latitude(end).to_radians(); let lon1 = get_longitude(start).to_radians(); let lon2 = get_longitude(end).to_radians(); let delta_lat = lat2 - lat1; let delta_lon = lon2 - lon1; let a = (delta_lat / 2f64).sin() * (delta_lat / 2f64).sin() + lat1.cos() * lat2.cos() * (delta_lon / 2f64).sin() * (delta_lon / 2f64).sin(); let c = 2f64 * a.sqrt().atan2((1f64 - a).sqrt()); let distance = R as f64 * c; distance as i32 } /// Format point to `String`. #[inline] pub fn format_point(point: &Point) -> String { format!("({}, {})", get_latitude(point), get_longitude(point)) }
use crate::common::*;

/// A parsed value for one recognized setting. Boolean variants carry the
/// setting's on/off state; `Shell` carries a full shell override.
#[derive(Debug)]
pub(crate) enum Setting<'src> {
  DotenvLoad(bool),
  Export(bool),
  PositionalArguments(bool),
  Shell(Shell<'src>),
}

/// A shell override: the command to invoke plus its arguments.
// NOTE(review): exact argument semantics (e.g. prepended flags like `-c`)
// inferred from field names — confirm against the evaluator.
#[derive(Debug, PartialEq)]
pub(crate) struct Shell<'src> {
  pub(crate) command:   StringLiteral<'src>,
  pub(crate) arguments: Vec<StringLiteral<'src>>,
}
//! The module contains [`LimitColumns`] records iterator. use crate::grid::records::IntoRecords; /// An iterator which limits amount of columns. #[derive(Debug)] pub struct LimitColumns<I> { records: I, limit: usize, } impl LimitColumns<()> { /// Creates new [`LimitColumns`]. pub fn new<I: IntoRecords>(records: I, limit: usize) -> LimitColumns<I> { LimitColumns { records, limit } } } impl<I> IntoRecords for LimitColumns<I> where I: IntoRecords, { type Cell = I::Cell; type IterColumns = LimitColumnsColumnsIter<<I::IterColumns as IntoIterator>::IntoIter>; type IterRows = LimitColumnsIter<<I::IterRows as IntoIterator>::IntoIter>; fn iter_rows(self) -> Self::IterRows { LimitColumnsIter { iter: self.records.iter_rows().into_iter(), limit: self.limit, } } } /// An iterator over rows for [`LimitColumns`]. #[derive(Debug)] pub struct LimitColumnsIter<I> { iter: I, limit: usize, } impl<I> Iterator for LimitColumnsIter<I> where I: Iterator, I::Item: IntoIterator, <I::Item as IntoIterator>::Item: AsRef<str>, { type Item = LimitColumnsColumnsIter<<I::Item as IntoIterator>::IntoIter>; fn next(&mut self) -> Option<Self::Item> { let iter = self.iter.next()?; Some(LimitColumnsColumnsIter { iter: iter.into_iter(), limit: self.limit, }) } } /// An iterator over columns for [`LimitColumns`]. #[derive(Debug)] pub struct LimitColumnsColumnsIter<I> { iter: I, limit: usize, } impl<I> Iterator for LimitColumnsColumnsIter<I> where I: Iterator, I::Item: AsRef<str>, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { if self.limit == 0 { return None; } self.limit -= 1; self.iter.next() } }
#[doc = "Register `RCC_MC_CIFR` reader"] pub type R = crate::R<RCC_MC_CIFR_SPEC>; #[doc = "Register `RCC_MC_CIFR` writer"] pub type W = crate::W<RCC_MC_CIFR_SPEC>; #[doc = "Field `LSIRDYF` reader - LSIRDYF"] pub type LSIRDYF_R = crate::BitReader; #[doc = "Field `LSIRDYF` writer - LSIRDYF"] pub type LSIRDYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `LSERDYF` reader - LSERDYF"] pub type LSERDYF_R = crate::BitReader; #[doc = "Field `LSERDYF` writer - LSERDYF"] pub type LSERDYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `HSIRDYF` reader - HSIRDYF"] pub type HSIRDYF_R = crate::BitReader; #[doc = "Field `HSIRDYF` writer - HSIRDYF"] pub type HSIRDYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `HSERDYF` reader - HSERDYF"] pub type HSERDYF_R = crate::BitReader; #[doc = "Field `HSERDYF` writer - HSERDYF"] pub type HSERDYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `CSIRDYF` reader - CSIRDYF"] pub type CSIRDYF_R = crate::BitReader; #[doc = "Field `CSIRDYF` writer - CSIRDYF"] pub type CSIRDYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `PLL1DYF` reader - PLL1DYF"] pub type PLL1DYF_R = crate::BitReader; #[doc = "Field `PLL1DYF` writer - PLL1DYF"] pub type PLL1DYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `PLL2DYF` reader - PLL2DYF"] pub type PLL2DYF_R = crate::BitReader; #[doc = "Field `PLL2DYF` writer - PLL2DYF"] pub type PLL2DYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `PLL3DYF` reader - PLL3DYF"] pub type PLL3DYF_R = crate::BitReader; #[doc = "Field `PLL3DYF` writer - PLL3DYF"] pub type PLL3DYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `PLL4DYF` reader - PLL4DYF"] pub type PLL4DYF_R = crate::BitReader; #[doc = "Field `PLL4DYF` writer - PLL4DYF"] pub type PLL4DYF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `LSECSSF` reader - 
LSECSSF"] pub type LSECSSF_R = crate::BitReader; #[doc = "Field `LSECSSF` writer - LSECSSF"] pub type LSECSSF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `WKUPF` reader - WKUPF"] pub type WKUPF_R = crate::BitReader; #[doc = "Field `WKUPF` writer - WKUPF"] pub type WKUPF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; impl R { #[doc = "Bit 0 - LSIRDYF"] #[inline(always)] pub fn lsirdyf(&self) -> LSIRDYF_R { LSIRDYF_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - LSERDYF"] #[inline(always)] pub fn lserdyf(&self) -> LSERDYF_R { LSERDYF_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - HSIRDYF"] #[inline(always)] pub fn hsirdyf(&self) -> HSIRDYF_R { HSIRDYF_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - HSERDYF"] #[inline(always)] pub fn hserdyf(&self) -> HSERDYF_R { HSERDYF_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - CSIRDYF"] #[inline(always)] pub fn csirdyf(&self) -> CSIRDYF_R { CSIRDYF_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 8 - PLL1DYF"] #[inline(always)] pub fn pll1dyf(&self) -> PLL1DYF_R { PLL1DYF_R::new(((self.bits >> 8) & 1) != 0) } #[doc = "Bit 9 - PLL2DYF"] #[inline(always)] pub fn pll2dyf(&self) -> PLL2DYF_R { PLL2DYF_R::new(((self.bits >> 9) & 1) != 0) } #[doc = "Bit 10 - PLL3DYF"] #[inline(always)] pub fn pll3dyf(&self) -> PLL3DYF_R { PLL3DYF_R::new(((self.bits >> 10) & 1) != 0) } #[doc = "Bit 11 - PLL4DYF"] #[inline(always)] pub fn pll4dyf(&self) -> PLL4DYF_R { PLL4DYF_R::new(((self.bits >> 11) & 1) != 0) } #[doc = "Bit 16 - LSECSSF"] #[inline(always)] pub fn lsecssf(&self) -> LSECSSF_R { LSECSSF_R::new(((self.bits >> 16) & 1) != 0) } #[doc = "Bit 20 - WKUPF"] #[inline(always)] pub fn wkupf(&self) -> WKUPF_R { WKUPF_R::new(((self.bits >> 20) & 1) != 0) } } impl W { #[doc = "Bit 0 - LSIRDYF"] #[inline(always)] #[must_use] pub fn lsirdyf(&mut self) -> LSIRDYF_W<RCC_MC_CIFR_SPEC, 0> { LSIRDYF_W::new(self) } #[doc = "Bit 1 - LSERDYF"] #[inline(always)] #[must_use] pub fn lserdyf(&mut self) 
-> LSERDYF_W<RCC_MC_CIFR_SPEC, 1> { LSERDYF_W::new(self) } #[doc = "Bit 2 - HSIRDYF"] #[inline(always)] #[must_use] pub fn hsirdyf(&mut self) -> HSIRDYF_W<RCC_MC_CIFR_SPEC, 2> { HSIRDYF_W::new(self) } #[doc = "Bit 3 - HSERDYF"] #[inline(always)] #[must_use] pub fn hserdyf(&mut self) -> HSERDYF_W<RCC_MC_CIFR_SPEC, 3> { HSERDYF_W::new(self) } #[doc = "Bit 4 - CSIRDYF"] #[inline(always)] #[must_use] pub fn csirdyf(&mut self) -> CSIRDYF_W<RCC_MC_CIFR_SPEC, 4> { CSIRDYF_W::new(self) } #[doc = "Bit 8 - PLL1DYF"] #[inline(always)] #[must_use] pub fn pll1dyf(&mut self) -> PLL1DYF_W<RCC_MC_CIFR_SPEC, 8> { PLL1DYF_W::new(self) } #[doc = "Bit 9 - PLL2DYF"] #[inline(always)] #[must_use] pub fn pll2dyf(&mut self) -> PLL2DYF_W<RCC_MC_CIFR_SPEC, 9> { PLL2DYF_W::new(self) } #[doc = "Bit 10 - PLL3DYF"] #[inline(always)] #[must_use] pub fn pll3dyf(&mut self) -> PLL3DYF_W<RCC_MC_CIFR_SPEC, 10> { PLL3DYF_W::new(self) } #[doc = "Bit 11 - PLL4DYF"] #[inline(always)] #[must_use] pub fn pll4dyf(&mut self) -> PLL4DYF_W<RCC_MC_CIFR_SPEC, 11> { PLL4DYF_W::new(self) } #[doc = "Bit 16 - LSECSSF"] #[inline(always)] #[must_use] pub fn lsecssf(&mut self) -> LSECSSF_W<RCC_MC_CIFR_SPEC, 16> { LSECSSF_W::new(self) } #[doc = "Bit 20 - WKUPF"] #[inline(always)] #[must_use] pub fn wkupf(&mut self) -> WKUPF_W<RCC_MC_CIFR_SPEC, 20> { WKUPF_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "This register shall be used by the MCU in order to read and clear the interrupt flags.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`rcc_mc_cifr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`rcc_mc_cifr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. 
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct RCC_MC_CIFR_SPEC; impl crate::RegisterSpec for RCC_MC_CIFR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`rcc_mc_cifr::R`](R) reader structure"] impl crate::Readable for RCC_MC_CIFR_SPEC {} #[doc = "`write(|w| ..)` method takes [`rcc_mc_cifr::W`](W) writer structure"] impl crate::Writable for RCC_MC_CIFR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets RCC_MC_CIFR to value 0"] impl crate::Resettable for RCC_MC_CIFR_SPEC { const RESET_VALUE: Self::Ux = 0; }
use std::collections::{HashMap, HashSet};

/// Parse the puzzle input into a child -> parent orbit map.
///
/// Each input line has the form `A)B`, meaning `B` orbits `A`; the returned
/// map therefore lets us walk from any object toward the root (`COM`).
fn to_graph(input: &str) -> HashMap<&str, &str> {
    let mut orbits = HashMap::new();
    for line in input.lines() {
        let mut halves = line.trim().split(')');
        let parent = halves.next().expect("aoc input");
        let child = halves.next().expect("aoc input");
        orbits.insert(child, parent);
    }
    orbits
}

/// All objects from `key` (inclusive) up to the root, in walk order.
fn path_to_root<'a>(key: &'a str, graph: &'a HashMap<&str, &str>) -> Vec<&'a str> {
    let mut path = Vec::new();
    let mut current = Some(key);
    while let Some(node) = current {
        path.push(node);
        current = graph.get(node).copied();
    }
    path
}

/// Part one: the total number of direct and indirect orbits.
fn solve_01(graph: &HashMap<&str, &str>) -> usize {
    let mut total = 0;
    for key in graph.keys() {
        // Each ancestor on the way to the root is one (in)direct orbit;
        // subtract one because the path includes the object itself.
        total += path_to_root(key, graph).len() - 1;
    }
    total
}

/// Part two: minimal number of orbital transfers between YOU and SAN.
///
/// Nodes on exactly one of the two root paths are the hops to travel; the
/// `- 2` discounts the YOU and SAN endpoints themselves.
fn solve_02(graph: &HashMap<&str, &str>) -> usize {
    let mine: HashSet<_> = path_to_root("YOU", graph).into_iter().collect();
    let santas: HashSet<_> = path_to_root("SAN", graph).into_iter().collect();
    mine.symmetric_difference(&santas).count() - 2
}

/// Print the answers for both parts.
pub fn solve(input: &str) {
    let graph = to_graph(input);
    println!("part one: {}", solve_01(&graph));
    println!("part two: {}", solve_02(&graph));
}

#[cfg(test)]
mod tests {
    use super::*;

    // The worked example from the puzzle statement.
    const EXAMPLE: &str = "COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L";

    #[test]
    fn part_one() {
        assert_eq!(solve_01(&to_graph(EXAMPLE)), 42);
    }

    #[test]
    fn part_two() {
        let input = format!("{}\nK)YOU\nI)SAN", EXAMPLE);
        assert_eq!(solve_02(&to_graph(&input)), 4);
    }
}
// Unit tests for the balanced-ternary CPU emulator. Each test assembles a
// small shellcode buffer of 9-trit bytes via `byte_le!`, writes it at the
// current program counter, steps the CPU, and checks register/flag state.
#[cfg(test)]
mod tests {
    use crate::cpu::Cpu;
    use crate::word::Word;
    use crate::{byte_le};
    use crate::interrupt::Interrupt;

    // Little-endian balanced-ternary integer literal:
    // bt_le!(t0, t1, t2, ...) == t0 + 3*t1 + 9*t2 + ...,
    // where the token `T` stands for the trit value -1.
    macro_rules! bt_le {
        ( 0 ) => { 0 };
        ( 1 ) => { 1 };
        ( T ) => { -1 };
        ( $trit:tt, $($rest:tt),+ ) => { bt_le!($trit) + 3 * bt_le!($($rest),+) };
    }

    // Smoke test: a default-initialized CPU runs six steps without faulting.
    #[test]
    fn test_init() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        // No shellcode is written; run whatever the default memory contains.
        let shellcode = [];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(6).unwrap();
        println!("\n{}", cpu);
        // panic!();
    }

    // Checks the word/byte/trit-sized LOAD variants against a value written
    // via SET.
    #[test]
    fn test_load() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,1,0,0), // set b, 01000000000000001T
            byte_le!(T,1,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,1,0),
            byte_le!(0,0,T,T,1,0,0,0,1), // load c, [b]
            byte_le!(0,0,T,T,T,0,0,1,T), // load d.b, [b]
            byte_le!(0,0,T,T,0,0,0,1,0), // load e.t, [b]
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(4).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), bt_le!(T,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0));
        assert_eq!(i64::from(cpu.regs.c), bt_le!(0,0,0,0,0,0,0,1,0,0,0,T,T,1,0,0,0,1));
        assert_eq!(i64::from(cpu.regs.d), bt_le!(0,0,0,0,0,0,0,1,0));
        assert_eq!(i64::from(cpu.regs.e), bt_le!(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0));
    }

    // This encoding is expected to decode to a halting instruction.
    #[test]
    fn test_halt() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,0,0,0,0,1), // addfz c, b
            // NOTE(review): the comment above looks stale (it matches the
            // addfz mnemonic used elsewhere with a different encoding), while
            // the assertion below expects Interrupt::Halted — confirm.
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        assert_eq!(cpu.fetch_decode_execute_one(false), Err(Interrupt::Halted));
    }

    // addfz/subfz: add/sub variants that fault when operating from zero.
    #[test]
    fn test_addsub_fz() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        cpu.regs.b = Word::from(bt_le!(0,T));
        cpu.regs.c = Word::from(bt_le!(1));
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,1,T,1,0,0,0,0,1), // addfz c, b
            byte_le!(0,1,T,0,T,0,0,0,1), // subfz c, b
            byte_le!(0,1,T,1,0,0,0,0,1), // addfz c, b
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        assert_eq!(cpu.fetch_decode_execute_one(false), Ok(()));
        assert_eq!(cpu.fetch_decode_execute_one(false), Ok(()));
        assert_eq!(i64::from(cpu.regs.c), -5);
        // With c zeroed, the third addfz must raise AbsOpFromZero.
        cpu.regs.c = Word::ZERO;
        assert_eq!(cpu.fetch_decode_execute_one(false), Err(Interrupt::AbsOpFromZero));
        println!("\n{}", cpu);
    }

    // CMP between register/register and register/immediate; one flags
    // assertion per executed compare.
    #[test]
    fn test_cmp() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        cpu.regs.b = Word::from(bt_le!(T));
        cpu.regs.c = Word::from(bt_le!(0));
        cpu.regs.d = Word::from(bt_le!(1));
        cpu.regs.e = Word::from(bt_le!(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1));
        cpu.regs.f = Word::from(bt_le!(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,T));
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,1,T,0,0,0,0,0,0), // cmp b b
            byte_le!(0,1,T,0,0,0,1,0,0), // cmp b c
            byte_le!(0,1,T,0,0,1,T,0,0), // cmp b d
            byte_le!(0,1,T,0,0,0,1,0,1), // cmp c c
            byte_le!(0,1,T,0,0,1,T,0,1), // cmp c d
            byte_le!(0,1,T,0,0,1,T,1,T), // cmp c d
            byte_le!(0,1,T,0,0,1,1,1,0), // cmp e f
            byte_le!(0,1,T,0,0,1,0,1,1), // cmp f e
            byte_le!(0,1,T,0,0,1,0,1,0), // cmp e e
            byte_le!(0,1,T,0,0,1,1,1,1), // cmp f f
            byte_le!(T,0,0,1,0,0,0,0,0), // cmp b D1
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(0,1,T,0,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(T,T,T,0,1,1,1));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(T,T,T,0,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(0,1,T,0,0,0,0));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(T,T,T,0,0,0,0));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(0,1,T,0,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(1,1,1,1,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(T,T,T,T,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(0,1,T,0,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(0,1,T,0,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.flags), bt_le!(T,T,T,0,0,1,T));
        println!("\n{}", cpu);
    }

    // Ternary shifts multiply/divide by powers of three: 1 -> 3 -> 81 -> 1 -> 0.
    #[test]
    fn test_shift() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        cpu.regs.b = Word::from(bt_le!(1));
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(1,T,0,1,0,0,0,0,0), // shift b, 1
            byte_le!(1,T,0,0,1,0,0,0,0), // shift b, 3
            byte_le!(1,T,0,T,T,0,0,0,0), // shift b, -4
            byte_le!(1,T,0,1,1,0,1,0,0), // shift b, -4
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.b), 3);
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.b), 81);
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.b), 1);
        cpu.run(1).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), 0);
    }

    // Arithmetic instructions with inline immediate operands.
    #[test]
    fn test_inimm_insts() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        cpu.regs.b = Word::from(bt_le!(0,0,1));
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(T,T,T,1,0,0,0,0,0), // add b, 1
            byte_le!(T,T,1,1,T,0,0,0,0), // sub b, T1
            byte_le!(T,T,0,1,T,1,0,0,0), // mul b, 1T1
            byte_le!(T,1,T,1,T,1,T,0,0), // div b, T1T1
            byte_le!(T,1,1,0,1,0,0,0,0), // mod b, 10
            byte_le!(T,1,0,1,0,0,0,0,0), // addfz b, 1
            byte_le!(T,0,T,1,0,0,0,0,0), // subfz b, 1
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(8).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), -1);
    }

    // Tritwise AND/OR/XOR plus register-to-register MOV.
    #[test]
    fn test_andorxormov() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        cpu.regs.b = Word::from(bt_le!(0,1,T,0,1,T,0,1,T));
        cpu.regs.c = Word::from(bt_le!(1,T,0,T,0,1,0,1,T));
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,1,1,0,0,0,1,1,T), // mov d, c
            byte_le!(0,1,1,T,T,0,0,1,T), // and d, b
            byte_le!(0,1,1,T,1,0,0,1,T), // or d, b
            byte_le!(0,1,1,T,0,0,1,0,0), // xor d, b
            // NOTE(review): the final assertion checks register b, not d —
            // presumably the last encoding targets b; confirm against the
            // decoder.
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(1).unwrap();
        assert_eq!(cpu.regs.c, cpu.regs.d);
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.d), bt_le!(0,T,T,T,0,T,0,1,T));
        cpu.run(1).unwrap();
        assert_eq!(i64::from(cpu.regs.d), bt_le!(0,1,T,0,1,T,0,1,T));
        println!("\n{}", cpu);
        cpu.run(1).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), bt_le!(0,1,0,0,0,1,0,T,T));
    }

    // STORE writes a register to memory; subsequent LOADs read it back.
    #[test]
    fn test_store() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,1,0,0), // set b, 010000000000000000
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,1,0),
            byte_le!(0,0,T,T,1,0,0,0,1), // load c, [b]
            byte_le!(0,0,T,1,1,0,T,0,0), // store [b], a
            byte_le!(0,0,T,T,1,0,0,0,1), // load c, [b]
            byte_le!(0,0,T,T,T,0,0,1,T), // load d.b, [b]
            byte_le!(0,0,T,T,0,0,0,1,0), // load e.t, [b]
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(2).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), bt_le!(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0));
        assert_eq!(i64::from(cpu.regs.c), bt_le!(0,0,0,0,T,0,1));
        cpu.regs.a = Word::from(bt_le!(1,0,0,0,0,0,0,0,0,T));
        cpu.run(4).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.a), bt_le!(1,0,0,0,0,0,0,0,0,T));
        assert_eq!(i64::from(cpu.regs.c), bt_le!(1,0,0,0,0,0,0,0,0,T));
        assert_eq!(i64::from(cpu.regs.d), 1);
        assert_eq!(i64::from(cpu.regs.e), bt_le!(0,0,0,0,0,0,0,0,0,1));
    }

    // Register/register arithmetic: add, sub, mul, div, mod.
    #[test]
    fn test_addsubmuldivmod_regreg() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,1,0,0), // set b, 1T000
            byte_le!(0,0,0,T,1,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,T,0,T,0,T), // set a, 10
            byte_le!(0,1,0,0,0,0,0,0,0),
            byte_le!(0,1,T,T,T,0,0,0,0), // add b, b
            byte_le!(0,1,T,T,T,0,0,0,1), // add c, b
            byte_le!(0,1,T,T,1,0,1,1,T), // sub d, c
            byte_le!(0,1,T,T,1,0,0,0,1), // sub c, b
            byte_le!(0,1,T,T,0,0,T,0,0), // mul b, a
            byte_le!(0,1,T,1,T,0,T,1,T), // div d, a
            byte_le!(0,0,0,0,T,0,T,0,0), // set b, 101
            byte_le!(1,0,1,0,0,0,0,0,0),
            byte_le!(0,1,T,1,1,0,T,0,0), // mod b, a
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(6).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), 108);
        assert_eq!(i64::from(cpu.regs.c), 0);
        assert_eq!(i64::from(cpu.regs.d), -108);
        cpu.run(2).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), 108*3);
        assert_eq!(i64::from(cpu.regs.d), -108/3);
        cpu.run(2).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), 1);
    }

    // Conditional relative jump: the not-taken branch falls through, the
    // taken branch moves pc by the relative offset.
    #[test]
    fn test_cjumprel() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,1,T,T,0,0), // cjumprel if flags.diff == T
            byte_le!(0,0,0,T,0,0,0,0,0),
            byte_le!(0,0,0,0,1,T,0,0,0), // cjumprel if flags.diff == 0
            byte_le!(0,0,0,1,0,0,0,0,0),
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(2).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.pc.bytes[0]), 29);
    }

    // Conditional absolute jump, analogous to test_cjumprel.
    #[test]
    fn test_cjumpabs() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,1,1,T,0,0), // cjumpabs if flags.diff == T
            byte_le!(0,0,0,T,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,T,0),
            byte_le!(0,0,0,0,1,1,0,0,0), // cjumpabs if flags.diff == 0
            byte_le!(0,0,0,1,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,1,0),
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(2).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.pc.bytes[0]), 27);
    }

    // Relative call followed by a return via `pop pc`.
    #[test]
    fn test_callrel() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,0,0,0,1,T), // callrel 1T0
            byte_le!(0,T,1,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,1,1,T,T), // pop pc // ret
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(4).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.pc.bytes[0]), 3);
    }

    // Absolute call followed by a return via `pop pc`.
    #[test]
    fn test_callabs() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,0,0,0,1,1), // callabs 0100000000000001T1
            byte_le!(1,T,1,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,1,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,1,1,T,T), // pop pc // ret
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        println!("{}", cpu);
        cpu.run(4).unwrap();
        println!("\n{}", cpu);
        assert_eq!(i64::from(cpu.regs.pc.bytes[0]), 4);
    }

    // SET in word, byte, and trit widths.
    #[test]
    fn test_set() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,1,0,0), // set reg.w
            byte_le!(0,0,0,0,T,1,1,1,1),
            byte_le!(0,0,0,0,1,T,T,T,T),
            byte_le!(0,0,0,0,T,0,T,0,1), // set reg.b
            byte_le!(0,0,0,0,1,1,T,1,1),
            byte_le!(0,0,0,0,T,0,0,0,1), // set reg.t
            byte_le!(0,0,0,0,T,T,1,T,T),
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        cpu.run(3).unwrap();
        println!("{}", cpu);
        assert_eq!(i64::from(cpu.regs.b), bt_le!(0,0,0,0,T,1,1,1,1,0,0,0,0,1,T,T,T,T));
        assert_eq!(i64::from(cpu.regs.c), bt_le!(0,0,0,0,1,1,T,1,1,0,0,0,0,T,T,1,T,T));
    }

    // Values survive a push/pop round-trip; three extra pushes leave the
    // stack pointer at -7.
    #[test]
    fn test_push_pop() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,1,0,0),
            byte_le!(0,0,T,0,1,T,1,0,1),
            byte_le!(0,0,T,0,1,T,1,0,1),
            byte_le!(0,0,0,0,0,1,T,0,0), // push b
            byte_le!(0,0,0,0,0,1,1,0,1), // pop c
            byte_le!(0,0,0,0,0,1,T,0,1), // push c
            byte_le!(0,0,0,0,0,1,1,1,T), // pop d
            byte_le!(0,0,0,0,0,1,T,1,T), // push d
            byte_le!(0,0,0,0,0,1,1,1,0), // pop e
            byte_le!(0,0,0,0,0,1,T,1,T), // push d
            byte_le!(0,0,0,0,0,1,T,1,T), // push d
            byte_le!(0,0,0,0,0,1,T,1,T), // push d
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        cpu.run(10).unwrap();
        assert_eq!(cpu.regs.b, cpu.regs.c);
        assert_eq!(cpu.regs.c, cpu.regs.d);
        assert_eq!(cpu.regs.d, cpu.regs.e);
        assert_eq!(i64::from(cpu.regs.sp), -7);
    }

    // Ternary NOT is negation: after `not c`, b == -c.
    #[test]
    fn test_not() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,T,0,T,0,0), // set b.b
            byte_le!(0,0,T,0,1,T,1,0,1),
            byte_le!(0,0,0,0,T,0,T,0,1), // set c.b
            byte_le!(0,0,T,0,1,T,1,0,1),
            byte_le!(0,0,0,0,0,1,0,0,1), // not c
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        cpu.run(3).unwrap();
        assert_eq!(i64::from(cpu.regs.b), -i64::from(cpu.regs.c))
    }

    // Five all-zero bytes decode as NOPs and advance pc by exactly five.
    #[test]
    fn test_nop() {
        let mut cpu = Cpu::new();
        cpu.init_default();
        let (pc_space, pc_offset) = cpu.get_mut_space_and_offset(cpu.regs.pc).unwrap();
        let shellcode = [
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
            byte_le!(0,0,0,0,0,0,0,0,0),
        ];
        for (i, b) in shellcode.iter().enumerate() {
            pc_space.set_byte(pc_offset+(i as isize), *b).unwrap();
        }
        let old_pc = cpu.regs.pc;
        cpu.run(5).unwrap();
        assert_eq!(i64::from(old_pc) + 5, i64::from(cpu.regs.pc))
    }
}
use crate::State; use crate::{Instruction, HaltState}; #[derive(Debug)] pub enum Operation{ Addition, Multiplication, Store, Output, JumpIfTrue, JumpIfFalse, LessThan, Equals, AdjustRelativeBase } impl Operation{ pub fn process(&self, st: &mut State, ins: &Instruction) -> Option<HaltState> { let operation_function = match self{ Operation::Addition => op_addition, Operation::Multiplication => op_multiplication, Operation::Store => op_store, Operation::Output => op_output, Operation::JumpIfTrue => op_jumpiftrue, Operation::JumpIfFalse => op_jumpiffalse, Operation::LessThan => op_lessthan, Operation::Equals => op_equals, Operation::AdjustRelativeBase => op_adjust_relative_base }; //dbg!(ins); return operation_function(st, ins); } } fn op_adjust_relative_base(st: &mut State, ins: &Instruction) -> Option<HaltState>{ let params = ins.get_parameters(st); st.update_relative_base(params[0]); st.increment_address(ins.size() as i64); return None; } fn op_addition(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); st.write( ins.get_target_address(st.relative_base), params[0] + params[1] ); st.increment_address(ins.size() as i64); return None; } fn op_multiplication(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); st.write( ins.get_target_address(st.relative_base), params[0] * params[1] ); st.increment_address(ins.size() as i64); return None; } fn op_store(st: &mut State, ins: &Instruction) -> Option<HaltState> { let value = match st.input.pop(){ Some(v) => v, None => return Some(HaltState::WaitingForInput) }; st.write( ins.get_target_address(st.relative_base), value ); st.increment_address(ins.size() as i64); return None; } fn op_output(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); st.output.push(params[0]); st.increment_address(ins.size() as i64); return None; } fn op_jumpiftrue(st: &mut State, ins: &Instruction) -> Option<HaltState> { let 
params = ins.get_parameters(st); let new_address = match params[0] != 0{ true => params[1], false => st.address + ins.size() as i64 }; st.set_address(new_address); return None; } fn op_jumpiffalse(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); let new_address = match params[0] == 0{ true => params[1], false => st.address + ins.size() as i64 }; st.set_address(new_address); return None; } fn op_lessthan(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); let answer: i64 = match params[0] < params[1] { true => 1, false => 0 }; st.write(ins.get_target_address(st.relative_base), answer); st.increment_address(ins.size() as i64); return None; } fn op_equals(st: &mut State, ins: &Instruction) -> Option<HaltState> { let params = ins.get_parameters(st); let answer: i64 = match params[0] == params[1] { true => 1, false => 0 }; st.write(ins.get_target_address(st.relative_base), answer); st.increment_address(ins.size() as i64); return None; }
//! The pixel component trait. #[cfg(feature = "f16-pixel-type")] use half::f16; use crate::format::{Format, SampleType}; /// A trait for possible pixel components. /// /// # Safety /// Implementing this trait allows retrieving slices of pixel data from the frame for the target /// type, so the target type must be valid for the given format. pub unsafe trait Component { /// Returns whether this component is valid for this format. fn is_valid(format: Format) -> bool; } unsafe impl Component for u8 { #[inline] fn is_valid(format: Format) -> bool { format.sample_type() == SampleType::Integer && format.bytes_per_sample() == 1 } } unsafe impl Component for u16 { #[inline] fn is_valid(format: Format) -> bool { format.sample_type() == SampleType::Integer && format.bytes_per_sample() == 2 } } unsafe impl Component for u32 { #[inline] fn is_valid(format: Format) -> bool { format.sample_type() == SampleType::Integer && format.bytes_per_sample() == 4 } } #[cfg(feature = "f16-pixel-type")] unsafe impl Component for f16 { #[inline] fn is_valid(format: Format) -> bool { format.sample_type() == SampleType::Float && format.bytes_per_sample() == 2 } } unsafe impl Component for f32 { #[inline] fn is_valid(format: Format) -> bool { format.sample_type() == SampleType::Float && format.bytes_per_sample() == 4 } }
extern crate pest;
#[macro_use]
extern crate pest_derive;
#[macro_use]
extern crate conrod_core;
extern crate conrod_glium;
// #[macro_use] extern crate conrod_winit;
extern crate find_folder;
extern crate glium;

// Parser generated by pest from the grammar file `_.pest`.
#[derive(Parser)]
#[grammar = "_.pest"]
pub struct OiParser;

mod support;

use glium::Surface;

// Entry point: parse the window size out of `_.oi`, then open the GUI.
fn main() {
    use pest::Parser;
    // let src = "(\nwindow (canvas {width: 1024 height: 768}) \n)";
    use std::fs::File;
    use std::io::Read;
    // use std::io::{BufReader};
    let mut f = File::open("_.oi").unwrap();
    // let mut reader = BufReader::new(f);
    let mut src = String::new();
    f.read_to_string(&mut src).unwrap();
    // Parsing returns a Pairs<Rule>.
    let mut parse_result = OiParser::parse(Rule::oi, &src).unwrap();
    // Each element inside is a Pair<Rule>.
    let token_pairs = parse_result.next().unwrap();
    // as_str: the source text of this pair
    // as_rule: which grammar rule matched
    // as_span: the source text plus its position
    // into_inner: the child Pairs<Rule>
    // tokens?? Token is an enum Start{Rule, Position} | End{Rule, Position}
    // Tokens nest: a Start is emitted when a nesting level opens and an End
    // when it closes.
    // Position seems to hold the byte offset (line numbers are not tracked).
    use std::str::FromStr;
    let mut width = 0;
    let mut height = 0;
    // The first two inner pairs are expected to be the window width and
    // height, in that order.
    for (i, token_pair) in token_pairs.into_inner().into_iter().enumerate() {
        match i {
            0 => {
                // println!("0{:?}", token_pair.as_str());
                width = u32::from_str(token_pair.as_str()).unwrap();
            },
            1 => {
                // println!("1{:?}", token_pair.as_str());
                height = u32::from_str(token_pair.as_str()).unwrap();
            },
            _ => unreachable!(),
        }
        // dbg!(&token_pair);
        // dbg!(&pair_def.tokens());
        // let s = pair_def.as_str();
        // dbg!(&s);
    }
    show_window(width, height);
}

// Create the glium window/UI of the given size and run the conrod event loop
// until the window is closed or Escape is pressed.
fn show_window(width: u32, height: u32) {
    // let WIDTH: u32 = width;
    // let HEIGHT: u32 = height;
    // Build the window.
    let event_loop = glium::glutin::event_loop::EventLoop::new();
    let window = glium::glutin::window::WindowBuilder::new()
        .with_title("wasmiq")
        .with_inner_size(glium::glutin::dpi::LogicalSize::new(width, height));
        // .with_dimensions((WIDTH, HEIGHT).into());
    let context = glium::glutin::ContextBuilder::new()
        .with_vsync(true)
        .with_multisampling(4);
    let display = glium::backend::glutin::Display::new(window, context, &event_loop).unwrap();

    // Construct our `Ui`.
    let mut ui = conrod_core::UiBuilder::new([width as f64, height as f64]).build();

    // A unique identifier for each widget.
    let ids = Ids::new(ui.widget_id_generator());

    // Add a `Font` to the `Ui`'s `font::Map` from file.
    // let assets = find_folder::Search::KidsThenParents(3, 5)
    //     .for_folder("assets")
    //     .unwrap();
    // let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
    // let font_path = "fonts/Noto_Sans/NotoSans-Regular.ttf";
    let font_path = "fonts/NotoMono-hinted/NotoMono-Regular.ttf";
    // let font_path = "fonts/HackGen_v2.3.4/HackGen-Regular.ttf";
    ui.fonts.insert_from_file(font_path).unwrap();

    // A type used for converting `conrod_core::render::Primitives` into `Command`s that can be used
    // for drawing to the glium `Surface`.
    let mut renderer = conrod_glium::Renderer::new(&display).unwrap();

    // The image map describing each of our widget->image mappings (in our case, none).
    let image_map = conrod_core::image::Map::<glium::texture::texture2d::Texture2d>::new();

    // Some starting text to edit.
    let mut demo_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. \
        Mauris aliquet porttitor tellus vel euismod. Integer lobortis volutpat bibendum. Nulla \
        finibus odio nec elit condimentum, rhoncus fermentum purus lacinia. Interdum et malesuada \
        fames ac ante ipsum primis in faucibus. Cras rhoncus nisi nec dolor bibendum pellentesque. \
        Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. \
        Quisque commodo nibh hendrerit nunc sollicitudin sodales. \
        Cras vitae tempus ipsum. Nam \
        magna est, efficitur suscipit dolor eu, consectetur consectetur urna."
        .to_owned();

    // Poll events from the window.
    support::run_loop(display, event_loop, move |request, display| {
        match request {
            support::Request::Event {
                event,
                should_update_ui,
                should_exit,
            } => {
                // Use the `winit` backend feature to convert the winit event to a conrod one.
                if let Some(event) = support::convert_event(&event, &display.gl_window().window()) {
                    ui.handle_event(event);
                    *should_update_ui = true;
                }
                match event {
                    glium::glutin::event::Event::WindowEvent { event, .. } => match event {
                        // Break from the loop upon `Escape`.
                        glium::glutin::event::WindowEvent::CloseRequested
                        | glium::glutin::event::WindowEvent::KeyboardInput {
                            input: glium::glutin::event::KeyboardInput {
                                virtual_keycode: Some(glium::glutin::event::VirtualKeyCode::Escape),
                                ..
                            },
                            ..
                        } => *should_exit = true,
                        _ => {}
                    },
                    _ => {}
                }
            }
            support::Request::SetUi { needs_redraw } => {
                // Instantiate all widgets in the GUI.
                set_ui(ui.set_widgets(), &ids, &mut demo_text);
                // Get the underlying winit window and update the mouse cursor as set by conrod.
                display
                    .gl_window()
                    .window()
                    .set_cursor_icon(support::convert_mouse_cursor(ui.mouse_cursor()));
                *needs_redraw = ui.has_changed();
            }
            support::Request::Redraw => {
                // Render the `Ui` and then display it on the screen.
                let primitives = ui.draw();
                renderer.fill(display, primitives, &image_map);
                let mut target = display.draw();
                target.clear_color(0.0, 0.0, 0.0, 1.0);
                renderer.draw(display, &mut target, &image_map).unwrap();
                target.finish().unwrap();
            }
        }
    })
}

// Widget identifiers used by `set_ui`.
widget_ids! {
    struct Ids {
        canvas,
        text_edit,
        scrollbar,
        button,
        rectangle,
        grid,
        node
    }
}

// Declare the `WidgetId`s and instantiate the widgets.
// Instantiate all widgets for one UI pass: a white scrollable canvas, a dark
// header rectangle, a background grid, and one blue "node" rectangle.
fn set_ui(ref mut ui: conrod_core::UiCell, ids: &Ids, demo_text: &mut String) {
    use conrod_core::{color, widget, Colorable, Positionable, Sizeable, Widget};

    widget::Canvas::new()
        .scroll_kids_vertically()
        .color(color::WHITE)
        .set(ids.canvas, ui);

    widget::Rectangle::fill([1024.0, 64.0])
        .top_left_of(ids.canvas)
        .color(color::DARK_CHARCOAL)
        .set(ids.rectangle, ui);

    // Grid coordinate range: a unit square centered on the origin.
    // let min_x = 0.0;
    // let max_x = std::f64::consts::PI * 2.0;
    let min_x = -1.0;
    let max_x = 1.0;
    let min_y = -1.0;
    let max_y = 1.0;

    // NOTE(review): `sixteenth_lines` uses the same 0.5 step as
    // `quarter_lines` (they differ only in thickness); the name suggests a
    // finer step (e.g. 0.125) was intended — confirm.
    let quarter_lines = widget::grid::Lines::step(0.5_f64).thickness(2.0);
    let sixteenth_lines = widget::grid::Lines::step(0.5_f64).thickness(1.0);
    let lines = &[
        quarter_lines.x(),
        quarter_lines.y(),
        sixteenth_lines.x(),
        sixteenth_lines.y(),
    ];
    widget::Grid::new(min_x, max_x, min_y, max_y, lines.iter().cloned())
        .color(color::rgb(0.1, 0.12, 0.15))
        .wh_of(ids.canvas)
        .middle_of(ids.canvas)
        .set(ids.grid, ui);

    // A single 32x32 "node" placed on the grid relative to the previous
    // widget's position.
    widget::Rectangle::fill([32.0, 32.0])
        .x_y_relative(32.0 * -6.0, 32.0 * 8.0)
        // .top_left_of(ids.canvas)
        .color(color::BLUE)
        .set(ids.node, ui);

    // for edit in widget::TextEdit::new(demo_text)
    //     .color(color::WHITE)
    //     .padded_w_of(ids.canvas, 20.0)
    //     .mid_top_of(ids.canvas)
    //     // .center_justify()
    //     .line_spacing(2.5)
    //     .restrict_to_height(false) // Let the height grow infinitely and scroll.
    //     .set(ids.text_edit, ui)
    // {
    //     *demo_text = edit;
    // }

    // widget::Scrollbar::y_axis(ids.canvas)
    //     .auto_hide(true)
    //     .set(ids.scrollbar, ui);

    // use conrod_core::Labelable;
    // if widget::Button::new()
    //     .w_h(40.0, 40.0)
    //     .top_left_of(ui.window)
    //     // .label_font_id(&ui.theme)
    //     .label("1")
    //     .label_font_size(16)
    //     .set(ids.button, ui)
    //     .was_clicked() {
    //     println!("1");
    // }
}
use sightglass_data::{Measurement, Phase}; use std::{ borrow::Cow, fmt::{self, Debug}, str::FromStr, }; /// An in-progress collection of measurements that are currently being recorded. pub struct Measurements<'a> { arch: &'a str, engine: &'a str, wasm: &'a str, process: u32, iteration: u32, measurements: Vec<Measurement<'a>>, } impl<'a> Measurements<'a> { /// Construct a new `Measurements`. pub fn new(arch: &'a str, engine: &'a str, wasm: &'a str) -> Self { Measurements { arch, engine, wasm, process: std::process::id(), iteration: 0, measurements: vec![], } } /// Advance the iteration counter. pub fn next_iteration(&mut self) { self.iteration += 1; } /// Reserve additional capacity for more measurements internally. pub fn reserve(&mut self, capacity: usize) { self.measurements.reserve(capacity); } /// Add a measurement of the given event for the given phase to this /// `Measurements` collection. pub fn add(&mut self, phase: Phase, event: Cow<'a, str>, count: u64) { self.measurements.push(Measurement { arch: self.arch.into(), engine: self.engine.into(), wasm: self.wasm.into(), process: self.process, iteration: self.iteration, phase, event, count, }); } /// When all measurements have been recorded, call this method to get the /// underlying measurements data. pub fn finish(self) -> Vec<Measurement<'a>> { self.measurements } } /// Recording measurements. /// /// This is primary trait for implementing different measurement mechanisms. The idea is that /// instantiating a measurement may take some time so it should be done once in `new` and data is /// collected by calling `start` and `end`. In a recording library like this one an error from /// implementors of this should result in a panic--not much point in recording anything if our /// measurement mechanism is broken. The same logic applies to misuse of the API (e.g. calling `end` /// before `start`). pub trait Measure: 'static { /// Start measuring. 
fn start(&mut self, phase: Phase); /// Finish measuring and add the measurements taken between `start` and /// `end` to `measurements`. fn end(&mut self, phase: Phase, measurements: &mut Measurements); } #[cfg(target_os = "linux")] pub mod counters; #[cfg(target_os = "linux")] pub mod insts; pub mod cycles; pub mod noop; pub mod vtune; /// [MeasureType] enumerates the implementations of [Measure] and allows us to `build` an instance /// from its name: /// ``` /// use sightglass_recorder::measure::MeasureType; /// let ty: MeasureType = "noop".parse().unwrap(); /// let measure = ty.build(); /// ``` #[derive(Debug, Clone, Copy)] pub enum MeasureType { /// No measurement. Noop, /// Measure cycles using, e.g., `RDTSC`. Cycles, /// Measure using VTune; this will return `0` values. VTune, /// Measure a combination of HW counters using `perf_event_open`. #[cfg(target_os = "linux")] PerfCounters, /// Measure instructions retired. #[cfg(target_os = "linux")] InstsRetired, } impl fmt::Display for MeasureType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { MeasureType::Noop => write!(f, "noop"), MeasureType::Cycles => write!(f, "cycles"), MeasureType::VTune => write!(f, "vtune"), #[cfg(target_os = "linux")] MeasureType::PerfCounters => write!(f, "perf-counters"), #[cfg(target_os = "linux")] MeasureType::InstsRetired => write!(f, "insts-retired"), } } } impl FromStr for MeasureType { type Err = &'static str; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "noop" => Ok(Self::Noop), "cycles" => Ok(Self::Cycles), "vtune" => Ok(Self::VTune), #[cfg(target_os = "linux")] "perf-counters" => Ok(Self::PerfCounters), #[cfg(target_os = "linux")] "insts-retired" => Ok(Self::InstsRetired), _ => Err("unknown measure type"), } } } impl MeasureType { /// Build a dynamic instance of a [Measure]. The recording infrastructure does not need to know /// exactly what type of [Measure] we want to use, just that it can `start` and `end` /// measurements. 
pub fn build(&self) -> Box<dyn Measure> { match self { Self::Noop => Box::new(noop::NoopMeasure::new()), Self::Cycles => Box::new(cycles::CycleMeasure::new()), Self::VTune => Box::new(vtune::VTuneMeasure::new()), #[cfg(target_os = "linux")] Self::PerfCounters => Box::new(counters::CounterMeasure::new()), #[cfg(target_os = "linux")] Self::InstsRetired => Box::new(insts::InstsRetiredMeasure::new()), } } } impl Measure for Box<dyn Measure> { fn start(&mut self, phase: Phase) { (**self).start(phase); } fn end(&mut self, phase: Phase, measurements: &mut Measurements) { (**self).end(phase, measurements) } }
use super::{alternative::AlternativeId, issue::IssueId, user::UserId, DbExecutor};
use crate::span::AsyncSpanHandler;
use actix::prelude::*;
use actix_interop::with_ctx;
use color_eyre::eyre::{eyre, Report, WrapErr};
use serde::{Deserialize, Serialize};
use sqlx::{types::Uuid, Executor, Postgres};
use tracing::{debug, instrument};

/// Newtype wrapper around a UUID identifying a single vote row.
#[derive(Clone, Hash, PartialEq, Eq, Debug, Deserialize, Serialize, sqlx::Type)]
#[sqlx(transparent)]
pub struct VoteId(pub Uuid);

impl VoteId {
    /// Generate a fresh random (v4) vote id.
    pub fn new() -> Self {
        Self(Uuid::new_v4())
    }
}

impl Default for VoteId {
    fn default() -> Self {
        Self::new()
    }
}

/// A vote as stored in the database: one user's choice of one alternative on
/// one issue.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InternalVote {
    pub id: VoteId,
    pub alternative_id: AlternativeId,
    pub issue_id: IssueId,
    pub user_id: UserId,
}

/// Actor message: record a vote by the user on the given issue/alternative.
#[derive(Message, Clone, Debug)]
#[rtype(result = "Result<InternalVote, Report>")]
pub struct AddVote(pub UserId, pub IssueId, pub AlternativeId);

/// Fetch the vote (if any) that `user_id` has already cast on `issue_id`.
async fn get_vote_for_user(
    executor: impl Executor<'_, Database = Postgres>,
    user_id: UserId,
    issue_id: IssueId,
) -> Result<Option<InternalVote>, Report> {
    sqlx::query_as!(
        InternalVote,
        r#"
        SELECT id as "id: _", alternative_id as "alternative_id: _", issue_id as "issue_id: _", user_id as "user_id: _"
        FROM votes
        WHERE user_id= $1 AND issue_id = $2
        "#,
        user_id.0,
        issue_id.0,
    )
    .fetch_optional(executor)
    .await
    .wrap_err("Got error while retrieving vote for user")
}

/// Fetch every vote cast on `issue_id`.
async fn get_votes_for_issue(
    executor: impl Executor<'_, Database = Postgres>,
    issue_id: IssueId,
) -> Result<Vec<InternalVote>, Report> {
    sqlx::query_as!(
        InternalVote,
        r#"
        SELECT id as "id: _", alternative_id as "alternative_id: _", issue_id as "issue_id: _", user_id as "user_id: _"
        FROM votes
        WHERE issue_id = $1
        "#,
        issue_id.0,
    )
    .fetch_all(executor)
    .await
    .wrap_err("Got error while retrieving votes for issue")
}

/// Insert a new vote row and return it (the id is generated by the database).
async fn insert_vote(
    executor: impl Executor<'_, Database = Postgres>,
    alternative_id: AlternativeId,
    issue_id: IssueId,
    user_id: UserId,
) -> Result<InternalVote, Report> {
    sqlx::query_as!(
        InternalVote,
        r#"
        INSERT INTO votes (alternative_id, issue_id, user_id)
        VALUES($1, $2, $3)
        RETURNING id as "id: _", alternative_id as "alternative_id: _", issue_id as "issue_id: _", user_id as "user_id: _"
        "#,
        alternative_id.0,
        issue_id.0,
        user_id.0,
    )
    .fetch_one(executor)
    .await
    .wrap_err("Got error while adding vote to DB")
}

#[async_trait::async_trait]
impl AsyncSpanHandler<AddVote> for DbExecutor {
    #[instrument]
    async fn handle(msg: AddVote) -> Result<InternalVote, Report> {
        debug!(vote = ?msg, "Adding vote");
        let AddVote(user_id, issue_id, alternative_id) = msg;
        let pool = with_ctx(|a: &mut DbExecutor, _| a.pool());
        // Check-then-insert inside one transaction so a user cannot end up
        // with two votes on the same issue.
        let mut tx = pool.begin().await?;
        let user_vote = get_vote_for_user(&mut tx, user_id.clone(), issue_id.clone()).await?;
        if user_vote.is_some() {
            return Err(eyre!("User has already voted"));
        }
        let inserted_vote = insert_vote(&mut tx, alternative_id, issue_id, user_id).await?;
        tx.commit().await?;
        Ok(inserted_vote)
    }
}
crate::span_message_async_impl!(AddVote, DbExecutor);

/// Actor message: list every vote cast on the given issue.
#[derive(Message, Clone, Debug)]
#[rtype(result = "Result<Vec<InternalVote>, Report>")]
pub struct VotesForIssue(pub IssueId);

#[async_trait::async_trait]
impl AsyncSpanHandler<VotesForIssue> for DbExecutor {
    #[instrument]
    async fn handle(msg: VotesForIssue) -> Result<Vec<InternalVote>, Report> {
        debug!("Retrieving votes for issue");
        let VotesForIssue(issue_id) = msg;
        let pool = with_ctx(|a: &mut DbExecutor, _| a.pool());
        get_votes_for_issue(&pool, issue_id).await
    }
}
crate::span_message_async_impl!(VotesForIssue, DbExecutor);
mod commands; mod config; mod error; mod image; mod ini_writer; mod launcher; mod network; mod qmp; mod storage; mod template; #[macro_use] extern crate clap; use tokio; #[tokio::main] async fn main() { let c = commands::Commands {}; if let Err(e) = c.evaluate().await { println!("error: {}", e.to_string()); } }
mod camera;
mod hit;
mod material;
mod ray;
mod sphere;
mod util;

use camera::Camera;
use core::f32;
use glam::Vec3A;
use hit::{Hittable, HittableList};
use material::Material;
use rand::random;
use ray::Ray;
use sphere::Sphere;
use std::io::Write;
use util::{random_between, vec3a_random};

/// Renders a randomized sphere scene as a plain-text PPM (P3) image on stdout,
/// reporting scanline progress on stderr.
fn main() {
    let mut spheres: Vec<Sphere> = Vec::new();

    // Large planet/ground sphere
    spheres.push(Sphere {
        pos: Vec3A::new(0.0, -1000.0, 0.0),
        r: 1000.0,
        material: Material::Lambertian {
            albedo: Vec3A::new(0.5, 0.5, 0.5),
        },
    });

    // Random small spheres scattered over the ground plane.
    let span = 11;
    let center = Vec3A::new(4.0, 0.2, 0.0);
    for x in -span..span {
        for z in -span..span {
            let pos = Vec3A::new(
                x as f32 + 0.9 * random::<f32>(),
                0.2,
                z as f32 + 0.9 * random::<f32>(),
            );
            // Keep a clearing around `center` so small spheres don't overlap
            // the large sphere placed there below.
            if (pos - center).length() <= 0.9 {
                continue;
            }
            let choose_mat = random::<f32>();
            if choose_mat < 0.8 {
                // lambertian (80% of spheres)
                spheres.push(Sphere {
                    pos,
                    r: 0.2,
                    material: Material::Lambertian {
                        albedo: vec3a_random(0.0, 1.0) * vec3a_random(0.0, 1.0),
                    },
                });
            } else if choose_mat < 0.95 {
                // metal (15%)
                spheres.push(Sphere {
                    pos,
                    r: 0.2,
                    material: Material::Metal {
                        albedo: vec3a_random(0.5, 1.0),
                        fuzz: random_between(0.0, 0.5),
                    },
                });
            } else {
                // dielectric (5%)
                spheres.push(Sphere {
                    pos,
                    r: 0.2,
                    material: Material::Dielectric { ir: 1.5 },
                });
            }
        }
    }

    // Large spheres
    spheres.push(Sphere {
        pos: Vec3A::new(0.0, 1.0, 0.0),
        r: 1.0,
        material: Material::Dielectric { ir: 1.5 },
    });
    spheres.push(Sphere {
        pos: Vec3A::new(-4.0, 1.0, 0.0),
        r: 1.0,
        material: Material::Lambertian {
            albedo: Vec3A::new(0.4, 0.2, 0.1),
        },
    });
    spheres.push(Sphere {
        pos: Vec3A::new(4.0, 1.0, 0.0),
        r: 1.0,
        material: Material::Metal {
            albedo: Vec3A::new(0.7, 0.6, 0.5),
            fuzz: 0.0,
        },
    });

    let world = HittableList::new(
        spheres
            .iter()
            .map(|sphere| sphere as &dyn Hittable)
            .collect(),
    );

    let aspect: f32 = 3.0 / 2.0;
    let w = 1200;
    let h = (w as f32 / aspect).round() as i32;

    let pos = Vec3A::new(13.0, 2.0, 3.0);
    let target = Vec3A::zero();
    let camera = Camera::new(camera::Config {
        pos,
        target,
        vup: Vec3A::unit_y(),
        // NOTE(review): 0.35 looks like a vertical FOV in radians (~20°) —
        // confirm against camera::Config's expected unit.
        vfov: 0.35,
        aspect,
        lens_radius: 0.05,
        focus_distance: 10.0,
    });

    // PPM header: magic number, image dimensions, maximum color value.
    println!("P3");
    println!("{} {}", w, h);
    println!("255");

    let sampling_rate = 500;
    let max_bounces = 50;

    for i in 0..h {
        let stderr = std::io::stderr();
        let mut handle = stderr.lock();
        handle
            .write_all(format!("\rscanlines remaining: {} ", h - i).as_bytes())
            .unwrap();
        for j in 0..w {
            let mut color = Vec3A::zero();
            // average out multiple samples for antialiasing
            for _ in 0..sampling_rate {
                let u = (j as f32 + rand::random::<f32>()) / (w - 1) as f32;
                let v = ((h - i - 1) as f32 + rand::random::<f32>()) / (h - 1) as f32;
                let ray = camera.ray(u, v);
                color += ray_color(&ray, &world, max_bounces);
            }
            color /= sampling_rate as f32;

            // apply gamma correction (gamma 2: sqrt), clamping below 1.0 so
            // the 255-scaling below never exceeds 255
            color.x = color.x.sqrt().clamp(0.0, 0.999);
            color.y = color.y.sqrt().clamp(0.0, 0.999);
            color.z = color.z.sqrt().clamp(0.0, 0.999);

            write_color(color);
        }
    }
}

/// Returns the color seen along `ray`, recursing on scattered rays until the
/// bounce budget is exhausted (returns black when it runs out).
fn ray_color(ray: &Ray, world: &dyn Hittable, bounces: i32) -> Vec3A {
    if bounces <= 0 {
        return Vec3A::zero();
    }
    // tmin is 0.001 to prevent "shadow acne"
    if let Some(hit) = world.check_hit(&ray, 0.001, f32::MAX) {
        if let Some(scatter) = hit.material.scatter(ray, &hit) {
            return scatter.attenuation * ray_color(&scatter.ray, world, bounces - 1);
        }
        // Ray was absorbed.
        return Vec3A::zero();
    }

    // background: vertical gradient from white to light blue
    let t = 0.5 * ray.dir.normalize().y + 0.5;
    Vec3A::lerp(Vec3A::one(), Vec3A::new(0.5, 0.7, 1.0), t)
}

/// Prints one pixel as "R G B" with components scaled from [0,1) to 0..=255.
fn write_color(color: Vec3A) {
    println!(
        "{} {} {}",
        (255.0 * color.x).round() as i32,
        (255.0 * color.y).round() as i32,
        (255.0 * color.z).round() as i32
    )
}
extern crate ci_test;

#[cfg(test)]
mod tests {
    use ci_test::add_two;

    // Sanity check: `add_two` adds exactly two to its argument.
    #[test]
    fn add_two_test2() {
        let result = add_two(2);
        assert_eq!(result, 4);
    }
}
//! Public surface of this crate: conversions between the compact difficulty
//! encoding and target/difficulty values, plus merkle-tree utilities.

mod difficulty;
mod merkle_tree;

pub use difficulty::{
    compact_to_difficulty, compact_to_target, difficulty_to_compact, target_to_compact, DIFF_TWO,
};
pub use merkle_tree::{merkle_root, MergeByte32, CBMT};
extern crate actix;
extern crate futures;

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering};

use actix::prelude::*;

/// Stream item: a number to accumulate into the shared counter.
#[derive(Debug)]
struct Num(usize);

impl ResponseType for Num {
    type Item = ();
    type Error = ();
}

/// Test actor: sums stream items into field `0`, records stream errors in `1`.
struct MyActor(Arc<AtomicUsize>, Arc<AtomicBool>);

impl Actor for MyActor {
    type Context = actix::Context<Self>;

    fn stopping(&mut self, _: &mut Self::Context) -> bool {
        // Shut down the whole system once the stream is exhausted and the
        // actor stops, so `sys.run()` returns.
        Arbiter::system().send(actix::msgs::SystemExit(0));
        true
    }
}

impl actix::Handler<Result<Num, ()>> for MyActor {
    type Result = ();

    fn handle(&mut self, msg: Result<Num, ()>, _: &mut actix::Context<MyActor>) {
        if let Ok(msg) = msg {
            // fetch_add is a single atomic read-modify-write; the previous
            // separate load+store pair could lose updates if another thread
            // wrote between the two operations.
            self.0.fetch_add(msg.0, Ordering::Relaxed);
        } else {
            self.1.store(true, Ordering::Relaxed);
        }
    }
}

/// Feeds 7 items through an actor stream and checks they were all summed.
#[test]
fn test_stream() {
    let sys = System::new("test");
    let count = Arc::new(AtomicUsize::new(0));
    let err = Arc::new(AtomicBool::new(false));
    let items = vec![Num(1), Num(1), Num(1), Num(1), Num(1), Num(1), Num(1)];

    let act_count = Arc::clone(&count);
    MyActor::create::<(), _>(move |ctx| {
        ctx.add_stream(futures::stream::iter_ok::<_, ()>(items));
        MyActor(act_count, err)
    });

    sys.run();
    assert_eq!(count.load(Ordering::Relaxed), 7);
}

/// Like `test_stream`, but injects one `Err(())` item: the 7 `Ok` items must
/// still be summed and the error flag must be set.
#[test]
fn test_stream_with_error() {
    let sys = System::new("test");
    let count = Arc::new(AtomicUsize::new(0));
    let error = Arc::new(AtomicBool::new(false));
    let items = vec![
        Ok(Num(1)),
        Ok(Num(1)),
        Err(()),
        Ok(Num(1)),
        Ok(Num(1)),
        Ok(Num(1)),
        Ok(Num(1)),
        Ok(Num(1)),
    ];

    let act_count = Arc::clone(&count);
    let act_error = Arc::clone(&error);
    MyActor::create::<(), _>(move |ctx| {
        ctx.add_stream(futures::stream::iter_result(items));
        MyActor(act_count, act_error)
    });

    sys.run();
    assert_eq!(count.load(Ordering::Relaxed), 7);
    assert!(error.load(Ordering::Relaxed));
}
//! Flat re-export of every submodule so consumers can import all items
//! directly from the crate root.

mod counter;
mod data;
mod meta_data;
mod rc;
mod size_hint_extending_iter;
mod utils;

pub use {counter::*, data::*, meta_data::*, rc::*, size_hint_extending_iter::*, utils::*};
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]

extern crate rocket;

use std::net::SocketAddr;

/// Echoes the client's remote socket address (possibly rewritten by Rocket
/// from an `X-Real-IP` header) as the response body.
#[get("/")]
fn get_ip(remote: SocketAddr) -> String {
    remote.to_string()
}

mod remote_rewrite_tests {
    use super::*;

    use rocket::local::Client;
    use rocket::http::{Header, Status};

    use std::net::SocketAddr;

    const KNOWN_IP: &'static str = "127.0.0.1:8000";

    /// Dispatches a request with an optional header and asserts on the body:
    /// `ip = Some(host)` when a rewrite is expected (the original port is
    /// kept), `None` when the known address must come back unchanged.
    fn check_ip(header: Option<Header<'static>>, ip: Option<String>) {
        let addr: SocketAddr = KNOWN_IP.parse().unwrap();
        let c = Client::new(rocket::ignite().mount("/", routes![get_ip])).unwrap();
        let mut response = match header {
            Some(header) => c.get("/").header(header).remote(addr).dispatch(),
            None => c.get("/").remote(addr).dispatch()
        };

        assert_eq!(response.status(), Status::Ok);

        let body = response.body_string();
        match ip {
            // The rewritten address keeps the original port.
            Some(ip) => assert_eq!(body, Some(format!("{}:{}", ip, addr.port()))),
            None => assert_eq!(body, Some(KNOWN_IP.into()))
        }
    }

    #[test]
    fn x_real_ip_rewrites() {
        let ip = "8.8.8.8";
        check_ip(Some(Header::new("X-Real-IP", ip)), Some(ip.to_string()));

        let ip = "129.120.111.200";
        check_ip(Some(Header::new("X-Real-IP", ip)), Some(ip.to_string()));
    }

    #[test]
    fn x_real_ip_rewrites_ipv6() {
        // IPv6 hosts are bracketed when rendered with a port.
        let ip = "2001:db8:0:1:1:1:1:1";
        check_ip(Some(Header::new("X-Real-IP", ip)), Some(format!("[{}]", ip)));

        let ip = "2001:db8::2:1";
        check_ip(Some(Header::new("X-Real-IP", ip)), Some(format!("[{}]", ip)));
    }

    #[test]
    fn uncased_header_rewrites() {
        // Header names must be matched case-insensitively.
        let ip = "8.8.8.8";
        check_ip(Some(Header::new("x-REAL-ip", ip)), Some(ip.to_string()));

        let ip = "1.2.3.4";
        check_ip(Some(Header::new("x-real-ip", ip)), Some(ip.to_string()));
    }

    #[test]
    fn no_header_no_rewrite() {
        // A differently-named header, or no header at all, must not rewrite.
        check_ip(Some(Header::new("real-ip", "?")), None);
        check_ip(None, None);
    }

    #[test]
    fn bad_header_doesnt_rewrite() {
        // Values that don't parse as IP addresses must be ignored.
        let ip = "092348092348";
        check_ip(Some(Header::new("X-Real-IP", ip)), None);

        let ip = "1200:100000:0120129";
        check_ip(Some(Header::new("X-Real-IP", ip)), None);

        let ip = "192.168.1.900";
        check_ip(Some(Header::new("X-Real-IP", ip)), None);
    }
}
#![feature(async_await, await_macro, futures_api)]

#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate typetag;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate derive_builder;

#[macro_use]
#[macro_export]
pub mod macros;

pub mod context;
pub mod descriptions;
pub mod environment;
pub mod error;
pub mod target;
pub mod traits;
pub mod utils;
mod work;
pub mod worktypes;
pub mod repository;

/// Convenience re-exports: `use <crate>::prelude::*` pulls in the common
/// types needed to describe and run targets.
pub mod prelude {
    pub use super::context::*;
    pub use super::descriptions::*;
    pub use super::environment::*;
    pub use super::target::*;
    pub use super::traits::*;
    pub use super::worktypes;
    pub use serde_json::Value;
    pub use super::repository::*;
}

#[cfg(test)]
mod tests {
    use super::prelude::*;
    use conveyor_work::http::Method;
    use serde_json::Value;
    use serde_yaml;

    /// Round-trips a hand-built `TargetDescription` through YAML to check
    /// that serialization and deserialization agree.
    #[test]
    fn it_works() {
        let desc = TargetDescription {
            name: "Loppen".to_string(),
            work: WorkTargetDescription {
                input: Value::String("https://loppen.dk".to_string()),
                steps: vec![WorkDescription {
                    name: Some("description".to_string()),
                    work: Box::new(worktypes::Flow {
                        flow_name: "Crawl".to_string(),
                        arguments: Some(args! {
                            "script" => "file://./index.js"
                        }),
                    }),
                    then: None,
                }],
            },
            flows: vec![FlowDescription {
                name: "Crawl".to_string(),
                work: vec![
                    WorkDescription {
                        name: None,
                        work: Box::new(worktypes::Http {
                            method: Some(Method::GET),
                        }),
                        then: None,
                    },
                    WorkDescription {
                        name: None,
                        work: Box::new(worktypes::Duktape {
                            script: "$script".to_string(),
                        }),
                        then: Some(Box::new(worktypes::Flow {
                            flow_name: "Crawl".to_string(),
                            arguments: Some(args! {
                                "script" => "file://./concert.js"
                            }),
                        })),
                    },
                ],
            }],
        };

        let s = serde_yaml::to_string(&desc).unwrap();
        println!("{}", s);
        let desc: TargetDescription = serde_yaml::from_str(&s).unwrap();
        println!("{:?}", desc);
    }
}
use std::io;

/// Reads a line from stdin, then prints it a user-chosen number of times.
///
/// The first prompt reads the string to repeat; the second reads the
/// repetition count. Panics with a friendly message if stdin fails or the
/// count is not a non-negative integer.
#[allow(non_snake_case)]
#[allow(dead_code)]
pub fn string_merathon() {
    println!("Enter Any String?");
    let mut b = String::new();
    io::stdin().read_line(&mut b)
        .expect("Failed to read line");

    println!("How many Times To Run?");
    let mut toberun = String::new();
    io::stdin().read_line(&mut toberun)
        .expect("Failed to read line");
    let toberun: u32 = toberun.trim().parse()
        .expect("Please type a number!");

    // `b` keeps its trailing newline from `read_line`, so `print!` still
    // emits one line per repetition. A `for` over a range replaces the
    // manual counter/`while` loop.
    for _ in 0..toberun {
        print!("{}", b);
    }
}
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::{Arc, Mutex}; use chrono::Local; use dns_lookup::lookup_addr; use etherparse::{Ethernet2Header, IpHeader, PacketHeaders, TransportHeader}; use maxminddb::Reader; use pcap::{Active, Address, Capture, Device}; use crate::countries::country_utils::get_country; use crate::networking::types::address_port_pair::AddressPortPair; use crate::networking::types::app_protocol::from_port_to_application_protocol; use crate::networking::types::data_info::DataInfo; use crate::networking::types::data_info_host::DataInfoHost; use crate::networking::types::filters::Filters; use crate::networking::types::host::Host; use crate::networking::types::info_address_port_pair::InfoAddressPortPair; use crate::networking::types::my_device::MyDevice; use crate::networking::types::traffic_direction::TrafficDirection; use crate::networking::types::traffic_type::TrafficType; use crate::utils::asn::asn; use crate::utils::formatted_strings::get_domain_from_r_dns; use crate::IpVersion::{IPv4, IPv6}; use crate::{AppProtocol, InfoTraffic, IpVersion, TransProtocol}; /// Calls methods to analyze link, network, and transport headers. /// Returns the relevant collected information. 
pub fn analyze_headers(
    headers: PacketHeaders,
    mac_addresses: &mut (String, String),
    exchanged_bytes: &mut u128,
    protocols: &mut Filters,
) -> Option<AddressPortPair> {
    let mut address1 = String::new();
    let mut address2 = String::new();
    let mut port1 = 0;
    let mut port2 = 0;

    if !analyze_link_header(headers.link, &mut mac_addresses.0, &mut mac_addresses.1) {
        return None;
    }

    if !analyze_network_header(
        headers.ip,
        exchanged_bytes,
        &mut protocols.ip,
        &mut address1,
        &mut address2,
    ) {
        return None;
    }

    if !analyze_transport_header(
        headers.transport,
        &mut port1,
        &mut port2,
        &mut protocols.application,
        &mut protocols.transport,
    ) {
        return None;
    }

    // Move the address strings into the pair directly: the previous
    // `.clone()` calls were redundant, since the locals are never used again.
    Some(AddressPortPair::new(
        address1,
        port1,
        address2,
        port2,
        protocols.transport,
    ))
}

/// This function analyzes the data link layer header passed as parameter and updates variables
/// passed by reference on the basis of the packet header content.
/// Returns false if packet has to be skipped.
fn analyze_link_header(
    link_header: Option<Ethernet2Header>,
    mac_address1: &mut String,
    mac_address2: &mut String,
) -> bool {
    match link_header {
        Some(header) => {
            // Convert the raw byte arrays to colon-separated hex strings.
            *mac_address1 = mac_from_dec_to_hex(header.source);
            *mac_address2 = mac_from_dec_to_hex(header.destination);
            true
        }
        _ => false,
    }
}

/// This function analyzes the network layer header passed as parameter and updates variables
/// passed by reference on the basis of the packet header content.
/// Returns false if packet has to be skipped.
fn analyze_network_header(
    network_header: Option<IpHeader>,
    exchanged_bytes: &mut u128,
    network_protocol: &mut IpVersion,
    address1: &mut String,
    address2: &mut String,
) -> bool {
    match network_header {
        Some(IpHeader::Version4(ipv4header, _)) => {
            *network_protocol = IpVersion::IPv4;
            // Format the 4 source/destination octets as a dotted-quad address
            // via `Ipv4Addr`, instead of Debug-printing the `[u8; 4]` array
            // and string-replacing the brackets, commas, and spaces away.
            // The resulting string is byte-identical (e.g. "192.168.1.1").
            *address1 = Ipv4Addr::from(ipv4header.source).to_string();
            *address2 = Ipv4Addr::from(ipv4header.destination).to_string();
            *exchanged_bytes = u128::from(ipv4header.payload_len);
            true
        }
        Some(IpHeader::Version6(ipv6header, _)) => {
            *network_protocol = IpVersion::IPv6;
            // IPv6 keeps the project's custom shortened-hex formatter.
            *address1 = ipv6_from_long_dec_to_short_hex(ipv6header.source);
            *address2 = ipv6_from_long_dec_to_short_hex(ipv6header.destination);
            *exchanged_bytes = u128::from(ipv6header.payload_length);
            true
        }
        _ => false,
    }
}

/// This function analyzes the transport layer header passed as parameter and updates variables
/// passed by reference on the basis of the packet header content.
/// Returns false if packet has to be skipped.
fn analyze_transport_header(
    transport_header: Option<TransportHeader>,
    port1: &mut u16,
    port2: &mut u16,
    application_protocol: &mut AppProtocol,
    transport_protocol: &mut TransProtocol,
) -> bool {
    // Extract ports and transport protocol once; the TCP and UDP arms
    // previously duplicated the entire assignment/lookup block.
    let (source_port, destination_port, protocol) = match transport_header {
        Some(TransportHeader::Udp(udp_header)) => (
            udp_header.source_port,
            udp_header.destination_port,
            TransProtocol::UDP,
        ),
        Some(TransportHeader::Tcp(tcp_header)) => (
            tcp_header.source_port,
            tcp_header.destination_port,
            TransProtocol::TCP,
        ),
        _ => return false,
    };

    *port1 = source_port;
    *port2 = destination_port;
    *transport_protocol = protocol;
    // Identify the application protocol from the source port first, falling
    // back to the destination port when the source port is unrecognized.
    *application_protocol = from_port_to_application_protocol(*port1);
    if (*application_protocol).eq(&AppProtocol::Other) {
        *application_protocol = from_port_to_application_protocol(*port2);
    }
    true
}

/// Function to insert the source and destination of a packet into the shared map containing the analyzed traffic.
pub fn modify_or_insert_in_map(
    info_traffic_mutex: &Arc<Mutex<InfoTraffic>>,
    key: &AddressPortPair,
    my_device: &MyDevice,
    mac_addresses: (String, String),
    exchanged_bytes: u128,
    application_protocol: AppProtocol,
) -> InfoAddressPortPair {
    let now = Local::now();
    let mut traffic_direction = TrafficDirection::default();
    let source_ip = &key.address1;
    let destination_ip = &key.address2;
    // Flag used by the UI layer; 25 chars distinguishes long IPv6 strings.
    let very_long_address = source_ip.len() > 25 || destination_ip.len() > 25;
    // The map preserves insertion order, so an index equal to the current
    // length means this key has never been seen before.
    let len = info_traffic_mutex.lock().unwrap().map.len();
    let index = info_traffic_mutex
        .lock()
        .unwrap()
        .map
        .get_index_of(key)
        .unwrap_or(len);
    if index == len {
        // first occurrence of key

        // update device addresses
        let mut my_interface_addresses = Vec::new();
        for dev in Device::list().expect("Error retrieving device list\r\n") {
            if dev.name.eq(&my_device.name) {
                let mut my_interface_addresses_mutex = my_device.addresses.lock().unwrap();
                *my_interface_addresses_mutex = dev.addresses.clone();
                drop(my_interface_addresses_mutex);
                my_interface_addresses = dev.addresses;
                break;
            }
        }
        // determine traffic direction
        traffic_direction =
            get_traffic_direction(source_ip, destination_ip, &my_interface_addresses);
    };

    let mut info_traffic = info_traffic_mutex
        .lock()
        .expect("Error acquiring mutex\n\r");

    // Update the existing entry's counters, or insert a fresh one for a
    // first-seen connection.
    let new_info: InfoAddressPortPair = info_traffic
        .map
        .entry(key.clone())
        .and_modify(|info| {
            info.transmitted_bytes += exchanged_bytes;
            info.transmitted_packets += 1;
            info.final_timestamp = now;
        })
        .or_insert(InfoAddressPortPair {
            mac_address1: mac_addresses.0,
            mac_address2: mac_addresses.1,
            transmitted_bytes: exchanged_bytes,
            transmitted_packets: 1,
            initial_timestamp: now,
            final_timestamp: now,
            app_protocol: application_protocol,
            very_long_address,
            traffic_direction,
            index,
        })
        .clone();

    info_traffic.addresses_last_interval.insert(index);

    // If the remote host was already resolved and is a favorite, mark it as
    // active in this interval.
    if let Some(host_info) = info_traffic
        .addresses_resolved
        .get(&get_address_to_lookup(key, traffic_direction))
        .cloned()
    {
        if info_traffic.favorite_hosts.contains(&host_info.1) {
            info_traffic.favorites_last_interval.insert(host_info.1);
        }
    }

    new_info
}

/// Resolves the remote address of a connection via reverse DNS and records the
/// resulting host (with country/ASN info) in the shared traffic state.
pub fn reverse_dns_lookup(
    info_traffic: &Arc<Mutex<InfoTraffic>>,
    key: &AddressPortPair,
    traffic_direction: TrafficDirection,
    my_device: &MyDevice,
    country_db_reader: &Reader<&[u8]>,
    asn_db_reader: &Reader<&[u8]>,
) {
    let address_to_lookup = get_address_to_lookup(key, traffic_direction);
    let my_interface_addresses = my_device.addresses.lock().unwrap().clone();

    // perform rDNS lookup (blocking; done while the shared state is unlocked)
    let lookup_result = lookup_addr(&address_to_lookup.parse().unwrap());

    // get new host info and build the new host
    let traffic_type = get_traffic_type(
        &address_to_lookup,
        &my_interface_addresses,
        traffic_direction,
    );
    let is_local = is_local_connection(&address_to_lookup, &my_interface_addresses);
    let country = get_country(&address_to_lookup, country_db_reader);
    let asn = asn(&address_to_lookup, asn_db_reader);
    // Fall back to the raw address when the lookup fails or returns nothing.
    let r_dns = if let Ok(result) = lookup_result {
        if result.is_empty() {
            address_to_lookup.clone()
        } else {
            result
        }
    } else {
        address_to_lookup.clone()
    };
    let new_host = Host {
        domain: get_domain_from_r_dns(r_dns.clone()),
        asn,
        country,
    };
    let mut info_traffic_lock = info_traffic.lock().unwrap();
    // collect the data exchanged from the same address so far and remove the address from the collection of addresses waiting a rDNS
    let other_data = info_traffic_lock
        .addresses_waiting_resolution
        .remove(&address_to_lookup)
        .unwrap_or(DataInfo::default());
    // insert the newly resolved host in the collections, with the data it exchanged so far
    info_traffic_lock
        .addresses_resolved
        .insert(address_to_lookup, (r_dns, new_host.clone()));
    info_traffic_lock
        .hosts
        .entry(new_host.clone())
        .and_modify(|data_info_host| {
            data_info_host.data_info += other_data;
        })
        .or_insert(DataInfoHost {
            data_info: other_data,
            is_favorite: false,
            is_local,
            traffic_type,
        });
    // check if the newly resolved host was featured in the favorites (possible in case of already existing host)
    if info_traffic_lock.favorite_hosts.contains(&new_host) {
        info_traffic_lock.favorites_last_interval.insert(new_host);
    }

    drop(info_traffic_lock);
}

/// Returns the traffic direction observed (incoming or outgoing)
fn get_traffic_direction(
    source_ip: &String,
    destination_ip: &String,
    my_interface_addresses: &[Address],
) -> TrafficDirection {
    let my_interface_addresses_string: Vec<String> = my_interface_addresses
        .iter()
        .map(|address| address.addr.to_string())
        .collect();

    if my_interface_addresses_string.contains(source_ip) {
        // source is local
        TrafficDirection::Outgoing
    } else if source_ip.ne("0.0.0.0") {
        // source not local and different from 0.0.0.0
        TrafficDirection::Incoming
    } else if !my_interface_addresses_string.contains(destination_ip) {
        // source is 0.0.0.0 (local not yet assigned an IP) and destination is not local
        TrafficDirection::Outgoing
    } else {
        TrafficDirection::Incoming
    }
}

/// Returns the traffic type observed (unicast, multicast or broadcast)
/// It refers to the remote host
pub fn get_traffic_type(
    destination_ip: &str,
    my_interface_addresses: &[Address],
    traffic_direction: TrafficDirection,
) -> TrafficType {
    // Multicast/broadcast classification only makes sense for the remote
    // destination of outgoing traffic; everything else is unicast.
    if traffic_direction.eq(&TrafficDirection::Outgoing) {
        if is_multicast_address(destination_ip) {
            TrafficType::Multicast
        } else if is_broadcast_address(destination_ip, my_interface_addresses) {
            TrafficType::Broadcast
        } else {
            TrafficType::Unicast
        }
    } else {
        TrafficType::Unicast
    }
}

/// Determines if the input address is a multicast address or not.
///
/// # Arguments
///
/// * `address` - string representing an IPv4 or IPv6 network address.
fn is_multicast_address(address: &str) -> bool {
    let mut ret_val = false;
    if address.contains(':') {
        //IPv6 address: multicast addresses are in ff00::/8
        if address.starts_with("ff") {
            ret_val = true;
        }
    } else {
        //IPv4 address: multicast addresses are 224.0.0.0 - 239.255.255.255
        let first_group = address
            .split('.')
            .next()
            .unwrap()
            .to_string()
            .parse::<u8>()
            .unwrap();
        if (224..=239).contains(&first_group) {
            ret_val = true;
        }
    }
    ret_val
}

/// Determines if the input address is a broadcast address or not.
///
/// An address matches when it is the limited broadcast `255.255.255.255` or the
/// directed broadcast address of one of the adapter's interfaces.
///
/// # Arguments
///
/// * `address` - string representing an IPv4 or IPv6 network address.
fn is_broadcast_address(address: &str, my_interface_addresses: &[Address]) -> bool {
    if address.eq("255.255.255.255") {
        return true;
    }
    // check if directed broadcast
    let my_broadcast_addresses: Vec<String> = my_interface_addresses
        .iter()
        .map(|address| {
            // interfaces without a broadcast address fall back to the limited broadcast
            address
                .broadcast_addr
                .unwrap_or("255.255.255.255".parse().unwrap())
                .to_string()
        })
        .collect();
    if my_broadcast_addresses.contains(&address.to_string()) {
        return true;
    }
    false
}

/// Determines if the connection is local
///
/// True when the remote address is link-local or shares a subnet (per the
/// interface netmask) with one of the adapter's addresses of the same IP version.
fn is_local_connection(address_to_lookup: &str, my_interface_addresses: &Vec<Address>) -> bool {
    let mut ret_val = false;
    let address_to_lookup_type = if address_to_lookup.contains(':') {
        IPv6
    } else {
        IPv4
    };
    for address in my_interface_addresses {
        match address.addr {
            IpAddr::V4(local_addr) if address_to_lookup_type.eq(&IPv4) => {
                // check if the two IPv4 addresses are in the same subnet
                let address_to_lookup_parsed: Ipv4Addr =
                    address_to_lookup.parse().unwrap_or(Ipv4Addr::from(0));
                // remote is link local?
                if address_to_lookup_parsed.is_link_local() {
                    ret_val = true;
                }
                // is the same subnet?
                else if let Some(IpAddr::V4(netmask)) = address.netmask {
                    // compare the masked octets of local and remote addresses
                    let mut local_subnet = Vec::new();
                    let mut remote_subnet = Vec::new();
                    let netmask_digits = netmask.octets();
                    let local_addr_digits = local_addr.octets();
                    let remote_addr_digits = address_to_lookup_parsed.octets();
                    for (i, netmask_digit) in netmask_digits.iter().enumerate() {
                        local_subnet.push(netmask_digit & local_addr_digits[i]);
                        remote_subnet.push(netmask_digit & remote_addr_digits[i]);
                    }
                    if local_subnet == remote_subnet {
                        ret_val = true;
                    }
                }
            }
            IpAddr::V6(local_addr) if address_to_lookup_type.eq(&IPv6) => {
                // check if the two IPv6 addresses are in the same subnet
                let address_to_lookup_parsed: Ipv6Addr =
                    address_to_lookup.parse().unwrap_or(Ipv6Addr::from(0));
                // remote is link local?
                // NOTE(review): string prefix "fe80" only matches part of fe80::/10
                // (e.g. "fe81::…" or uppercase forms are missed) — unlike the IPv4
                // branch, which checks the parsed address; confirm this is intended.
                if address_to_lookup.starts_with("fe80") {
                    ret_val = true;
                }
                // is the same subnet?
                else if let Some(IpAddr::V6(netmask)) = address.netmask {
                    let mut local_subnet = Vec::new();
                    let mut remote_subnet = Vec::new();
                    let netmask_digits = netmask.octets();
                    let local_addr_digits = local_addr.octets();
                    let remote_addr_digits = address_to_lookup_parsed.octets();
                    for (i, netmask_digit) in netmask_digits.iter().enumerate() {
                        local_subnet.push(netmask_digit & local_addr_digits[i]);
                        remote_subnet.push(netmask_digit & remote_addr_digits[i]);
                    }
                    if local_subnet == remote_subnet {
                        ret_val = true;
                    }
                }
            }
            _ => {}
        }
    }
    ret_val
}

/// Determines if the address passed as parameter belong to the chosen adapter
pub fn is_my_address(address_to_lookup: &String, my_interface_addresses: &Vec<Address>) -> bool {
    let mut ret_val = false;
    for address in my_interface_addresses {
        if address.addr.to_string().eq(address_to_lookup) {
            ret_val = true;
            break;
        }
    }
    ret_val
}

/// Determines if the capture opening resolves into an Error
///
/// Returns `(Some(error_message), None)` on failure, `(None, Some(capture))` on success.
/// NOTE(review): the `expect` on `Capture::from_device` panics rather than returning the
/// error tuple — confirm device names are pre-validated by the caller.
pub fn get_capture_result(device: &MyDevice) -> (Option<String>, Option<Capture<Active>>) {
    let cap_result = Capture::from_device(&*device.name)
        .expect("Capture initialization error\n\r")
        .promisc(true)
        .snaplen(256) //limit stored packets slice dimension (to keep more in the buffer)
        .immediate_mode(true) //parse packets ASAP!
        .open();
    if cap_result.is_err() {
        let err_string = cap_result.err().unwrap().to_string();
        (Some(err_string), None)
    } else {
        (None, cap_result.ok())
    }
}

/// Converts a MAC address in its hexadecimal form
///
/// Each byte is rendered as two lowercase hex digits, colon-separated.
fn mac_from_dec_to_hex(mac_dec: [u8; 6]) -> String {
    let mut mac_hex = String::new();
    for n in &mac_dec {
        mac_hex.push_str(&format!("{n:02x}:"));
    }
    // drop the trailing ':'
    mac_hex.pop();
    mac_hex
}

/// Returns the remote side of the pair according to the traffic direction.
pub fn get_address_to_lookup(key: &AddressPortPair, traffic_direction: TrafficDirection) -> String {
    match traffic_direction {
        TrafficDirection::Outgoing => key.address2.clone(),
        TrafficDirection::Incoming => key.address1.clone(),
    }
}

/// Function to convert a long decimal ipv6 address to a
/// shorter compressed ipv6 address
///
/// # Arguments
///
/// * `ipv6_long` - Contains the 16 integer composing the not compressed decimal ipv6 address
///
/// # Example
///
/// ```
/// let result = ipv6_from_long_dec_to_short_hex([255,10,10,255,0,0,0,0,28,4,4,28,255,1,0,0]);
/// assert_eq!(result, "ff0a:aff::1c04:41c:ff01:0".to_string());
/// ```
fn ipv6_from_long_dec_to_short_hex(ipv6_long: [u8; 16]) -> String {
    //from hex to dec, paying attention to the correct number of digits
    let mut ipv6_hex = String::new();
    for i in 0..=15 {
        //even: first byte of the group
        if i % 2 == 0 {
            // leading zero byte of a group is omitted entirely
            if *ipv6_long.get(i).unwrap() == 0 {
                continue;
            }
            ipv6_hex.push_str(&format!("{:x}", ipv6_long.get(i).unwrap()));
        }
        //odd: second byte of the group
        else if *ipv6_long.get(i - 1).unwrap() == 0 {
            // no high byte emitted: low byte needs no zero-padding
            ipv6_hex.push_str(&format!("{:x}:", ipv6_long.get(i).unwrap()));
        } else {
            ipv6_hex.push_str(&format!("{:02x}:", ipv6_long.get(i).unwrap()));
        }
    }
    // drop the trailing ':'
    ipv6_hex.pop();
    // search for the longest zero sequence in the ipv6 address
    let mut to_compress: Vec<&str> = ipv6_hex.split(':').collect();
    let mut longest_zero_sequence = 0; // max number of consecutive zeros
    let mut longest_zero_sequence_start = 0; // first index of the longest sequence of zeros
    let mut current_zero_sequence = 0;
    let mut current_zero_sequence_start = 0;
    let mut i = 0;
    for s in to_compress.clone() {
        if s.eq("0") {
            if current_zero_sequence == 0 {
                current_zero_sequence_start = i;
            }
            current_zero_sequence += 1;
        } else if current_zero_sequence != 0 {
            if current_zero_sequence > longest_zero_sequence {
                longest_zero_sequence = current_zero_sequence;
                longest_zero_sequence_start = current_zero_sequence_start;
            }
            current_zero_sequence = 0;
        }
        i += 1;
    }
    if current_zero_sequence != 0 {
        // to catch consecutive zeros at the end
        if current_zero_sequence > longest_zero_sequence {
            longest_zero_sequence = current_zero_sequence;
            longest_zero_sequence_start = current_zero_sequence_start;
        }
    }
    if longest_zero_sequence < 2 {
        // no compression needed
        return ipv6_hex;
    }
    //from longest sequence of consecutive zeros to '::'
    let mut ipv6_hex_compressed = String::new();
    for _ in 0..longest_zero_sequence {
        to_compress.remove(longest_zero_sequence_start);
    }
    i = 0;
    if longest_zero_sequence_start == 0 {
        ipv6_hex_compressed.push_str("::");
    }
    for s in to_compress {
        ipv6_hex_compressed.push_str(s);
        ipv6_hex_compressed.push(':');
        i += 1;
        if i == longest_zero_sequence_start {
            ipv6_hex_compressed.push(':');
        }
    }
    if ipv6_hex_compressed.ends_with("::") {
        return ipv6_hex_compressed;
    }
    // drop the trailing ':'
    ipv6_hex_compressed.pop();
    ipv6_hex_compressed
}

#[cfg(test)]
mod tests {
    use std::net::IpAddr;

    use pcap::Address;

    use crate::networking::manage_packets::{
        get_traffic_direction, get_traffic_type, ipv6_from_long_dec_to_short_hex,
        is_local_connection, mac_from_dec_to_hex,
    };
    use crate::networking::types::traffic_direction::TrafficDirection;
    use crate::networking::types::traffic_type::TrafficType;

    #[test]
    fn mac_simple_test() {
        let result = mac_from_dec_to_hex([255, 255, 10, 177, 9, 15]);
        assert_eq!(result, "ff:ff:0a:b1:09:0f".to_string());
    }

    #[test]
    fn mac_all_zero_test() {
        let result = mac_from_dec_to_hex([0, 0, 0, 0, 0, 0]);
        assert_eq!(result, "00:00:00:00:00:00".to_string());
    }

    #[test]
    fn ipv6_simple_test() {
        let result = ipv6_from_long_dec_to_short_hex([
            255, 10, 10, 255, 255, 10, 10, 255, 255, 10, 10, 255, 255, 10, 10, 255,
        ]);
        assert_eq!(result, "ff0a:aff:ff0a:aff:ff0a:aff:ff0a:aff".to_string());
    }

    #[test]
    fn ipv6_zeros_in_the_middle() {
        let result = ipv6_from_long_dec_to_short_hex([
            255, 10, 10, 255, 0, 0, 0, 0, 28, 4, 4, 28, 255, 1, 0, 0,
        ]);
        assert_eq!(result, "ff0a:aff::1c04:41c:ff01:0".to_string());
    }

    #[test]
    fn ipv6_leading_zeros() {
        let result =
            ipv6_from_long_dec_to_short_hex([0, 0, 0, 0, 0, 0, 0, 0, 28, 4, 4, 28, 255, 1, 0, 10]);
        assert_eq!(result, "::1c04:41c:ff01:a".to_string());
    }

    #[test]
    fn ipv6_tail_one_after_zeros() {
        let result =
            ipv6_from_long_dec_to_short_hex([28, 4, 4, 28, 255, 1, 0, 10, 0, 0, 0, 0, 0, 0, 0, 1]);
        assert_eq!(result, "1c04:41c:ff01:a::1".to_string());
    }

    #[test]
    fn ipv6_tail_zeros() {
        let result =
            ipv6_from_long_dec_to_short_hex([28, 4, 4, 28, 255, 1, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0]);
        assert_eq!(result, "1c04:41c:ff01:a::".to_string());
    }

    #[test]
    fn ipv6_multiple_zero_sequences_first_longer() {
        let result =
            ipv6_from_long_dec_to_short_hex([32, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1]);
        assert_eq!(result, "2000::101:0:0:1".to_string());
    }

    #[test]
    fn ipv6_multiple_zero_sequences_first_longer_head() {
        let result =
            ipv6_from_long_dec_to_short_hex([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1]);
        assert_eq!(result, "::101:0:0:1".to_string());
    }

    #[test]
    fn ipv6_multiple_zero_sequences_second_longer() {
        let result =
            ipv6_from_long_dec_to_short_hex([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 118]);
        assert_eq!(result, "100:0:0:1::376".to_string());
    }

    #[test]
    fn ipv6_multiple_zero_sequences_second_longer_tail() {
        let result =
            ipv6_from_long_dec_to_short_hex([32, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]);
        assert_eq!(result, "2000:0:0:1:101::".to_string());
    }

    #[test]
    fn ipv6_multiple_zero_sequences_equal_length() {
        let result =
            ipv6_from_long_dec_to_short_hex([118, 3, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1]);
        assert_eq!(result, "7603::1:101:0:0:1".to_string());
    }

    #[test]
    fn ipv6_all_zeros() {
        let result =
            ipv6_from_long_dec_to_short_hex([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
        assert_eq!(result, "::".to_string());
    }

    #[test]
    fn ipv6_x_all_zeros() {
        let result =
            ipv6_from_long_dec_to_short_hex([161, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
        assert_eq!(result, "a100::".to_string());
    }

    #[test]
    fn ipv6_all_zeros_x() {
        let result =
            ipv6_from_long_dec_to_short_hex([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 176]);
        assert_eq!(result, "::b0".to_string());
    }

    #[test]
    fn ipv6_many_zeros_but_no_compression() {
        let result =
            ipv6_from_long_dec_to_short_hex([0, 16, 16, 0, 0, 1, 7, 0, 0, 2, 216, 0, 1, 0, 0, 1]);
        assert_eq!(result, "10:1000:1:700:2:d800:100:1".to_string());
    }

    #[test]
    fn traffic_direction_ipv4_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = get_traffic_direction(
            &"172.20.10.9".to_string(),
            &"99.88.77.00".to_string(),
            &address_vec,
        );
        assert_eq!(result1, TrafficDirection::Outgoing);
        let result2 = get_traffic_direction(
            &"172.20.10.10".to_string(),
            &"172.20.10.9".to_string(),
            &address_vec,
        );
        assert_eq!(result2, TrafficDirection::Incoming);
        let result3 = get_traffic_direction(
            &"172.20.10.9".to_string(),
            &"0.0.0.0".to_string(),
            &address_vec,
        );
        assert_eq!(result3, TrafficDirection::Outgoing);
        let result4 = get_traffic_direction(
            &"0.0.0.0".to_string(),
            &"172.20.10.9".to_string(),
            &address_vec,
        );
        assert_eq!(result4, TrafficDirection::Incoming);
        let result4 = get_traffic_direction(
            &"0.0.0.0".to_string(),
            &"172.20.10.10".to_string(),
            &address_vec,
        );
        assert_eq!(result4, TrafficDirection::Outgoing);
    }

    #[test]
    fn traffic_type_multicast_ipv4_test() {
        let result1 = get_traffic_type("227.255.255.0", &[], TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Multicast);
        let result2 = get_traffic_type("239.255.255.255", &[], TrafficDirection::Outgoing);
        assert_eq!(result2, TrafficType::Multicast);
        let result3 = get_traffic_type("224.0.0.0", &[], TrafficDirection::Outgoing);
        assert_eq!(result3, TrafficType::Multicast);
        let result4 = get_traffic_type("223.255.255.255", &[], TrafficDirection::Outgoing);
        assert_eq!(result4, TrafficType::Unicast);
        let result5 = get_traffic_type("240.0.0.0", &[], TrafficDirection::Outgoing);
        assert_eq!(result5, TrafficType::Unicast);
        let result6 = get_traffic_type("227.255.255.0", &[], TrafficDirection::Incoming);
        assert_eq!(result6, TrafficType::Unicast);
        let result7 = get_traffic_type("239.255.255.255", &[], TrafficDirection::Incoming);
        assert_eq!(result7, TrafficType::Unicast);
        let result8 = get_traffic_type("224.0.0.0", &[], TrafficDirection::Incoming);
        assert_eq!(result8, TrafficType::Unicast);
        let result9 = get_traffic_type("223.255.255.255", &[], TrafficDirection::Incoming);
        assert_eq!(result9, TrafficType::Unicast);
        let result10 = get_traffic_type("240.0.0.0", &[], TrafficDirection::Incoming);
        assert_eq!(result10, TrafficType::Unicast);
    }

    #[test]
    fn traffic_type_multicast_ipv6_test() {
        let result1 = get_traffic_type("ff::", &[], TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Multicast);
        let result2 = get_traffic_type("fe80:1234::", &[], TrafficDirection::Outgoing);
        assert_eq!(result2, TrafficType::Unicast);
        let result3 = get_traffic_type("ffff:ffff:ffff::", &[], TrafficDirection::Outgoing);
        assert_eq!(result3, TrafficType::Multicast);
        let result4 = get_traffic_type("ff::", &[], TrafficDirection::Incoming);
        assert_eq!(result4, TrafficType::Unicast);
        let result5 = get_traffic_type("fe80:1234::", &[], TrafficDirection::Incoming);
        assert_eq!(result5, TrafficType::Unicast);
        let result6 = get_traffic_type("ffff:ffff:ffff::", &[], TrafficDirection::Incoming);
        assert_eq!(result6, TrafficType::Unicast);
    }

    #[test]
    fn traffic_type_host_local_broadcast_test() {
        let result1 = get_traffic_type("255.255.255.255", &[], TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Broadcast);
        let result2 = get_traffic_type("255.255.255.255", &[], TrafficDirection::Incoming);
        assert_eq!(result2, TrafficType::Unicast);
        let result3 = get_traffic_type("255.255.255.254", &[], TrafficDirection::Outgoing);
        assert_eq!(result3, TrafficType::Unicast);
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        address_vec.push(my_address);
        let result1 = get_traffic_type("255.255.255.255", &address_vec, TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Broadcast);
        let result2 = get_traffic_type("255.255.255.255", &address_vec, TrafficDirection::Incoming);
        assert_eq!(result2, TrafficType::Unicast);
    }

    #[test]
    fn traffic_type_host_directed_broadcast_test() {
        let result1 = get_traffic_type("172.20.10.15", &[], TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Unicast);
        let result2 = get_traffic_type("172.20.10.15", &[], TrafficDirection::Incoming);
        assert_eq!(result2, TrafficType::Unicast);
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        address_vec.push(my_address);
        let result1 = get_traffic_type("172.20.10.15", &address_vec, TrafficDirection::Outgoing);
        assert_eq!(result1, TrafficType::Broadcast);
        let result2 = get_traffic_type("172.20.10.15", &address_vec, TrafficDirection::Incoming);
        assert_eq!(result2, TrafficType::Unicast);
    }

    #[test]
    fn is_local_connection_ipv4_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("104.18.43.158", &address_vec);
        assert_eq!(result1, false);
        let result2 = is_local_connection("172.20.10.15", &address_vec);
        assert_eq!(result2, true);
        let result3 = is_local_connection("172.20.10.16", &address_vec);
        assert_eq!(result3, false);
        let result4 = is_local_connection("172.20.10.0", &address_vec);
        assert_eq!(result4, true);
        let result5 = is_local_connection("172.20.10.7", &address_vec);
        assert_eq!(result5, true);
        let result6 = is_local_connection("172.20.10.99", &address_vec);
        assert_eq!(result6, false);
    }

    #[test]
    fn is_local_connection_ipv6_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe90:8b1:1234:5678:d065::1234".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ff11::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("fe90:8b1:1234:5611:d065::1234", &address_vec);
        assert_eq!(result1, false);
        let result2 = is_local_connection("fe90:8b1:1234:5610:d065::1234", &address_vec);
        assert_eq!(result2, true);
        let result3 = is_local_connection("ff90:8b1:1234:5610:d065::1234", &address_vec);
        assert_eq!(result3, false);
        let result4 = is_local_connection("fe90:8b1:1234:5610:ffff:eeee:9876:1234", &address_vec);
        assert_eq!(result4, true);
    }

    #[test]
    fn is_local_connection_ipv4_2_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.0".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("255.255.255.255", &address_vec);
        assert_eq!(result1, false);
        let result2 = is_local_connection("172.20.10.9", &address_vec);
        assert_eq!(result2, true);
        let result3 = is_local_connection("172.20.10.9", &address_vec);
        assert_eq!(result3, true);
        let result4 = is_local_connection("172.20.10.9", &address_vec);
        assert_eq!(result4, true);
        let result5 = is_local_connection("172.20.10.7", &address_vec);
        assert_eq!(result5, true);
        let result6 = is_local_connection("172.20.10.99", &address_vec);
        assert_eq!(result6, true);
        let result7 = is_local_connection("172.20.11.0", &address_vec);
        assert_eq!(result7, false);
        let result8 = is_local_connection("172.20.9.255", &address_vec);
        assert_eq!(result8, false);
    }

    #[test]
    fn is_local_connection_ipv4_multicast_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("224.0.0.251", &address_vec);
        assert_eq!(result1, false);
    }

    #[test]
    fn is_local_connection_ipv6_multicast_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("ff::1234", &address_vec);
        assert_eq!(result1, false);
    }

    #[test]
    fn is_local_connection_ipv4_link_local_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe80::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("224.0.1.2", &address_vec);
        assert_eq!(result1, false);
        let result2 = is_local_connection("169.254.17.199", &address_vec);
        assert_eq!(result2, true);
        let result3 = is_local_connection("169.255.17.199", &address_vec);
        assert_eq!(result3, false);
    }

    #[test]
    fn is_local_connection_ipv6_link_local_test() {
        let mut address_vec: Vec<Address> = Vec::new();
        let my_address_v4 = Address {
            addr: IpAddr::V4("172.20.10.9".parse().unwrap()),
            netmask: Some(IpAddr::V4("255.255.255.240".parse().unwrap())),
            broadcast_addr: Some(IpAddr::V4("172.20.10.15".parse().unwrap())),
            dst_addr: None,
        };
        let my_address_v6 = Address {
            addr: IpAddr::V6("fe90::8b1:1234:5678:d065".parse().unwrap()),
            netmask: Some(IpAddr::V6("ffff:ffff:ffff:ffff::".parse().unwrap())),
            broadcast_addr: None,
            dst_addr: None,
        };
        address_vec.push(my_address_v4);
        address_vec.push(my_address_v6);
        let result1 = is_local_connection("ff88::", &address_vec);
        assert_eq!(result1, false);
        let result2 = is_local_connection("fe80::8b1:1234:5678:d065", &address_vec);
        assert_eq!(result2, true);
        let result3 = is_local_connection("fe70::8b1:1234:5678:d065", &address_vec);
        assert_eq!(result3, false);
    }
}
pub mod part1;
pub mod part2;

/// Executes both parts of this day's puzzle in order.
pub fn run() {
    part1::run();
    part2::run();
}

/// Returns the puzzle input bundled with the binary at compile time.
pub fn default_input() -> &'static str {
    include_str!("input")
}

/// Splits the raw input into groups: groups are separated by a blank line,
/// and each group is a list of its individual lines.
pub fn parse_input(input: &str) -> Vec<Vec<&str>> {
    let mut groups = Vec::new();
    for group in input.split("\n\n") {
        let lines: Vec<&str> = group.split('\n').collect();
        groups.push(lines);
    }
    groups
}
extern crate bytes;

use std::io::{Error, Read, ErrorKind, Result, BufReader, Cursor};
use bytes::*;

/// Generates a reader for a Minecraft-protocol variable-length integer
/// (LEB128-like: 7 payload bits per byte, MSB set = continuation).
/// `$maxb` caps the byte count; exceeding it yields `InvalidData`.
macro_rules! define_read_var {
    ($wrapper_name:ident, $typ:ty, $maxb:expr) => {
        pub fn $wrapper_name(bytes: &mut Cursor<Bytes>) -> Result<($typ, usize)> {
            let mut num_read: usize = 0;
            let mut result: $typ = 0;
            loop {
                // reject over-long encodings before reading another byte
                if num_read >= $maxb {
                    return Err(Error::new(
                        ErrorKind::InvalidData,
                        format!("Variable number at position {} has an invalid format (too many bytes)", bytes.position())));
                }
                let read = bytes.read_u8()?;
                // low 7 bits are payload, accumulated little-endian
                let value = read & 0b01111111;
                result |= (value as $typ) << (7 * num_read);
                num_read += 1;
                // high bit clear means this was the last byte
                if (read & 0b10000000) == 0 {
                    break;
                }
            }
            // returns the decoded value and how many bytes were consumed
            Ok((result, num_read))
        }
    };
}

// VarInt: at most 5 bytes; VarLong: at most 10 bytes (protocol limits).
define_read_var!(read_var_int, i32, 5);
define_read_var!(read_var_long, i64, 10);

/// Convenience trait exposing the var-number readers as methods on a cursor.
pub trait MinecraftBufRead {
    fn read_var_int(&mut self) -> Result<(i32, usize)>;
    fn read_var_long(&mut self) -> Result<(i64, usize)>;
}

impl MinecraftBufRead for Cursor<Bytes> {
    fn read_var_int(&mut self) -> Result<(i32, usize)> {
        read_var_int(self)
    }
    fn read_var_long(&mut self) -> Result<(i64, usize)> {
        read_var_long(self)
    }
}

// This all is copied over from io::Read, with WouldBlock ignored rather than causing an error to work with mio
// Restores the vector's logical length on drop, so a panic or early error
// never leaves uninitialized bytes exposed past `len`.
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // SAFETY: `len` never exceeds the vector's capacity and only counts
        // bytes that were actually written by `read` (or were already present).
        unsafe {
            self.buf.set_len(self.len);
        }
    }
}

/// Reads from `r` into `buf` until EOF or until the reader would block,
/// returning the number of bytes appended.
///
/// Unlike `io::Read::read_to_end`, `WouldBlock` terminates the loop
/// successfully instead of erroring, so it can drain a mio non-blocking source.
/// NOTE(review): `r.initializer()` is the unstable `read_initializer` API —
/// this only builds on the nightly toolchain it was written against.
pub fn read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<usize> {
    let start_len = buf.len();
    let mut g = Guard { len: buf.len(), buf: buf };
    let ret;
    loop {
        // grow the spare capacity when the initialized region is exhausted
        if g.len == g.buf.len() {
            unsafe {
                g.buf.reserve(32);
                let capacity = g.buf.capacity();
                g.buf.set_len(capacity);
                r.initializer().initialize(&mut g.buf[g.len..]);
            }
        }
        match r.read(&mut g.buf[g.len..]) {
            Ok(0) => { ret = Ok(g.len - start_len); break; }
            Ok(n) => g.len += n,
            // transient interruption: retry the read
            Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
            Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                // This is changed
                println!("Would block");
                ret = Ok(g.len - start_len);
                break;
            }
            Err(e) => { ret = Err(e); break; }
        }
    }
    ret
}
//! NDS hardware functions. pub mod gpu_cmds; pub mod texture_formats; pub mod texture_params; pub mod decode_texture; pub use self::texture_formats::{TextureFormat, FormatDesc, Alpha}; pub use self::texture_params::TextureParams; pub use self::decode_texture::decode_texture;
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::star_chain_client::{faucet_sync, ChainClient, MockChainClient}; use libra_types::account_address::AccountAddress; use std::thread::spawn; #[test] fn test_mock_chain_client_faucet() { ::libra_logger::try_init_for_testing(); let (client, _handle) = MockChainClient::new(); for _i in 1..2 { let addr = AccountAddress::random(); faucet_sync(client.clone(), addr, 1000).unwrap(); faucet_sync(client.clone(), addr, 1000).unwrap(); faucet_sync(client.clone(), addr, 1000).unwrap(); assert_eq!(client.account_exist(&addr, None), true); } drop(client); } #[test] fn test_multi_mock_chain_client() { for _i in 1..3 { let (client, _handle) = MockChainClient::new(); spawn(move || { for _i in 1..2 { let addr = AccountAddress::random(); faucet_sync(client.clone(), addr, 1000).unwrap(); faucet_sync(client.clone(), addr, 1000).unwrap(); faucet_sync(client.clone(), addr, 1000).unwrap(); assert_eq!(client.account_exist(&addr, None), true); } drop(client); }); } }
//! Implementation of the first depth search algorithm use std::io::{stdout, Write}; use std::{thread, time}; use crossterm::{ cursor::{Hide, MoveTo}, execute, style::{Color, Colorize, PrintStyledContent, SetBackgroundColor, SetForegroundColor}, Result, }; use rand; use rand::distributions::{IndependentSample, Range}; use super::map::Map; use super::variables::{Direction, Position}; pub struct FirstDepthSearch { direction: Direction, map: Map, stack: Vec<Position>, root_pos: Position, is_terminated: bool, } impl FirstDepthSearch { pub fn new(map: Map, start_pos: Position) -> FirstDepthSearch { FirstDepthSearch { direction: Direction::Up, map, stack: Vec::new(), root_pos: start_pos, is_terminated: false, } } pub fn start(&mut self) -> Result<()> { self.is_terminated = false; // push first position on the stack self.stack.push(self.root_pos); execute!( stdout(), Hide, SetForegroundColor(Color::Green), SetBackgroundColor(Color::Black) )?; // loop until there are now items left in the stack. loop { if self.stack.len() == 0 { break; } self.choose_random_neighbor(); if self.is_terminated { break; } self.update_position(); let pos = self.root_pos.clone(); let x = pos.x as u16; let y = pos.y as u16; execute!( ::std::io::stdout(), MoveTo(x, y), PrintStyledContent(" ".on_yellow()) )?; thread::sleep(time::Duration::from_millis(1)); } Ok(()) } /// With this function we are choosing an random neighbor that we havent visited yet. fn choose_random_neighbor(&mut self) { let mut available_directions: Vec<Direction> = Vec::with_capacity(4); // check every direction if the direction is not visited we can add it to the list. // note that if the y or x is 0 that we don't want to subtract because we get an subtract overflow. 
if self.root_pos.y != 0 && !self .map .is_cell_visited(self.root_pos.x, self.root_pos.y - 1) { available_directions.push(Direction::Up) } if !&self .map .is_cell_visited(self.root_pos.x, self.root_pos.y + 1) { available_directions.push(Direction::Down) } if self.root_pos.x != 0 && !self .map .is_cell_visited(self.root_pos.x - 1, self.root_pos.y) { available_directions.push(Direction::Left) } if !&self .map .is_cell_visited(self.root_pos.x + 1, self.root_pos.y) { available_directions.push(Direction::Right) } let directions_count = available_directions.len(); // if there are no directions left we need to backtrack until we find directions to go to. if directions_count != 0 { let step = Range::new(0, directions_count); let mut rng = rand::thread_rng(); let choice = step.ind_sample(&mut rng); // set the current direction to the new random generated direction. self.direction = available_directions[choice]; } else { self.find_first_possible_direction(); } } /// Find direction to go to if there is no direction pop the current position of the stack for back tracking to the previous position. fn find_first_possible_direction(&mut self) { // if there are no elements left in the stack that means we have visited all cell and we van terminate the program. if let &Some(previous_cell) = &self.stack.pop() { // update root pos to previous cell and continue searching for new neighbours self.root_pos = previous_cell; self.choose_random_neighbor(); } else { self.is_terminated = true; } } /// update the root position to the new direction we went in fn update_position(&mut self) { match self.direction { Direction::Up => self.root_pos.y -= 1, Direction::Down => self.root_pos.y += 1, Direction::Left => self.root_pos.x -= 1, Direction::Right => self.root_pos.x += 1, }; self.map.set_visited(self.root_pos.x, self.root_pos.y); self.stack.push(self.root_pos); } }
// Public client API surface.
pub mod client;
// Wire-frame handling, internal to this crate.
mod frame;
use clap::{App, Arg}; use thunderbird_email_backup::run; use thunderbird_email_backup::runtime::Operation; fn main() { let matches = App::new("Thunderbird email backup") .version("0.1") .author("Richard Bradshaw") .about("Easily back up the email in a thunderbird profile folder") .arg( Arg::with_name("profile") .long("profile") .takes_value(true) .required(true) .help("The thunderbird profile to use. Use 'ls ~/.thunderbird' to find this"), ) .arg( Arg::with_name("output") .long("output") .takes_value(true) .required(true) .help("The path to output to"), ) .get_matches(); let profile = matches.value_of("profile").unwrap(); let output = matches.value_of("output").unwrap(); let operation = Operation::Backup(profile, output); match run(operation) { Ok(_) => (), Err(error) => { eprintln!("An error occurred: {}", error); panic!(); } } }
// --- AutoRust-generated Azure NetApp management client; do not hand-edit, regenerate instead. ---
// Crate-wide `Error` enum: one `#[error(transparent)]` variant per generated
// operation, each converting from that operation's module-local `Error` via
// `#[from]`. `#[non_exhaustive]` lets new operations be added without a
// breaking change. The `operations` module that follows shows the generated
// request pattern used throughout: build the URL, attach a bearer token when
// a credential is configured, send the request, and map HTTP status codes to
// typed results.
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, API_VERSION}; #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] Operations_List(#[from] operations::list::Error), #[error(transparent)] NetAppResource_CheckNameAvailability(#[from] net_app_resource::check_name_availability::Error), #[error(transparent)] NetAppResource_CheckFilePathAvailability(#[from] net_app_resource::check_file_path_availability::Error), #[error(transparent)] NetAppResource_CheckQuotaAvailability(#[from] net_app_resource::check_quota_availability::Error), #[error(transparent)] Accounts_ListBySubscription(#[from] accounts::list_by_subscription::Error), #[error(transparent)] Accounts_List(#[from] accounts::list::Error), #[error(transparent)] Accounts_Get(#[from] accounts::get::Error), #[error(transparent)] Accounts_CreateOrUpdate(#[from] accounts::create_or_update::Error), #[error(transparent)] Accounts_Update(#[from] accounts::update::Error), #[error(transparent)] Accounts_Delete(#[from] accounts::delete::Error), #[error(transparent)] Pools_List(#[from] pools::list::Error), #[error(transparent)] Pools_Get(#[from] pools::get::Error), #[error(transparent)] Pools_CreateOrUpdate(#[from] pools::create_or_update::Error), #[error(transparent)] Pools_Update(#[from] pools::update::Error), #[error(transparent)] Pools_Delete(#[from] pools::delete::Error), #[error(transparent)] Volumes_List(#[from] volumes::list::Error), #[error(transparent)] Volumes_Get(#[from] volumes::get::Error), #[error(transparent)] Volumes_CreateOrUpdate(#[from] volumes::create_or_update::Error), #[error(transparent)] Volumes_Update(#[from] volumes::update::Error), #[error(transparent)] Volumes_Delete(#[from] volumes::delete::Error), #[error(transparent)] Volumes_Revert(#[from] volumes::revert::Error), #[error(transparent)] Volumes_BreakReplication(#[from]
volumes::break_replication::Error), #[error(transparent)] Volumes_ReplicationStatus(#[from] volumes::replication_status::Error), #[error(transparent)] Volumes_ResyncReplication(#[from] volumes::resync_replication::Error), #[error(transparent)] Volumes_DeleteReplication(#[from] volumes::delete_replication::Error), #[error(transparent)] Volumes_AuthorizeReplication(#[from] volumes::authorize_replication::Error), #[error(transparent)] Volumes_ReInitializeReplication(#[from] volumes::re_initialize_replication::Error), #[error(transparent)] Volumes_PoolChange(#[from] volumes::pool_change::Error), #[error(transparent)] Snapshots_List(#[from] snapshots::list::Error), #[error(transparent)] Snapshots_Get(#[from] snapshots::get::Error), #[error(transparent)] Snapshots_Create(#[from] snapshots::create::Error), #[error(transparent)] Snapshots_Update(#[from] snapshots::update::Error), #[error(transparent)] Snapshots_Delete(#[from] snapshots::delete::Error), #[error(transparent)] SnapshotPolicies_List(#[from] snapshot_policies::list::Error), #[error(transparent)] SnapshotPolicies_Get(#[from] snapshot_policies::get::Error), #[error(transparent)] SnapshotPolicies_Create(#[from] snapshot_policies::create::Error), #[error(transparent)] SnapshotPolicies_Update(#[from] snapshot_policies::update::Error), #[error(transparent)] SnapshotPolicies_Delete(#[from] snapshot_policies::delete::Error), #[error(transparent)] SnapshotPolicies_ListVolumes(#[from] snapshot_policies::list_volumes::Error), #[error(transparent)] Backups_GetStatus(#[from] backups::get_status::Error), #[error(transparent)] Backups_GetVolumeRestoreStatus(#[from] backups::get_volume_restore_status::Error), #[error(transparent)] AccountBackups_List(#[from] account_backups::list::Error), #[error(transparent)] AccountBackups_Get(#[from] account_backups::get::Error), #[error(transparent)] AccountBackups_Delete(#[from] account_backups::delete::Error), #[error(transparent)] Backups_List(#[from] backups::list::Error), 
#[error(transparent)] Backups_Get(#[from] backups::get::Error), #[error(transparent)] Backups_Create(#[from] backups::create::Error), #[error(transparent)] Backups_Update(#[from] backups::update::Error), #[error(transparent)] Backups_Delete(#[from] backups::delete::Error), #[error(transparent)] BackupPolicies_List(#[from] backup_policies::list::Error), #[error(transparent)] BackupPolicies_Get(#[from] backup_policies::get::Error), #[error(transparent)] BackupPolicies_Create(#[from] backup_policies::create::Error), #[error(transparent)] BackupPolicies_Update(#[from] backup_policies::update::Error), #[error(transparent)] BackupPolicies_Delete(#[from] backup_policies::delete::Error), #[error(transparent)] Vaults_List(#[from] vaults::list::Error), } pub mod operations { use super::{models, API_VERSION}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.NetApp/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { 
// (generated) Remainder of `operations::list`, then the `net_app_resource`
// module: POST operations for checkNameAvailability, checkFilePathAvailability
// and checkQuotaAvailability against
// `/subscriptions/{id}/providers/Microsoft.NetApp/locations/{loc}/...`.
// Each operation carries a module-local `Error` enum covering URL-parse,
// request-build, execute, (de)serialize and token-acquisition failures;
// only HTTP 200 is treated as success. The `accounts` module begins at the
// end of this chunk.
let rsp_body = rsp.body(); let rsp_value: models::OperationListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod net_app_resource { use super::{models, API_VERSION}; pub async fn check_name_availability( operation_config: &crate::OperationConfig, body: &models::ResourceNameAvailabilityRequest, subscription_id: &str, location: &str, ) -> std::result::Result<models::CheckAvailabilityResponse, check_name_availability::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.NetApp/locations/{}/checkNameAvailability", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(check_name_availability::Error::GetTokenError)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(check_name_availability::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(check_name_availability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_name_availability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CheckAvailabilityResponse = serde_json::from_slice(rsp_body) .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(check_name_availability::Error::DefaultResponse { status_code }), } } pub mod check_name_availability { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn check_file_path_availability( operation_config: &crate::OperationConfig, body: &models::FilePathAvailabilityRequest, subscription_id: &str, location: &str, ) -> std::result::Result<models::CheckAvailabilityResponse, check_file_path_availability::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.NetApp/locations/{}/checkFilePathAvailability", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(check_file_path_availability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(check_file_path_availability::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(check_file_path_availability::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(check_file_path_availability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_file_path_availability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CheckAvailabilityResponse = serde_json::from_slice(rsp_body) .map_err(|source| check_file_path_availability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(check_file_path_availability::Error::DefaultResponse { status_code }), } } pub mod check_file_path_availability { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] 
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn check_quota_availability( operation_config: &crate::OperationConfig, body: &models::QuotaAvailabilityRequest, subscription_id: &str, location: &str, ) -> std::result::Result<models::CheckAvailabilityResponse, check_quota_availability::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.NetApp/locations/{}/checkQuotaAvailability", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(check_quota_availability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(check_quota_availability::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(check_quota_availability::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(check_quota_availability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_quota_availability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = 
rsp.body(); let rsp_value: models::CheckAvailabilityResponse = serde_json::from_slice(rsp_body) .map_err(|source| check_quota_availability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(check_quota_availability::Error::DefaultResponse { status_code }), } } pub mod check_quota_availability { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod accounts { use super::{models, API_VERSION}; pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::NetAppAccountList, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.NetApp/netAppAccounts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
// (generated) `accounts` module continued: list_by_subscription / list / get /
// create_or_update / update / delete over
// `.../providers/Microsoft.NetApp/netAppAccounts`. `create_or_update` maps
// 200/201 to `Response::Ok200`/`Created201`; `delete` maps 202/204 to
// unit-like variants. Note that `update` is the only operation in this chunk
// whose `DefaultResponse` error also deserializes and carries a
// `models::CloudError` payload from the response body.
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccountList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_by_subscription::Error::DefaultResponse { status_code }), } } pub mod list_by_subscription { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<models::NetAppAccountList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut 
req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccountList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::NetAppAccount, get::Error> { 
let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccount = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, body: &models::NetAppAccount, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccount = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let 
rsp_value: models::NetAppAccount = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => Err(create_or_update::Error::DefaultResponse { status_code }), } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::NetAppAccount), Created201(models::NetAppAccount), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, body: &models::NetAppAccountPatch, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; 
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccount = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: models::NetAppAccount = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::CloudError = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::NetAppAccount), Accepted202(models::NetAppAccount), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] 
SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] 
// (generated) Tail of `accounts::delete`'s error enum, then the `pools`
// module: capacityPools `list`, `get` and the start of `create_or_update`
// under `.../netAppAccounts/{account}/capacityPools`. Same generated request
// pattern as the modules above (URL build, optional bearer token, api-version
// query pair, status-code dispatch).
DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod pools { use super::{models, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::CapacityPoolList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body 
= rsp.body(); let rsp_value: models::CapacityPoolList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, ) -> std::result::Result<models::CapacityPool, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CapacityPool = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, body: &models::CapacityPool, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name ); let mut url = 
url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CapacityPool = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::CapacityPool = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => Err(create_or_update::Error::DefaultResponse { status_code }), } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::CapacityPool), Created201(models::CapacityPool), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] 
ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, body: &models::CapacityPoolPatch, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let 
rsp_body = rsp.body(); let rsp_value: models::CapacityPool = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::CapacityPool), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await 
.map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod volumes { use super::{models, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, ) -> std::result::Result<models::VolumeList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes", 
// NOTE(review): continuation of `pub mod volumes` (auto-generated). This line completes
// volumes::list (GET .../volumes, 200 -> models::VolumeList, otherwise DefaultResponse)
// and its `list` error module, then starts volumes::get.
operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VolumeList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( 
// volumes::get: GET .../volumes/{volume}, 200 body deserialized to models::Volume;
// its `get` error module begins at the end of this line.
operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::Volume, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Volume = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] 
// Remainder of the `get` error module, then volumes::create_or_update:
// PUT .../volumes/{volume} with a JSON-serialized Volume body.
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, body: &models::Volume, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match 
// volumes::create_or_update responses: 200 -> Ok200(Volume), 201 -> Created201(Volume),
// 202 -> Accepted202 (no body read). Its Response/Error enums follow, then volumes::update
// (PATCH .../volumes/{volume} with a VolumePatch body) begins.
rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Volume = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Volume = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202), status_code => Err(create_or_update::Error::DefaultResponse { status_code }), } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Volume), Created201(models::Volume), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, body: &models::VolumePatch, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}", 
// volumes::update: unlike the other operations in this chunk, its default (error) arm also
// deserializes the response body into models::CloudError and carries it in
// DefaultResponse { status_code, value } — generated from a spec that models the error
// payload for this operation only; intentional, not a bug. NOTE: the #[error] string at
// the end of this line is split across the physical line break (the literal contains the
// newline), so this line and the next must stay adjacent with nothing inserted between.
operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Volume = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::CloudError = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Volume), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request 
URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT 
// Completion of volumes::delete (202 Accepted / 204 NoContent, no body), its error
// module, and the start of volumes::revert: POST .../volumes/{volume}/revert with a
// JSON-serialized VolumeRevert body (request build continues on the next line).
=> Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn revert( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, body: &models::VolumeRevert, ) -> std::result::Result<revert::Response, revert::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/revert", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(revert::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(revert::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(revert::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(revert::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(revert::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(revert::Response::Ok200), http::StatusCode::ACCEPTED => Ok(revert::Response::Accepted202), status_code => Err(revert::Error::DefaultResponse { status_code }), } } pub mod revert { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn break_replication( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, body: Option<&models::BreakReplicationRequest>, ) -> std::result::Result<break_replication::Response, break_replication::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/breakReplication" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ; let mut url = url::Url::parse(url_str).map_err(break_replication::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(break_replication::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(break_replication::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(break_replication::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(break_replication::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(break_replication::Response::Ok200), http::StatusCode::ACCEPTED => Ok(break_replication::Response::Accepted202), status_code => Err(break_replication::Error::DefaultResponse { status_code }), } } pub mod break_replication { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: 
{0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn replication_status( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::ReplicationStatus, replication_status::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/replicationStatus" , operation_config . base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ; let mut url = url::Url::parse(url_str).map_err(replication_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(replication_status::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(replication_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(replication_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ReplicationStatus = serde_json::from_slice(rsp_body) .map_err(|source| replication_status::Error::DeserializeError(source, rsp_body.clone()))?; 
Ok(rsp_value) } status_code => Err(replication_status::Error::DefaultResponse { status_code }), } } pub mod replication_status { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn resync_replication( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<resync_replication::Response, resync_replication::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/resyncReplication" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ;
let mut url = url::Url::parse(url_str).map_err(resync_replication::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Auth header is optional: only added when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(resync_replication::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Body-less POST: send an explicit Content-Length: 0.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(resync_replication::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(resync_replication::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(resync_replication::Response::Ok200), http::StatusCode::ACCEPTED => Ok(resync_replication::Response::Accepted202), status_code => Err(resync_replication::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202) and `Error` for `resync_replication`.
pub mod resync_replication { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Deletes the replication connection on the destination volume:
//   POST .../volumes/{volume}/deleteReplication (empty body, Content-Length: 0).
// Returns `Response::Ok200` or `Response::Accepted202`.
pub async fn delete_replication( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<delete_replication::Response, delete_replication::Error> { let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/deleteReplication" , operation_config . base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ;
let mut url = url::Url::parse(url_str).map_err(delete_replication::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_replication::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete_replication::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(delete_replication::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(delete_replication::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete_replication::Response::Accepted202), status_code => Err(delete_replication::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202) and `Error` for `delete_replication`.
pub mod delete_replication { use
super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Authorizes replication on the source volume:
//   POST .../volumes/{volume}/authorizeReplication
// The `body` (`models::AuthorizeRequest`) is sent as JSON.
// Returns `Response::Ok200` or `Response::Accepted202`.
pub async fn authorize_replication( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, body: &models::AuthorizeRequest, ) -> std::result::Result<authorize_replication::Response, authorize_replication::Error> { let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/authorizeReplication" , operation_config . base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ;
let mut url = url::Url::parse(url_str).map_err(authorize_replication::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Auth header is optional: only added when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(authorize_replication::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// JSON request body serialized from `body`.
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(authorize_replication::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder .body(req_body) .map_err(authorize_replication::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(authorize_replication::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(authorize_replication::Response::Ok200), http::StatusCode::ACCEPTED => Ok(authorize_replication::Response::Accepted202), status_code => Err(authorize_replication::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202) and `Error` for `authorize_replication`.
pub mod authorize_replication { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Re-initializes volume replication:
//   POST .../volumes/{volume}/reinitializeReplication (empty body, Content-Length: 0).
// Returns `Response::Ok200` or `Response::Accepted202`.
pub async fn re_initialize_replication( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<re_initialize_replication::Response, re_initialize_replication::Error> { let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/reinitializeReplication" , operation_config . base_path () , subscription_id , resource_group_name , account_name , pool_name , volume_name) ;
let mut url = url::Url::parse(url_str).map_err(re_initialize_replication::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(re_initialize_replication::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder .body(req_body) .map_err(re_initialize_replication::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(re_initialize_replication::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(re_initialize_replication::Response::Ok200), http::StatusCode::ACCEPTED => Ok(re_initialize_replication::Response::Accepted202), status_code => Err(re_initialize_replication::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202) and `Error` for `re_initialize_replication`.
pub mod re_initialize_replication { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Moves a volume to another capacity pool:
//   POST .../volumes/{volume}/poolChange with a JSON `models::PoolChangeRequest` body.
// Returns `Response::Ok200` or `Response::Accepted202`.
pub async fn pool_change( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, body: &models::PoolChangeRequest, ) -> std::result::Result<pool_change::Response, pool_change::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/poolChange", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name );
let mut url = url::Url::parse(url_str).map_err(pool_change::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(pool_change::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION,
format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// JSON request body serialized from `body`.
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(pool_change::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(pool_change::Error::BuildRequestError)?;
let rsp = http_client .execute_request(req) .await .map_err(pool_change::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(pool_change::Response::Ok200), http::StatusCode::ACCEPTED => Ok(pool_change::Response::Accepted202), status_code => Err(pool_change::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202) and `Error` for `pool_change`.
pub mod pool_change { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// Operations on volume snapshots (list/get/create/update/delete) under
// .../capacityPools/{pool}/volumes/{volume}/snapshots.
pub mod snapshots { use super::{models, API_VERSION};
// Lists all snapshots of a volume (GET); 200 deserializes to `models::SnapshotsList`.
pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::SnapshotsList, list::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/snapshots", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name );
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Auth header is optional: only added when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotsList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } }
// Per-operation module holding the generated `Error` enum for `snapshots::list`.
pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Gets a single snapshot (GET .../snapshots/{snapshot}); 200 -> `models::Snapshot`.
pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, snapshot_name: &str, ) -> std::result::Result<models::Snapshot, get::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/snapshots/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, snapshot_name );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Snapshot = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } }
// Per-operation module holding the generated `Error` enum for `snapshots::get`.
pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Creates a snapshot (PUT .../snapshots/{snapshot}) with a JSON `models::Snapshot` body.
// 201 returns the created snapshot; 202 means the operation was accepted.
pub async fn create( operation_config: &crate::OperationConfig, body: &models::Snapshot, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, snapshot_name: &str, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/snapshots/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, snapshot_name );
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(create::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req =
req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Snapshot = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => Err(create::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (201 with payload / 202) and `Error` for `snapshots::create`.
pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Created201(models::Snapshot), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Patches a snapshot (PATCH .../snapshots/{snapshot}) with a JSON `models::SnapshotPatch` body.
// 200 returns the updated snapshot; 202 means the operation was accepted.
pub async fn update( operation_config: &crate::OperationConfig, body: &models::SnapshotPatch, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, snapshot_name: &str, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/snapshots/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, snapshot_name );
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
// Auth header is optional: only added when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Snapshot = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), status_code => Err(update::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200 with payload / 202) and `Error` for `snapshots::update`.
pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Snapshot), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// Deletes a snapshot (DELETE .../snapshots/{snapshot}); 200/202/204 all succeed.
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, snapshot_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/snapshots/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, snapshot_name );
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } }
// Per-operation module: `Response` (200/202/204) and `Error` for `snapshots::delete`.
pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// Operations on account-level snapshot policies (list/get/create/update/delete)
// under .../netAppAccounts/{account}/snapshotPolicies.
pub mod snapshot_policies { use super::{models, API_VERSION};
// Lists all snapshot policies of an account (GET); 200 -> `models::SnapshotPoliciesList`.
pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::SnapshotPoliciesList, list::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name );
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp =
http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPoliciesList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, snapshot_policy_name: &str, ) -> std::result::Result<models::SnapshotPolicy, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, snapshot_policy_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await 
.map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicy = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, snapshot_policy_name: &str, body: &models::SnapshotPolicy, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies/{}", operation_config.base_path(), 
subscription_id, resource_group_name, account_name, snapshot_policy_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicy = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicy = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => Err(create::Error::DefaultResponse { status_code }), } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::SnapshotPolicy), Created201(models::SnapshotPolicy), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] 
// NOTE(review): generated client code, continued. This span holds: the remaining variants of the snapshot_policies
// `create` error enum, snapshot_policies::update — a PATCH to .../snapshotPolicies/{policy} with a JSON
// models::SnapshotPolicyPatch body (200 -> Response::Ok200, 202 -> Response::Accepted202, both carrying a
// models::SnapshotPolicy) — the `update` response/error module, and the head of snapshot_policies::delete
// up to URL parsing.
ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, snapshot_policy_name: &str, body: &models::SnapshotPolicyPatch, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, snapshot_policy_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { 
http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicy = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicy = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Accepted202(rsp_value)) } status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::SnapshotPolicy), Accepted202(models::SnapshotPolicy), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, snapshot_policy_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, snapshot_policy_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut 
// NOTE(review): generated client code, continued. This span holds: the body of snapshot_policies::delete — a DELETE
// request with an empty body; 200/202/204 map to delete::Response::{Ok200, Accepted202, NoContent204} with no
// payload — the `delete` response/error module, and snapshot_policies::list_volumes: a GET to
// .../snapshotPolicies/{policy}/volumes whose 200 body deserializes to models::SnapshotPolicyVolumeList,
// plus the head of its error module.
req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_volumes( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, 
snapshot_policy_name: &str, ) -> std::result::Result<models::SnapshotPolicyVolumeList, list_volumes::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/snapshotPolicies/{}/volumes", operation_config.base_path(), subscription_id, resource_group_name, account_name, snapshot_policy_name ); let mut url = url::Url::parse(url_str).map_err(list_volumes::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_volumes::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_volumes::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_volumes::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SnapshotPolicyVolumeList = serde_json::from_slice(rsp_body).map_err(|source| list_volumes::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list_volumes::Error::DefaultResponse { status_code }), } } pub mod list_volumes { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), 
// NOTE(review): generated client code, continued. This span holds: the remaining list_volumes error variants, the
// closing brace of the enclosing module (presumably `snapshot_policies` — its opening is outside this view), and the
// start of `pub mod backups`: get_status — a GET to .../capacityPools/{pool}/volumes/{volume}/backupStatus whose 200
// body deserializes to models::BackupStatus — its error module, and the head of get_volume_restore_status
// (GET .../restoreStatus) up to the bearer-token branch.
#[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod backups { use super::{models, API_VERSION}; pub async fn get_status( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::BackupStatus, get_status::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backupStatus", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(get_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_status::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupStatus = 
serde_json::from_slice(rsp_body).map_err(|source| get_status::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get_status::Error::DefaultResponse { status_code }), } } pub mod get_status { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_volume_restore_status( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::RestoreStatus, get_volume_restore_status::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/restoreStatus", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(get_volume_restore_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_volume_restore_status::Error::GetTokenError)?; req_builder = 
// NOTE(review): generated client code, continued. This span holds: the tail of backups::get_volume_restore_status
// (empty GET body; 200 deserializes to models::RestoreStatus), its error module, and backups::list — a GET to
// .../capacityPools/{pool}/volumes/{volume}/backups whose 200 body deserializes to models::BackupsList — plus the
// head of the `list` error module.
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_volume_restore_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_volume_restore_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::RestoreStatus = serde_json::from_slice(rsp_body) .map_err(|source| get_volume_restore_status::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get_volume_restore_status::Error::DefaultResponse { status_code }), } } pub mod get_volume_restore_status { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, ) -> std::result::Result<models::BackupsList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backups", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupsList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
// NOTE(review): generated client code, continued. This span holds: the remaining backups::list error variants,
// backups::get — a GET to .../capacityPools/{pool}/volumes/{volume}/backups/{backup} whose 200 body deserializes to
// models::Backup — its error module, and the head of backups::create: a PUT to the same path with a JSON
// models::Backup body, up to building the serialized request.
DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, backup_name: &str, ) -> std::result::Result<models::Backup, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", 
status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, backup_name: &str, body: &models::Backup, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = 
// NOTE(review): generated client code, continued. This span holds: the tail of backups::create — 200/201 deserialize
// to models::Backup (Ok200/Created201) while 202 yields Accepted202 with no payload — its response/error module, and
// the head of backups::update: a PATCH whose `body` parameter is Option<&models::BackupPatch>; when Some, a JSON
// content-type header is set and the patch is serialized, otherwise (continued in the next span) an empty body is sent.
req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => Err(create::Error::DefaultResponse { status_code }), } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Backup), Created201(models::Backup), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, backup_name: &str, body: Option<&models::BackupPatch>, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(body) = body { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(body).map_err(update::Error::SerializeError)? 
// NOTE(review): generated client code, continued. This span holds: the tail of backups::update — the `else` arm sends
// azure_core::EMPTY_BODY when no patch body was supplied; 200/202 both deserialize a models::Backup into
// Response::{Ok200, Accepted202} — its response/error module, and backups::delete: a DELETE on
// .../backups/{backup} where 200/202/204 map to payload-less Response variants, plus the head of its error module.
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Accepted202(rsp_value)) } status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Backup), Accepted202(models::Backup), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, pool_name: &str, volume_name: &str, backup_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/capacityPools/{}/volumes/{}/backups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, pool_name, volume_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), 
// NOTE(review): generated client code, continued. This span holds: the remaining backups::delete error variants, the
// closing brace of `pub mod backups`, and the start of `pub mod account_backups`: list — a GET to
// .../netAppAccounts/{account}/accountBackups whose 200 body deserializes to models::BackupsList — its error module,
// and the head of account_backups::get (GET .../accountBackups/{backup}) up to the 200 branch.
#[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod account_backups { use super::{models, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::BackupsList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/accountBackups", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupsList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP 
status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, backup_name: &str, ) -> std::result::Result<models::Backup, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/accountBackups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body 
// NOTE(review): generated client code, continued. This span holds: the tail of account_backups::get (200 deserializes
// to models::Backup) and its error module, account_backups::delete — a DELETE on .../accountBackups/{backup} where
// 200/202/204 map to payload-less Response variants — its response/error module, the closing brace of
// `pub mod account_backups`, and the start of `pub mod backup_policies`: the head of list
// (GET .../netAppAccounts/{account}/backupPolicies) up to URL parsing.
= rsp.body(); let rsp_value: models::Backup = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, backup_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/accountBackups/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod backup_policies { use super::{models, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::BackupPoliciesList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/backupPolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = 
url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupPoliciesList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, 
account_name: &str, backup_policy_name: &str, ) -> std::result::Result<models::BackupPolicy, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/backupPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_policy_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupPolicy = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(get::Error::DefaultResponse { status_code }), } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), 
#[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, backup_policy_name: &str, body: &models::BackupPolicy, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/backupPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_policy_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupPolicy = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; 
Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: models::BackupPolicy = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => Err(create::Error::DefaultResponse { status_code }), } } pub mod create { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::BackupPolicy), Created201(models::BackupPolicy), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, backup_policy_name: &str, body: &models::BackupPolicyPatch, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/backupPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_policy_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(body).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::BackupPolicy = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: models::BackupPolicy = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Accepted202(rsp_value)) } status_code => Err(update::Error::DefaultResponse { status_code }), } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::BackupPolicy), Accepted202(models::BackupPolicy), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed 
to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, backup_policy_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/backupPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, backup_policy_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => Err(delete::Error::DefaultResponse { status_code }), } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub 
enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod vaults { use super::{models, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::VaultList, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.NetApp/netAppAccounts/{}/vaults", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match 
rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::VaultList = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => Err(list::Error::DefaultResponse { status_code }), } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
fn main() {
    #[cfg(target_os = "windows")]
    download_windows_npcap_sdk().unwrap();
}

/// Build-script helper: fetches the npcap SDK zip (cached under `target/`),
/// extracts the `Packet.lib` import library matching the target
/// architecture into `OUT_DIR`, and emits the `cargo:rustc-link-search`
/// directive pointing at it.
///
/// Errors bubble up as `anyhow::Error` (env lookup, network, zip, I/O).
#[cfg(target_os = "windows")]
fn download_windows_npcap_sdk() -> anyhow::Result<()> {
    use std::{
        env, fs,
        io::{self, Write},
        path::PathBuf,
    };

    use anyhow::anyhow;
    use http_req::request;
    use zip::ZipArchive;

    println!("cargo:rerun-if-changed=build.rs");

    // get npcap SDK
    const NPCAP_SDK: &str = "npcap-sdk-1.13.zip";

    let npcap_sdk_download_url = format!("https://npcap.com/dist/{NPCAP_SDK}");
    let cache_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?).join("target");
    let npcap_sdk_cache_path = cache_dir.join(NPCAP_SDK);

    let npcap_zip = match fs::read(&npcap_sdk_cache_path) {
        // use the cached copy from a previous build
        Ok(zip_data) => {
            eprintln!("Found cached npcap SDK");
            zip_data
        }
        // otherwise download the SDK
        Err(_) => {
            eprintln!("Downloading npcap SDK");

            let mut zip_data = vec![];
            let res = request::get(npcap_sdk_download_url, &mut zip_data)?;
            // BUG FIX: the status code used to be ignored (`let _res = …`),
            // so an error page (404/5xx body) would be cached as the SDK and
            // poison every subsequent build until `target/` was wiped.
            if !res.status_code().is_success() {
                return Err(anyhow!(
                    "npcap SDK download failed: HTTP {}",
                    res.status_code()
                ));
            }

            // cache the archive for the next build
            fs::create_dir_all(&cache_dir)?;
            let mut cache = fs::File::create(&npcap_sdk_cache_path)?;
            cache.write_all(&zip_data)?;

            zip_data
        }
    };

    // pick the import library for the target architecture
    // (note: this is the static import lib `Packet.lib`, not a DLL)
    let lib_path = if cfg!(target_arch = "aarch64") {
        "Lib/ARM64/Packet.lib"
    } else if cfg!(target_arch = "x86_64") {
        "Lib/x64/Packet.lib"
    } else if cfg!(target_arch = "x86") {
        "Lib/Packet.lib"
    } else {
        panic!("Unsupported target!")
    };

    let mut archive = ZipArchive::new(io::Cursor::new(npcap_zip))?;
    let mut npcap_lib = archive.by_name(lib_path)?;

    // unpack the library into OUT_DIR and tell rustc where to find it
    let lib_dir = PathBuf::from(env::var("OUT_DIR")?).join("npcap_sdk");
    let lib_path = lib_dir.join("Packet.lib");
    fs::create_dir_all(&lib_dir)?;
    let mut lib_file = fs::File::create(lib_path)?;
    io::copy(&mut npcap_lib, &mut lib_file)?;

    println!(
        "cargo:rustc-link-search=native={}",
        lib_dir
            .to_str()
            .ok_or(anyhow!("{lib_dir:?} is not valid UTF-8"))?
    );

    Ok(())
}
/* This module models cards for the game of Set and provides the logic to
 * decide whether three cards form a valid set, plus a small container type
 * for a found set. */

use indexmap::set::IndexSet;

/* One of the three possible states a single card characteristic can take. */
#[derive(Hash, Eq, PartialEq, Copy, Clone, Debug)]
pub enum State {
    Zero,
    One,
    Two,
}

/* A card is described by four characteristics, each carrying its own state. */
#[derive(Hash, Eq, PartialEq, Copy, Clone, Debug)]
pub struct Card(pub State, pub State, pub State, pub State);

impl State {
    /* Builds a State from its numeric representation (0, 1 or 2).
     * Panics on any other value, since no such card exists. */
    pub fn new(state: usize) -> Self {
        match state {
            0 => State::Zero,
            1 => State::One,
            2 => State::Two,
            _ => panic!("Impossible card state!"),
        }
    }
}

/* A set is a trio of cards. */
#[derive(Debug)]
pub struct Set {
    pub cards: IndexSet<Card>,
}

impl Set {
    pub fn new(set: [Card; 3]) -> Self {
        let cards = set.iter().cloned().collect();
        Set { cards }
    }
}

/* Three cards form a set when, for every one of the four characteristics,
 * the three states are either all equal or all distinct. With the states
 * encoded as 0, 1 and 2, both of those cases are exactly the triples whose
 * sum is divisible by three, which is what `check` tests per characteristic. */
pub fn is_set(first: &Card, second: &Card, third: &Card) -> bool {
    let per_characteristic = [
        (first.0, second.0, third.0),
        (first.1, second.1, third.1),
        (first.2, second.2, third.2),
        (first.3, second.3, third.3),
    ];
    per_characteristic.iter().all(|&(a, b, c)| check(a, b, c))
}

/* Verifies the set condition for one characteristic of the three cards. */
fn check(first: State, second: State, third: State) -> bool {
    (first as i32 + second as i32 + third as i32) % 3 == 0
}
use super::super::ui;
use super::prelude::*;

/// Blanks out the status bar by showing an empty message.
pub fn clear(state: &ui::State) {
    show_msg(state, "");
}

/// Replaces whatever the status bar currently shows with `msg`.
/// All existing entries under context id 0 are removed first, so messages
/// never stack up on the bar.
pub fn show_msg(state: &ui::State, msg: &str) {
    let bar: gtk::Statusbar = state.get_statusbar();
    bar.remove_all(0);
    bar.push(0, msg);
}
use anyhow::Result; use uvm_cli; use uvm_core; use console::style; use std::env; use std::path::PathBuf; use structopt::{ clap::crate_authors, clap::crate_description, clap::crate_version, clap::AppSettings, StructOpt, }; use uvm_cli::{options::ColorOption, set_colors_enabled, set_loglevel}; const SETTINGS: &'static [AppSettings] = &[ AppSettings::ColoredHelp, AppSettings::DontCollapseArgsInUsage, ]; #[derive(StructOpt, Debug)] #[structopt(version = crate_version!(), author = crate_authors!(), about = crate_description!(), settings = SETTINGS)] struct Opts { /// path to project directory. project_path: Option<PathBuf>, /// Detects a unity version recursivly from current working directory. /// With this flag set, the tool returns the first version it finds. #[structopt(short, long)] recursive: bool, /// print debug output #[structopt(short, long)] debug: bool, /// print more output #[structopt(short, long, parse(from_occurrences))] verbose: i32, /// Color:. #[structopt(short, long, possible_values = &ColorOption::variants(), case_insensitive = true, default_value)] color: ColorOption, } fn main() -> Result<()> { let opt = Opts::from_args(); set_colors_enabled(&opt.color); set_loglevel(opt.debug.then(|| 2).unwrap_or(opt.verbose)); let project_version = uvm_core::dectect_project_version( &opt.project_path.unwrap_or(env::current_dir()?), Some(opt.recursive), )?; println!("{}", style(project_version.to_string()).green().bold()); Ok(()) }
extern crate js_sys;
extern crate web_sys;
mod utils;
use wasm_bindgen::prelude::*;

// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

// Imports the JavaScript `alert` function across the wasm-bindgen FFI
// boundary so it can be called from Rust.
#[wasm_bindgen]
extern {
    fn alert(s: &str);
}

// Exported to JavaScript; pops up an alert as a smoke test that the wasm
// module is wired up correctly.
#[wasm_bindgen]
pub fn greet() {
    alert("Hello, wasm-template-rust!");
}

#[cfg(test)]
mod tests {
    // Trivial sanity check so `cargo test` has something to run in the
    // freshly generated template.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
use std::io;

/// Prints `prompt` and reads one line from stdin; returns `true` only when
/// the trimmed answer is `y` or `Y`.
///
/// Panics if reading from stdin fails (EOF is not an error and yields
/// an empty answer, i.e. `false`).
fn confirm_choice(prompt: &str) -> bool {
    let mut answer = String::new();

    println!("{}", prompt);
    io::stdin().read_line(&mut answer)
        .expect("Failed to read line");

    // Idiom fix: the original spelled this as
    // `if cond { true } else { false }` — return the test directly.
    matches!(answer.trim(), "y" | "Y")
}

/// Interactively collects an author and a book title, echoing the entry
/// back to the user. Bails out early when the confirmation is declined.
fn new_entry() {
    let mut author = String::new();
    let mut book = String::new();

    // Idiom fix: `!cond` instead of the non-idiomatic `cond == false`.
    if !confirm_choice("You'd like to add a new entry? (y/n)") {
        println!("Terminating Program");
        return;
    }

    println!("Enter the name of the author");
    io::stdin().read_line(&mut author)
        .expect("Failed to read author");

    println!("Enter the name of the book");
    io::stdin().read_line(&mut book)
        .expect("Failed to read book");

    println!("You want to add {} by {}", book.trim(), author.trim());
}

fn main() {
    new_entry();
}
use crate::server::Events; use log::{error, trace}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::collections::BTreeMap; use tokio::io::AsyncBufReadExt; use tokio::sync::RwLock; const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2); #[derive(Deserialize, Clone, PartialEq)] pub enum TrackerMode { /// In static mode torrents are tracked only if they were added ahead of time. #[serde(rename = "static")] Static, /// In dynamic mode, torrents are tracked being added ahead of time. #[serde(rename = "dynamic")] Dynamic, /// Tracker will only serve authenticated peers. #[serde(rename = "private")] Private, } #[derive(Clone, Serialize)] pub struct TorrentPeer { ip: std::net::SocketAddr, uploaded: u64, downloaded: u64, left: u64, event: Events, #[serde(serialize_with = "ser_instant")] updated: std::time::Instant, } fn ser_instant<S: serde::Serializer>(inst: &std::time::Instant, ser: S) -> Result<S::Ok, S::Error> { ser.serialize_u64(inst.elapsed().as_millis() as u64) } #[derive(Ord, PartialOrd, PartialEq, Eq, Clone)] pub struct InfoHash { info_hash: [u8; 20], } impl std::fmt::Display for InfoHash { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut chars = [0u8; 40]; binascii::bin2hex(&self.info_hash, &mut chars).expect("failed to hexlify"); write!(f, "{}", std::str::from_utf8(&chars).unwrap()) } } impl std::str::FromStr for InfoHash { type Err = binascii::ConvertError; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut i = Self { info_hash: [0u8; 20] }; if s.len() != 40 { return Err(binascii::ConvertError::InvalidInputLength); } binascii::hex2bin(s.as_bytes(), &mut i.info_hash)?; Ok(i) } } impl std::convert::From<&[u8]> for InfoHash { fn from(data: &[u8]) -> InfoHash { assert_eq!(data.len(), 20); let mut ret = InfoHash { info_hash: [0u8; 20] }; ret.info_hash.clone_from_slice(data); ret } } impl From<[u8; 20]> for InfoHash { fn from(info_hash: [u8; 20]) -> Self { InfoHash { info_hash, } } } impl 
serde::ser::Serialize for InfoHash { fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { let mut buffer = [0u8; 40]; let bytes_out = binascii::bin2hex(&self.info_hash, &mut buffer).ok().unwrap(); let str_out = std::str::from_utf8(bytes_out).unwrap(); serializer.serialize_str(str_out) } } struct InfoHashVisitor; impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { type Value = InfoHash; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!(formatter, "a 40 character long hash") } fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> { if v.len() != 40 { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), &"expected a 40 character long string", )); } let mut res = InfoHash { info_hash: [0u8; 20] }; if binascii::hex2bin(v.as_bytes(), &mut res.info_hash).is_err() { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), &"expected a hexadecimal string", )); } else { Ok(res) } } } impl<'de> serde::de::Deserialize<'de> for InfoHash { fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> { des.deserialize_str(InfoHashVisitor) } } #[repr(transparent)] #[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq)] pub struct PeerId([u8; 20]); impl PeerId { pub fn from_array(v: &[u8; 20]) -> &PeerId { unsafe { // This is safe since PeerId's repr is transparent and content's are identical. 
PeerId == [0u8; 20] core::mem::transmute(v) } } pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { return Some("BitTorrent"); } if self.0[0] == b'-' { let name = match &self.0[1..3] { b"AG" => "Ares", b"A~" => "Ares", b"AR" => "Arctic", b"AV" => "Avicora", b"AX" => "BitPump", b"AZ" => "Azureus", b"BB" => "BitBuddy", b"BC" => "BitComet", b"BF" => "Bitflu", b"BG" => "BTG (uses Rasterbar libtorrent)", b"BR" => "BitRocket", b"BS" => "BTSlave", b"BX" => "~Bittorrent X", b"CD" => "Enhanced CTorrent", b"CT" => "CTorrent", b"DE" => "DelugeTorrent", b"DP" => "Propagate Data Client", b"EB" => "EBit", b"ES" => "electric sheep", b"FT" => "FoxTorrent", b"FW" => "FrostWire", b"FX" => "Freebox BitTorrent", b"GS" => "GSTorrent", b"HL" => "Halite", b"HN" => "Hydranode", b"KG" => "KGet", b"KT" => "KTorrent", b"LH" => "LH-ABC", b"LP" => "Lphant", b"LT" => "libtorrent", b"lt" => "libTorrent", b"LW" => "LimeWire", b"MO" => "MonoTorrent", b"MP" => "MooPolice", b"MR" => "Miro", b"MT" => "MoonlightTorrent", b"NX" => "Net Transport", b"PD" => "Pando", b"qB" => "qBittorrent", b"QD" => "QQDownload", b"QT" => "Qt 4 Torrent example", b"RT" => "Retriever", b"S~" => "Shareaza alpha/beta", b"SB" => "~Swiftbit", b"SS" => "SwarmScope", b"ST" => "SymTorrent", b"st" => "sharktorrent", b"SZ" => "Shareaza", b"TN" => "TorrentDotNET", b"TR" => "Transmission", b"TS" => "Torrentstorm", b"TT" => "TuoTu", b"UL" => "uLeecher!", b"UT" => "µTorrent", b"UW" => "µTorrent Web", b"VG" => "Vagaa", b"WD" => "WebTorrent Desktop", b"WT" => "BitLet", b"WW" => "WebTorrent", b"WY" => "FireTorrent", b"XL" => "Xunlei", b"XT" => "XanTorrent", b"XX" => "Xtorrent", b"ZT" => "ZipTorrent", _ => return None, }; Some(name) } else { None } } } impl Serialize for PeerId { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { let mut tmp = [0u8; 40]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); let id = std::str::from_utf8(&tmp).ok(); #[derive(Serialize)] struct 
PeerIdInfo<'a> { id: Option<&'a str>, client: Option<&'a str>, } let obj = PeerIdInfo { id, client: self.get_client_name(), }; obj.serialize(serializer) } } #[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { is_flagged: bool, #[serde(skip)] peers: std::collections::BTreeMap<PeerId, TorrentPeer>, completed: u32, #[serde(skip)] seeders: u32, } impl TorrentEntry { pub fn new() -> TorrentEntry { TorrentEntry { is_flagged: false, peers: std::collections::BTreeMap::new(), completed: 0, seeders: 0, } } pub fn is_flagged(&self) -> bool { self.is_flagged } pub fn update_peer( &mut self, peer_id: &PeerId, remote_address: &std::net::SocketAddr, uploaded: u64, downloaded: u64, left: u64, event: Events, ) { let is_seeder = left == 0 && uploaded > 0; let mut was_seeder = false; let mut is_completed = left == 0 && (event as u32) == (Events::Complete as u32); if let Some(prev) = self.peers.insert(*peer_id, TorrentPeer { updated: std::time::Instant::now(), left, downloaded, uploaded, ip: *remote_address, event, }) { was_seeder = prev.left == 0 && prev.uploaded > 0; if is_completed && (prev.event as u32) == (Events::Complete as u32) { // don't update count again. a torrent should only be updated once per peer. 
is_completed = false; } } if is_seeder && !was_seeder { self.seeders += 1; } else if was_seeder && !is_seeder { self.seeders -= 1; } if is_completed { self.completed += 1; } } pub fn get_peers(&self, remote_addr: &std::net::SocketAddr) -> Vec<std::net::SocketAddr> { let mut list = Vec::new(); for (_, peer) in self .peers .iter() .filter(|e| e.1.ip.is_ipv4() == remote_addr.is_ipv4()) .take(74) { if peer.ip == *remote_addr { continue; } list.push(peer.ip); } list } pub fn get_peers_iter(&self) -> impl Iterator<Item = (&PeerId, &TorrentPeer)> { self.peers.iter() } pub fn get_stats(&self) -> (u32, u32, u32) { let leechers = (self.peers.len() as u32) - self.seeders; (self.seeders, self.completed, leechers) } } struct TorrentDatabase { torrent_peers: tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, TorrentEntry>>, } impl Default for TorrentDatabase { fn default() -> Self { TorrentDatabase { torrent_peers: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), } } } pub struct TorrentTracker { mode: TrackerMode, database: TorrentDatabase, } #[derive(Serialize, Deserialize)] struct DatabaseRow<'a> { info_hash: InfoHash, entry: Cow<'a, TorrentEntry>, } pub enum TorrentStats { TorrentFlagged, TorrentNotRegistered, Stats { seeders: u32, leechers: u32, complete: u32 }, } impl TorrentTracker { pub fn new(mode: TrackerMode) -> TorrentTracker { TorrentTracker { mode, database: TorrentDatabase { torrent_peers: RwLock::new(std::collections::BTreeMap::new()), }, } } pub async fn load_database<R: tokio::io::AsyncRead + Unpin>( mode: TrackerMode, reader: &mut R, ) -> Result<TorrentTracker, std::io::Error> { let reader = tokio::io::BufReader::new(reader); let reader = async_compression::tokio::bufread::BzDecoder::new(reader); let reader = tokio::io::BufReader::new(reader); let res = TorrentTracker::new(mode); let mut db = res.database.torrent_peers.write().await; let mut records = reader.lines(); loop { let line = match records.next_line().await { Ok(Some(v)) => v, 
Ok(None) => break, Err(ref err) => { error!("failed to read lines! {}", err); continue; }, }; let row: DatabaseRow = match serde_json::from_str(&line) { Ok(v) => v, Err(err) => { error!("failed to parse json: {}", err); continue; } }; let entry = row.entry.into_owned(); let infohash = row.info_hash; db.insert(infohash, entry); } trace!("loaded {} entries from database", db.len()); drop(db); Ok(res) } /// Adding torrents is not relevant to dynamic trackers. pub async fn add_torrent(&self, info_hash: &InfoHash) -> Result<(), ()> { let mut write_lock = self.database.torrent_peers.write().await; match write_lock.entry(info_hash.clone()) { std::collections::btree_map::Entry::Vacant(ve) => { ve.insert(TorrentEntry::new()); Ok(()) }, std::collections::btree_map::Entry::Occupied(_entry) => Err(()), } } /// If the torrent is flagged, it will not be removed unless force is set to true. pub async fn remove_torrent(&self, info_hash: &InfoHash, force: bool) -> Result<(), ()> { use std::collections::btree_map::Entry; let mut entry_lock = self.database.torrent_peers.write().await; let torrent_entry = entry_lock.entry(info_hash.clone()); match torrent_entry { Entry::Vacant(_) => { // no entry, nothing to do... } Entry::Occupied(entry) => { if force || !entry.get().is_flagged() { entry.remove(); return Ok(()); } } } Err(()) } /// flagged torrents will result in a tracking error. This is to allow enforcement against piracy. pub async fn set_torrent_flag(&self, info_hash: &InfoHash, is_flagged: bool) -> bool { if let Some(entry) = self.database.torrent_peers.write().await.get_mut(info_hash) { if is_flagged && !entry.is_flagged { // empty peer list. 
entry.peers.clear(); } entry.is_flagged = is_flagged; true } else { false } } pub async fn get_torrent_peers( &self, info_hash: &InfoHash, remote_addr: &std::net::SocketAddr, ) -> Option<Vec<std::net::SocketAddr>> { let read_lock = self.database.torrent_peers.read().await; read_lock .get(info_hash) .map(|entry| entry.get_peers(remote_addr)) } pub async fn update_torrent_and_get_stats( &self, info_hash: &InfoHash, peer_id: &PeerId, remote_address: &std::net::SocketAddr, uploaded: u64, downloaded: u64, left: u64, event: Events, ) -> TorrentStats { use std::collections::btree_map::Entry; let mut torrent_peers = self.database.torrent_peers.write().await; let torrent_entry = match torrent_peers.entry(info_hash.clone()) { Entry::Vacant(vacant) => { match self.mode { TrackerMode::Dynamic => vacant.insert(TorrentEntry::new()), _ => { return TorrentStats::TorrentNotRegistered; } } } Entry::Occupied(entry) => { if entry.get().is_flagged() { return TorrentStats::TorrentFlagged; } entry.into_mut() } }; torrent_entry.update_peer(peer_id, remote_address, uploaded, downloaded, left, event); let (seeders, complete, leechers) = torrent_entry.get_stats(); TorrentStats::Stats { seeders, leechers, complete, } } pub(crate) async fn get_database(&self) -> tokio::sync::RwLockReadGuard<'_, BTreeMap<InfoHash, TorrentEntry>> { self.database.torrent_peers.read().await } pub async fn save_database<W: tokio::io::AsyncWrite + Unpin>(&self, w: W) -> Result<(), std::io::Error> { use tokio::io::AsyncWriteExt; let mut writer = async_compression::tokio::write::BzEncoder::new(w); let db_lock = self.database.torrent_peers.read().await; let db: &BTreeMap<InfoHash, TorrentEntry> = &*db_lock; let mut tmp = Vec::with_capacity(4096); for row in db { let entry = DatabaseRow { info_hash: row.0.clone(), entry: Cow::Borrowed(row.1), }; tmp.clear(); if let Err(err) = serde_json::to_writer(&mut tmp, &entry) { error!("failed to serialize: {}", err); continue; }; tmp.push(b'\n'); writer.write_all(&tmp).await?; } 
writer.flush().await?; Ok(()) } async fn cleanup(&self) { let mut lock = self.database.torrent_peers.write().await; let db: &mut BTreeMap<InfoHash, TorrentEntry> = &mut *lock; let mut torrents_to_remove = Vec::new(); for (k, v) in db.iter_mut() { // timed-out peers.. { let mut peers_to_remove = Vec::new(); let torrent_peers = &mut v.peers; for (peer_id, state) in torrent_peers.iter() { if state.updated.elapsed() > TWO_HOURS { // over 2 hours past since last update... peers_to_remove.push(*peer_id); } } for peer_id in peers_to_remove.iter() { torrent_peers.remove(peer_id); } } if self.mode == TrackerMode::Dynamic { // peer-less torrents.. if v.peers.is_empty() && !v.is_flagged() { torrents_to_remove.push(k.clone()); } } } for info_hash in torrents_to_remove { db.remove(&info_hash); } } pub async fn periodic_task(&self, db_path: &str) { // cleanup db self.cleanup().await; // save journal db. let mut journal_path = std::path::PathBuf::from(db_path); let mut filename = String::from(journal_path.file_name().unwrap().to_str().unwrap()); filename.push_str("-journal"); journal_path.set_file_name(filename.as_str()); let jp_str = journal_path.as_path().to_str().unwrap(); // scope to make sure backup file is dropped/closed. { let mut file = match tokio::fs::File::create(jp_str).await { Err(err) => { error!("failed to open file '{}': {}", db_path, err); return; } Ok(v) => v, }; trace!("writing database to {}", jp_str); if let Err(err) = self.save_database(&mut file).await { error!("failed saving database. {}", err); return; } } // overwrite previous db trace!("renaming '{}' to '{}'", jp_str, db_path); if let Err(err) = tokio::fs::rename(jp_str, db_path).await { error!("failed to move db backup. 
{}", err); } } } #[cfg(test)] mod tests { use super::*; fn is_sync<T: Sync>() {} fn is_send<T: Send>() {} #[test] fn tracker_send() { is_send::<TorrentTracker>(); } #[test] fn tracker_sync() { is_sync::<TorrentTracker>(); } #[tokio::test] async fn test_save_db() { let tracker = TorrentTracker::new(TrackerMode::Dynamic); tracker .add_torrent(&[0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0].into()) .await .expect("failed to add torrent"); let mut out = Vec::new(); let mut cursor = std::io::Cursor::new(&mut out); tracker.save_database(&mut cursor).await.expect("db save failed"); assert!(cursor.position() > 0); } #[test] fn test_infohash_de() { use serde_json; let ih: InfoHash = [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1].into(); let serialized_ih = serde_json::to_string(&ih).unwrap(); let de_ih: InfoHash = serde_json::from_str(serialized_ih.as_str()).unwrap(); assert!(de_ih == ih); } }