text stringlengths 8 4.13M |
|---|
use crate::ast::{Expr, Stmt};
use super::{
value::{Function, Value},
Interpreter, StmtResult,
};
impl Interpreter {
    /// Evaluate one statement by dispatching on its variant.
    pub fn eval_stmt(&mut self, stmt: &Stmt) -> StmtResult {
        match stmt {
            Stmt::FnDef {
                ident,
                params,
                body,
            } => self.eval_fndef(ident, params, body),
            Stmt::Set { ident, expr } => self.eval_set(ident, expr),
            Stmt::Push(expr) => self.eval_push(expr),
            Stmt::Print(expr) => self.eval_print(expr),
            Stmt::FnCall(ident) => self.eval_fncall(ident),
            Stmt::Pop => self.eval_pop(),
        }
    }

    /// Bind a function value under its own name in the environment.
    fn eval_fndef(&mut self, ident: &str, params: &[String], body: &[Stmt]) -> StmtResult {
        let value = Value::Function(Function {
            ident: ident.to_string(),
            params: params.to_vec(),
            body: body.to_vec(),
        });
        self.env.set(ident.to_string(), value);
        Ok(())
    }

    /// Evaluate `expr` and bind the result to `ident`.
    fn eval_set(&mut self, ident: &str, expr: &Expr) -> StmtResult {
        let value = self.eval_expr(expr)?;
        self.env.set(ident.to_string(), value);
        Ok(())
    }

    /// Evaluate `expr` and push the result onto the stack.
    fn eval_push(&mut self, expr: &Expr) -> StmtResult {
        let value = self.eval_expr(expr)?;
        self.env.push(value);
        Ok(())
    }

    /// Evaluate `expr` and print the result on its own line.
    fn eval_print(&mut self, expr: &Expr) -> StmtResult {
        let value = self.eval_expr(expr)?;
        println!("{}", value);
        Ok(())
    }

    /// Call the function bound to `ident`: open a new scope, move one value
    /// from the parent scope's stack into each parameter, run the body, then
    /// carry an eventual return value back onto the caller's stack.
    fn eval_fncall(&mut self, ident: &str) -> StmtResult {
        let looked_up = self.env.get(ident)?;
        let function = looked_up.to_function()?;
        self.env.new_scope();
        for param in &function.params {
            let argument = self.env.parent_pop()?;
            self.env.set(param.to_string(), argument);
        }
        for stmt in &function.body {
            self.eval_stmt(stmt)?;
        }
        // Capture a possible return value before tearing the scope down.
        let return_value = self.env.pop();
        self.env.exit_scope();
        if let Ok(value) = return_value {
            self.env.push(value)
        }
        Ok(())
    }

    /// Discard the top of the stack; fails when the stack is empty.
    fn eval_pop(&mut self) -> StmtResult {
        self.env.pop()?;
        Ok(())
    }
}
|
use nb;
use core::{num::Wrapping};
use cortex_m::peripheral::{syst::SystClkSource, SYST};
//use crate::pmc::PMC;
//use cortex_m::peripheral::syst;
//pub use sam3x8e as target;
// https://doc.rust-lang.org/stable/rust-by-example/trait.html
// https://github.com/stm32-rs/stm32f4xx-hal/blob/master/src/timer.rs
// https://stackoverflow.com/questions/24047686/default-function-arguments-in-rust
/// Blocking delays: each call busy-waits until the requested time has elapsed.
pub trait BusyDelay {
/// Block for `delay` microseconds (resolution limited by the ms counter).
fn busy_delay_us(&mut self, delay: u32);
/// Block for `delay` milliseconds.
fn busy_delay_ms(&mut self, delay: u32);
/// Block for `delay` seconds.
fn busy_delay_s(&mut self, delay: u32);
}
/// Non-blocking delays: each call starts a countdown and returns it so the
/// caller can poll for completion.
/// NOTE(review): the return type is hard-wired to `MillisCountDown<Time>`
/// rather than being tied to `Self`, so this trait is effectively only
/// implementable for `Time` — consider an associated type; verify callers.
pub trait Delay {
fn delay_us(&mut self, delay: u32) -> MillisCountDown<Time>;
fn delay_ms(&mut self, delay: u32) -> MillisCountDown<Time>;
fn delay_s(&mut self, delay: u32) -> MillisCountDown<Time>;
}
/// Trait that abstracts a counter that increases as milliseconds go by.
///
/// Factored out to leave the door open for different SysTick counters, such as
/// counting via interrupts.
pub trait CountsMillis {
/// Returns a value that must not increment faster than once per
/// millisecond, and will wrap around.
fn count(&mut self) -> Wrapping<u32>;
}
/// SysTick-backed time source; owns the SYST peripheral.
pub struct Time {
//pub sys_countdown: cortex_m_systick_countdown::PollingSysTick,
pub syst: SYST,
// Number of SysTick ticks that make up one millisecond.
pub ticks_per_ms: u32,
// Monotonic, wrapping millisecond counter.
pub counter: Wrapping<u32>
//counter: cortex_m_systick_countdown::MillisCountDown,
}
/// A single software countdown borrowing a millisecond counter.
pub struct MillisCountDown<'a, CM: CountsMillis> {
counter: &'a mut CM,
// Wrapped counter value at which the countdown expires; `None` while idle.
target_millis: Option<Wrapping<u32>>,
}
impl<'a, CM: CountsMillis> MillisCountDown<'a, CM> {
/// Wraps a millisecond counter; no countdown runs until `start_ms`.
pub fn new(counter: &'a mut CM) -> Self {
MillisCountDown {
target_millis: None,
counter,
}
}
/// Underlying version of `CountDown`’s `start` that takes a `u32` of
/// milliseconds rather than a `Duration`.
///
/// Use this if you want to avoid the `u64`s in `Duration`.
pub fn start_ms(&mut self, ms: u32) {
self.target_millis = Some(self.counter.count() + Wrapping(ms));
}
/// Underlying implementation of `CountDown`’s `wait` that works directly on
/// our underlying u32 ms values and can be used by any `CountDown` trait
/// implementations.
///
/// Calling this method before `start`, or after it has already returned
/// `Ok` will panic.
pub fn wait_ms(&mut self) -> nb::Result<(), ()> {
// Rollover-safe duration check derived from:
// https://playground.arduino.cc/Code/TimingRollover/
// Wrapping subtraction reinterpreted as i32 stays correct across the
// u32 counter wrap-around, as long as the delay is below 2^31 ms.
if (self.counter.count() - self.target_millis.unwrap()).0 as i32 > 0 {
// Consume the target so a second `wait_ms` without `start_ms` panics.
self.target_millis.take();
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
}
impl BusyDelay for Time {
    /// Block until `delay` milliseconds have elapsed on the ms counter.
    fn busy_delay_ms(&mut self, delay: u32) {
        let mut counter = MillisCountDown::new(self);
        counter.start_ms(delay);
        // `wait_ms` cannot fail once started, so the unwrap is safe.
        nb::block!(counter.wait_ms()).unwrap();
    }
    /// Block for `delay` microseconds. Resolution is 1 ms, so sub-millisecond
    /// remainders are truncated.
    fn busy_delay_us(&mut self, delay: u32) {
        self.busy_delay_ms(delay / 1000);
    }
    /// Block for `delay` seconds.
    /// Bug fix: one second is 1000 ms, not 100 ms as previously coded.
    /// Saturate to avoid u32 overflow for very large delays.
    fn busy_delay_s(&mut self, delay: u32) {
        self.busy_delay_ms(delay.saturating_mul(1000));
    }
}
impl Delay for Time {
    /// Start a non-blocking countdown of `delay` milliseconds and return it.
    fn delay_ms(&mut self, delay: u32) -> MillisCountDown<Time> {
        let mut counter = MillisCountDown::new(self);
        counter.start_ms(delay);
        counter
    }
    /// Start a countdown of `delay` microseconds (truncated to whole ms).
    fn delay_us(&mut self, delay: u32) -> MillisCountDown<Time> {
        self.delay_ms(delay / 1000)
    }
    /// Start a countdown of `delay` seconds.
    /// Bug fix: one second is 1000 ms, not 100 ms as previously coded.
    /// Saturate to avoid u32 overflow for very large delays.
    fn delay_s(&mut self, delay: u32) -> MillisCountDown<Time> {
        self.delay_ms(delay.saturating_mul(1000))
    }
}
pub fn get_calib_ticks_10ms() -> Option<u32> {
let calibrated_tick_value = cortex_m::peripheral::SYST::get_ticks_per_10ms();
if calibrated_tick_value == 0 {
None
} else {
// Leave one clock cycle for checking the overflow
// Source: https://github.com/fionawhim/cortex-m-systick-countdown/blob/develop/src/lib.rs
Some((calibrated_tick_value + 1) / 10 - 1)
}
} |
use super::super::TextureTable;
use super::util;
use crate::arena::{block, resource, BlockRef};
use crate::libs::random_id::U128Id;
use crate::libs::three;
use std::collections::{HashMap, HashSet};
use wasm_bindgen::JsCast;
/// Renders boxblock objects into a three.js scene and keeps them in sync.
pub struct Boxblock {
// Live meshes keyed by block id.
meshs: HashMap<U128Id, Mesh>,
// Shared geometries, one per block shape.
geometry: Geometry,
// Shared geometry for the floating nameplates.
geometry_nameplate: util::nameplate::XZGeometry,
}
/// One pre-built geometry per supported block shape.
pub struct Geometry {
box_geometry: three::BoxGeometry,
cylinder_geometry: three::CylinderGeometry,
icosahedron_geometry: three::IcosahedronGeometry,
slope_geometry: three::BufferGeometry,
}
/// Scene objects and cached state for a single boxblock.
struct Mesh {
boxblock_material: three::MeshStandardMaterial,
boxblock_data: three::Mesh,
// Id of the texture currently applied; `U128Id::none()` when untextured.
texture_id: U128Id,
nameplate: util::Nameplate,
// Cached display name pair; used to detect nameplate changes.
nameplate_id: (String, String),
// Cached block color; used to detect color changes.
color: crate::libs::color::Pallet,
// Group holding mesh + nameplate, added to the scene as one unit.
data: three::Group,
}
impl Boxblock {
/// Creates an empty renderer with the shared geometries built up front.
pub fn new() -> Self {
Self {
meshs: HashMap::new(),
geometry: Geometry::new(),
geometry_nameplate: util::nameplate::XZGeometry::new(0.5, true),
}
}
/// Synchronizes the scene with `boxblocks`: creates meshes for new blocks,
/// updates position/scale/texture/color/nameplate of existing ones, and
/// removes meshes whose block no longer exists.
pub fn update(
&mut self,
texture_table: &mut TextureTable,
scene: &three::Scene,
boxblocks: impl Iterator<Item = BlockRef<block::Boxblock>>,
) {
// Assume every known mesh is unused; ids seen below are removed from
// this set and the leftovers are detached from the scene at the end.
let mut unused = self.meshs.keys().map(U128Id::clone).collect::<HashSet<_>>();
for boxblock in boxblocks {
let boxblock_id = boxblock.id();
unused.remove(&boxblock_id);
boxblock.map(|boxblock| {
if !self.meshs.contains_key(&boxblock_id) {
// First time this block is seen: build material, mesh and
// nameplate, group them, and attach the group to the scene.
let boxblock_material = three::MeshStandardMaterial::new(&object! {});
let [r, g, b, ..] = boxblock.color().to_color().to_f64array();
boxblock_material.color().set_rgb(r, g, b);
let boxblock_data = three::Mesh::new(
self.geometry.get_geometry(boxblock.shape()),
&boxblock_material,
);
// The block id is stored on the mesh so picking can map back.
boxblock_data.set_user_data(&boxblock_id.to_jsvalue());
let nameplate = util::Nameplate::new(&self.geometry_nameplate);
nameplate.set_color(boxblock.color());
nameplate.scale().set(1.0, 1.0, 1.0);
// Board and arrow start collapsed; they are sized once a
// display name is assigned further below.
nameplate.board().scale().set(0.0, 1.0, 0.0);
nameplate.arrow().unwrap().scale().set(0.0, 0.0, 0.0);
let data = three::Group::new();
data.add(&boxblock_data);
data.add(&nameplate);
data.set_render_order(super::ORDER_BOXBLOCK);
scene.add(&data);
self.meshs.insert(
U128Id::clone(&boxblock_id),
Mesh {
boxblock_material,
boxblock_data,
texture_id: U128Id::none(),
nameplate,
nameplate_id: (String::from(""), String::from("")),
color: boxblock.color().clone(),
data,
},
);
}
if let Some(mesh) = self.meshs.get_mut(&boxblock_id) {
// Geometry, position and scale are cheap sets, applied always.
mesh.boxblock_data
.set_geometry(self.geometry.get_geometry(boxblock.shape()));
let [px, py, pz] = boxblock.position().clone();
mesh.boxblock_data.position().set(px, py, pz);
let [sx, sy, sz] = boxblock.size().clone();
mesh.boxblock_data.scale().set(sx, sy, sz);
// Nameplate floats above the top face of the block.
mesh.nameplate.position().set(px, py, pz + sz * 0.5);
let texture = boxblock.texture();
let texture_id = texture
.as_ref()
.map(|texture| texture.id())
.unwrap_or_else(|| U128Id::none());
// Swap the texture only when the referenced texture changed.
if texture_id != mesh.texture_id {
if let Some(texture) = texture {
if let Some(texture) =
texture_table.load_block(BlockRef::clone(&texture))
{
mesh.boxblock_material.set_map(Some(&texture));
// White base color so the texture is shown unmodified.
mesh.boxblock_material.color().set_rgb(1.0, 1.0, 1.0);
mesh.boxblock_material.set_needs_update(true);
mesh.texture_id = texture_id;
}
} else {
mesh.boxblock_material.set_map(None);
mesh.boxblock_material.set_needs_update(true);
mesh.texture_id = texture_id;
}
}
// Re-tint material/nameplate when the block color changed.
if *boxblock.color() != mesh.color {
if mesh.texture_id.is_none() {
let [r, g, b, ..] = boxblock.color().to_color().to_f64array();
mesh.boxblock_material.color().set_rgb(r, g, b);
mesh.boxblock_material.set_needs_update(true);
}
mesh.nameplate.set_color(boxblock.color());
mesh.color = boxblock.color().clone();
}
// Re-render the nameplate text when the display name changed.
if *boxblock.display_name() != mesh.nameplate_id {
let texture = texture_table.load_text(boxblock.display_name());
mesh.nameplate.text().set_alpha_map(Some(&texture.data));
mesh.nameplate.text().set_needs_update(true);
// Board is scaled to half the texture width, keeping aspect.
let texture_width = texture.size[0] * 0.5;
let texture_height = texture_width * texture.size[1] / texture.size[0];
mesh.nameplate
.board()
.scale()
.set(texture_width, 1.0, texture_height);
// Show the arrow only when a non-empty name is displayed.
if boxblock.display_name().0 == "" && boxblock.display_name().1 == "" {
mesh.nameplate.arrow().unwrap().scale().set(0.0, 0.0, 0.0);
} else if mesh.nameplate_id.0 == "" && mesh.nameplate_id.1 == "" {
mesh.nameplate.arrow().unwrap().scale().set(1.0, 1.0, 1.0);
}
mesh.nameplate_id = boxblock.display_name().clone();
}
}
});
}
// Detach meshes for blocks that disappeared since the last update.
for unused_boxblock_id in unused {
if let Some(mesh) = self.meshs.remove(&unused_boxblock_id) {
scene.remove(&mesh.data);
}
}
}
}
impl Geometry {
/// Builds the shared per-shape geometries once at construction.
/// NOTE(review): `&mut crate::lazy` is unusual — presumably a lazily
/// initialized geometry cache; confirm against `crate::lazy`'s definition.
pub fn new() -> Self {
Self {
box_geometry: util::block_geometry::box_geometry(&mut crate::lazy),
cylinder_geometry: util::block_geometry::cylinder_geometry(&mut crate::lazy),
icosahedron_geometry: util::block_geometry::icosahedron_geometry(&mut crate::lazy),
slope_geometry: util::block_geometry::slope_geometry(&mut crate::lazy),
}
}
/// Maps a block shape to its shared geometry.
pub fn get_geometry(&self, shape: block::boxblock::Shape) -> &three::BufferGeometry {
match shape {
block::boxblock::Shape::Cube => &self.box_geometry,
block::boxblock::Shape::Cylinder => &self.cylinder_geometry,
block::boxblock::Shape::Sphere => &self.icosahedron_geometry,
block::boxblock::Shape::Slope => &self.slope_geometry,
}
}
}
|
#![allow(clippy::let_unit_value)]
use super::database::ingredient::ingredient_info;
use oikos_api::models::components::schemas::*;
use serde::{Deserialize, Serialize};
use uniqdb::{
github::{GithubDb, GithubDbError},
UniqDb,
};
#[derive(Debug, thiserror::Error)]
pub enum RecipeError {
#[error("recipe with id `{0}` not found")]
NotFound(String),
#[error("access token not found")]
AccessTokenNotFound(#[from] std::env::VarError),
#[error("unimplemented")]
UnimplementedError,
#[error("unknow error")]
UnknowError,
#[error("base64 decode error")]
DecodeError(#[from] base64::DecodeError),
#[error("utf8 error")]
Utf8Error(#[from] std::str::Utf8Error),
#[error("serde error")]
SerdeError(#[from] serde_json::error::Error),
#[error("invalid data {0}")]
InvalidDataError(String),
#[error("github error")]
GithubDbError(#[from] GithubDbError),
}
/// Minimal projection of a GitHub repository as returned by `GithubDb`.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
struct GithubRepo {
// Repository name, used as the recipe id.
name: String,
// Optional repository description, used as the recipe display name.
description: Option<String>,
}
impl crate::server::Server {
    /// Lists all recipes visible with the given token.
    ///
    /// Each GitHub repository becomes one list item: the repository name is
    /// the id, and its description (or the name when absent) the display name.
    pub fn get_recipes(&self, authorization: &str) -> Result<RecipeList, RecipeError> {
        let db = GithubDb::new(authorization, "open-cooking")?;
        let repos: Vec<GithubRepo> = db.get_all()?;
        let recipe_list = repos
            .iter()
            .map(|repo| {
                // Fall back to the repository name when no description is set.
                let name = repo
                    .description
                    .clone()
                    .unwrap_or_else(|| repo.name.to_string());
                RecipeListItem {
                    id: repo.name.to_string(),
                    name,
                }
            })
            .collect();
        Ok(recipe_list)
    }
    /// Persists a new recipe and echoes it back to the caller.
    pub async fn add_recipe(
        &self,
        recipe: &RecipeModel,
        authorization: &str,
    ) -> Result<RecipeModel, RecipeError> {
        let db = GithubDb::new(authorization, "open-cooking")?;
        db.create(&recipe.id, &recipe.name, recipe)?;
        Ok(recipe.clone())
    }
    /// Deletes the recipe stored under `recipe_id`.
    pub fn delete_recipe_by_id(
        &self,
        recipe_id: &str,
        authorization: &str,
    ) -> Result<(), RecipeError> {
        let db = GithubDb::new(authorization, "open-cooking")?;
        // The turbofish pins the model type so the right `UniqDb` impl is used.
        <GithubDb as UniqDb<RecipeModel>>::delete(&db, recipe_id)?;
        Ok(())
    }
    /// Fetches one recipe and backfills missing ingredient icons/categories
    /// from the local ingredient database.
    pub fn get_recipe_by_id(
        &self,
        recipe_id: &str,
        authorization: &str,
    ) -> Result<RecipeModel, RecipeError> {
        let db = GithubDb::new(authorization, "open-cooking")?;
        let mut recipe: RecipeModel = db.get(recipe_id)?;
        for ingredient in recipe.ingredients.iter_mut() {
            // Only fill in metadata for ingredients that lack an icon.
            if ingredient.icon.is_some() {
                continue;
            }
            if let Some(info) = ingredient_info(&ingredient.name) {
                ingredient.icon = info.icon.map(|text| text.to_string());
                ingredient.category = Some(info.category.to_string());
            }
        }
        Ok(recipe)
    }
    /// Replaces the recipe stored under `recipe_id` and returns the stored copy.
    pub async fn update_recipe_by_id(
        &self,
        recipe_id: &str,
        recipe: &RecipeModel,
        authorization: &str,
    ) -> Result<RecipeModel, RecipeError> {
        let db = GithubDb::new(authorization, "open-cooking")?;
        let updated = db.update(recipe_id, recipe)?;
        Ok(updated)
    }
}
|
/* tshat - chat server for ssh clients
* Copyright 2021 Laurent Ghigonis <ooookiwi@gmail.com> */
extern crate thrussh;
extern crate thrussh_keys;
extern crate thrussh_libsodium;
extern crate futures;
extern crate tokio;
extern crate anyhow;
extern crate chrono;
extern crate clap;
extern crate regex;
extern crate fork;
extern crate data_encoding;
extern crate log;
extern crate env_logger;
extern crate zeroize;
extern crate md5;
use std::sync::{Mutex, Arc};
use std::str;
use std::mem;
use std::fmt;
use std::io::{self, Read, Write, BufReader, BufRead};
use std::fs::{File, OpenOptions};
use std::os::unix::fs::OpenOptionsExt;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use chrono::{DateTime, Local, TimeZone, NaiveDateTime, Timelike};
use regex::Regex;
use sodiumoxide::crypto::pwhash::argon2id13;
use thrussh_keys::PublicKeyBase64;
use log::{warn, info, debug};
use zeroize::{Zeroize, Zeroizing};
const HISTORY_MAX: usize = 100;
/// Global chat-server state shared by all connections.
struct Tshat {
// Configured accounts by name; `*` is the wildcard account.
users: HashMap<String, Arc<User>>,
// Active sessions keyed by (connection id, channel id).
usersess_map: HashMap<(usize, thrussh::ChannelId), Arc<Mutex<UserSession>>>,
// In-memory (and optionally on-disk) chat history; `None` when disabled.
history: Option<History>,
keyfp: (String, String), // SHA256 and MD5
config: Arc<thrussh::server::Config>,
}
/// Kind of a chat-history event.
#[derive(Debug)]
enum EventType {
Text,
Connect,
Disconnect,
Command,
Server,
}
/// One timestamped chat-history entry.
#[derive(Debug)]
struct Event {
evt: EventType,
time: DateTime<Local>,
nick: String,
text: String,
}
/// A configured user account and its accepted credentials.
#[derive(Debug)]
struct User {
name: String,
// When true, any password is accepted (keyboard-interactive login).
passwd_any: bool,
passwd_hashes: Vec<argon2id13::HashedPassword>,
keys: Vec<thrussh_keys::key::PublicKey>,
// Mutable per-user preferences/state, shared across sessions.
conf: Mutex<UserConf>,
}
/// Per-user mutable settings, restored from history at startup.
#[derive(Debug)]
struct UserConf {
// Whether to send a terminal bell on incoming messages.
bell: bool,
lastseen: DateTime<Local>,
// Number of currently connected sessions for this user.
active: u16,
}
/// Server handler from thrussh
#[derive(Clone)]
struct Handler {
conn_id: usize,
client_addr: Option<std::net::SocketAddr>,
// Set once authentication succeeds.
user: Option<Arc<User>>,
auth_username: Option<String>,
tshat: Arc<Mutex<Tshat>>,
usersess: Option<Arc<Mutex<UserSession>>>,
}
/// State of one established ssh chat session.
struct UserSession {
//conn_id: usize,
client_addr: std::net::SocketAddr,
user: Arc<User>,
auth_username: String,
// Disambiguates the nickname when several sessions share a username.
user_session_num: usize,
recv_buf: String,
cursor: u16,
sendbuf: String,
handle: thrussh::server::Handle,
channel: thrussh::ChannelId,
closing: bool,
eof: bool,
}
/// Chat history, kept in memory and optionally appended to a log file.
struct History {
events: Vec<Event>,
log: Option<File>,
}
/// Entry point: parses the command line, configures logging, loads users and
/// server key material, then runs the ssh chat server (daemonized unless -d).
fn main() -> Result<(), String> {
let args = clap::App::new("tshat")
.version("0.1")
.about("chat server for ssh clients")
.after_help("generate a password hash using 2GB of memory (stronger):
$ cat | tr -d '\\n' | argon2 $(openssl rand -base64 18) -id -m 21 -t 1 -e
generate a password hash using 64MB of memory:
$ cat | tr -d '\\n' | argon2 $(openssl rand -base64 18) -id -m 16 -t 3 -e
generate an ssh key:
$ ssh-keygen -f /tmp/mysshkey -t ed25519")
.setting(clap::AppSettings::ColorNever)
.arg(clap::Arg::new("debug")
.short('d')
.multiple_occurrences(true)
.about("do not daemonize and log to stdout (twice enables dependencies debugging)"))
.arg(clap::Arg::new("port")
.short('p')
.value_name("port")
.about("server port to bind, defaults to 2222"))
.arg(clap::Arg::new("logfile")
.short('l')
.value_name("logfile")
.about("chat log file, default to no file logging"))
.arg(clap::Arg::new("keyfile")
.short('k')
.value_name("keyfile")
.about("server key file, defaults to .tshat_key and .tshat_key.fp for fingerprint"))
.arg(clap::Arg::new("serverkeynofile")
.short('K')
.about("generate new server key and do not write key/fingerprint to file"))
.arg(clap::Arg::new("nohistory")
.short('L')
.about("do not remember any history"))
.arg(clap::Arg::new("users")
.required(true)
.multiple(true)
.about("- | <username>:'<password-hash>'|<ssh-pubkey>"))
.get_matches();
// -d controls daemonization and verbosity: once = debug for tshat only,
// twice = debug for all modules (dependencies included).
let (daemonize, loglevel, logmodule) = match args.occurrences_of("debug") {
0 => (true, log::LevelFilter::Info, Some("tshat")),
1 => (false, log::LevelFilter::Debug, Some("tshat")),
2 => (false, log::LevelFilter::Debug, None),
_ => return Err("-d can be specified only once or twice".to_string()),
};
// Info lines are printed bare; other levels carry level and module.
env_logger::Builder::new()
.filter(logmodule, loglevel)
.format(|buf, record| {
let module = record.module_path().unwrap_or("");
match record.level() {
log::Level::Info => writeln!(buf, "{}", record.args()),
_ => writeln!(buf, "{}: {}: {}", record.level(), module, record.args()),
}
})
.init();
sodiumoxide::init().expect("failed to initialize sodiumoxide");
// -k (key file) and -K (ephemeral key) are mutually exclusive.
let keynofile = args.is_present("serverkeynofile");
let keyfile = args.value_of("keyfile");
if keynofile && keyfile.is_some() {
return Err("cannot specify -k and -K at the same time".to_string());
}
let keypath = if keynofile {
None
} else {
match keyfile {
Some(f) => Some(f),
None => Some(".tshat_key"),
}
};
// Users come either from the arguments or, with "-", one per stdin line.
let (users_map, auth_methods) = {
let mut stdin_buf = String::new();
let users_list: Vec<&str> = match args.value_of("users").unwrap() {
"-" => {
io::stdin().read_to_string(&mut stdin_buf).expect("could not read stdin");
stdin_buf.split('\n').collect()
},
_ => args.values_of("users").unwrap().collect(),
};
parse_users(users_list)?
};
let port: u32 = args.value_of_t("port").unwrap_or(2222);
let addr = format!("0.0.0.0:{}", port);
info!("listenning on {}", addr);
// -l (log file) and -L (no history) are mutually exclusive.
let logfile = args.value_of("logfile");
let nohistory = args.is_present("nohistory");
if nohistory && logfile.is_some() {
return Err("cannot specify -l and -L at the same time".to_string());
}
let tshat = Tshat::new(keypath, users_map, auth_methods, nohistory, logfile)?;
// Fork to background unless -d was given; the parent exits immediately.
if daemonize {
match fork::daemon(false, false) {
Ok(fork::Fork::Child) => run(tshat, addr),
Ok(fork::Fork::Parent(_)) => Ok(()),
Err(e) => Err(format!("failed to daemonize: {}", e))
}
} else {
run(tshat, addr)
}
}
/// Parses `username:auth` entries into a user map plus the set of ssh
/// authentication methods the server must advertise.
///
/// Accepted auth forms per entry:
/// * `*` — accept any password (keyboard-interactive),
/// * `$argon2id$…` — an argon2id password hash,
/// * `AAAA…` — a base64-encoded ssh public key.
/// Empty lines and lines starting with `#` are ignored; a username may appear
/// on several lines to register several credentials.
fn parse_users(users_list: Vec<&str>) -> Result<(HashMap<String, User>, thrussh::MethodSet), String> {
    let mut users_map: HashMap<String, User> = HashMap::new();
    let mut auth_methods = thrussh::MethodSet::empty();
    // Compile the entry pattern once instead of once per user line.
    let re = Regex::new(r#"^(?P<username>[0-9A-Za-z\*]+):['"]?(?P<auth>[0-9a-zA-Z,\$= \+/\*\-]+)['"]?$"#).unwrap();
    for user in users_list {
        debug!("parse user: {}", user);
        if user.is_empty() || user.starts_with('#') {
            continue;
        }
        match re.captures(user) {
            Some(u) => {
                let username = u.name("username").unwrap().as_str().to_string();
                // Reuse the existing account when the name was seen before.
                let suser = match users_map.entry(username.clone()) {
                    Entry::Occupied(o) => o.into_mut(),
                    Entry::Vacant(v) => v.insert(User::new(username.clone())),
                };
                match u.name("auth") {
                    Some(auth) => {
                        let auths = auth.as_str();
                        debug!("auth:{}", auths);
                        if auths == "*" {
                            if !suser.passwd_hashes.is_empty() {
                                return Err(format!("user {} cannot have any-password when password was specified before", user));
                            }
                            suser.passwd_any = true;
                            auth_methods |= thrussh::MethodSet::KEYBOARD_INTERACTIVE;
                        } else if auths.starts_with("$argon2id$") {
                            if suser.passwd_any {
                                return Err(format!("user {} cannot have password when any-password was specified before", user));
                            }
                            // HashedPassword is a fixed 128-byte buffer; reject
                            // longer hash strings instead of panicking on an
                            // out-of-bounds write as the previous code did.
                            let bytes = auths.as_bytes();
                            if bytes.len() > 128 {
                                return Err(format!("user {} has an argon2id hash longer than 128 bytes", user));
                            }
                            let mut pw = [0; 128];
                            pw[..bytes.len()].copy_from_slice(bytes);
                            suser.passwd_hashes.push(argon2id13::HashedPassword(pw));
                            auth_methods |= thrussh::MethodSet::PASSWORD;
                        } else if auths.starts_with("AAAA") {
                            if suser.passwd_any {
                                return Err(format!("user {} cannot set ssh-key when any-password was specified before", user));
                            }
                            match thrussh_keys::parse_public_key_base64(auths) {
                                Ok(pubkey) => suser.keys.push(pubkey),
                                Err(_) => return Err(format!("user {} has invalid public key", user)),
                            }
                            auth_methods |= thrussh::MethodSet::PUBLICKEY;
                        } else {
                            return Err(format!("user {} cannot parse user authentication entry", user));
                        }
                    },
                    None => return Err(format!("user {} has no valid password / ssh key", user)),
                }
            },
            None => return Err(format!("user {} cannot be parsed", user)),
        }
    }
    Ok((users_map, auth_methods))
}
/// Builds the template thrussh handler and runs the ssh server on a fresh
/// tokio runtime, blocking until the server stops.
fn run(tshat: Tshat, addr: String) -> Result<(), String> {
    let config = Arc::clone(&tshat.config);
    let handler = Handler {
        conn_id: 0,
        client_addr: None,
        user: None,
        auth_username: None,
        tshat: Arc::new(Mutex::new(tshat)),
        usersess: None,
    };
    let runtime = tokio::runtime::Runtime::new().unwrap();
    runtime.block_on(async {
        debug!("starting ssh server");
        thrussh::server::run(config, &addr, handler)
            .await
            .map(|_| ())
            .map_err(|e| format!("error running ssh server: {}", e))
    })
}
/// Hashes a password with argon2id at interactive cost limits.
/// Currently unused; kept as a helper for generating password hashes.
/// NOTE(review): `hash.0` is a fixed 128-byte buffer, so the returned text
/// retains the buffer's trailing padding bytes — confirm before persisting.
pub fn hash(passwd: &str) -> (String, argon2id13::HashedPassword) {
sodiumoxide::init().unwrap();
let hash = argon2id13::pwhash(
passwd.as_bytes(),
argon2id13::OPSLIMIT_INTERACTIVE,
argon2id13::MEMLIMIT_INTERACTIVE,
)
.unwrap();
let texthash = std::str::from_utf8(&hash.0).unwrap().to_string();
(texthash, hash)
}
/// XXX patch thrussh-keys
/// Extension trait adding an MD5 key fingerprint, which thrussh-keys does
/// not provide itself.
pub trait MD5Hash {
/// Returns the MD5 fingerprint as colon-separated lowercase hex pairs.
fn fingerprint_md5(&self) -> String;
}
impl MD5Hash for thrussh_keys::key::PublicKey {
    /// Computes the legacy MD5 fingerprint over the raw public key bytes and
    /// renders it as colon-separated lowercase hex pairs.
    fn fingerprint_md5(&self) -> String {
        let digest = {
            let mut ctx = md5::Context::new();
            ctx.consume(&self.public_key_bytes()[..]);
            ctx.compute()
        };
        let pairs: Vec<String> = digest
            .into_iter()
            .map(|byte| format!("{:02x}", byte))
            .collect();
        pairs.join(":")
    }
}
impl User {
    /// Creates a user with no credentials and default per-user settings.
    fn new(name: String) -> User {
        User {
            name: name,
            passwd_any: false,
            passwd_hashes: Vec::new(),
            keys: Vec::new(),
            conf: Mutex::new(UserConf {
                bell: true,
                lastseen: Local.timestamp(0, 0),
                active: 0,
            }),
        }
    }
    /// Accepts any password when `passwd_any` is set; otherwise verifies the
    /// password against every stored argon2id hash.
    fn auth_password(&self, passwd: &str) -> Result<(), String> {
        let accepted = self.passwd_any
            || self
                .passwd_hashes
                .iter()
                .any(|hash| argon2id13::pwhash_verify(hash, passwd.as_bytes()));
        if accepted {
            Ok(())
        } else {
            Err("invalid password".to_string())
        }
    }
    /// Accepts the key if its raw bytes match any registered public key.
    fn auth_pubkey(&self, pubkey: &thrussh_keys::key::PublicKey) -> Result<(), String> {
        let known = self
            .keys
            .iter()
            .any(|pk| pk.public_key_bytes() == pubkey.public_key_bytes());
        if known {
            Ok(())
        } else {
            Err("unknown public key".to_string())
        }
    }
    /// Dispatches to password or public-key authentication; with neither
    /// credential supplied, succeeds only for `passwd_any` users.
    fn auth(&self, password: Option<&str>, pubkey: Option<&thrussh_keys::key::PublicKey>) -> Result<(), String> {
        match (password, pubkey) {
            // Password takes precedence when both credentials are present.
            (Some(password), _) => self.auth_password(password),
            (None, Some(pubkey)) => self.auth_pubkey(pubkey),
            (None, None) => {
                if self.passwd_any {
                    Ok(())
                } else {
                    Err(format!("Authentication without password not allowed for user {}", self.name))
                }
            }
        }
    }
    /// Returns the number of currently active sessions for this user.
    fn get_active(&self) -> u16 {
        self.conf.lock().unwrap().active
    }
}
impl Event {
/// Builds an event; `time` defaults to now (second precision), and CR/LF
/// characters are stripped from `text` so one event is always one log line.
fn new(evt: EventType, mut time: Option<DateTime<Local>>, nick: String, text: &str) -> Event {
if time.is_none() {
time = Some(Local::now().with_nanosecond(0).unwrap());
}
Event {
evt: evt,
time: time.unwrap(),
nick: nick,
text: text.to_string().replace(&['\r', '\n'][..], ""),
}
}
/// Parses one log line back into an `Event`, trying each event-type
/// pattern in turn (text, connect, disconnect, command, server). These
/// patterns mirror the formats written by the `Display` impl below.
fn parse(s: &str) -> Result<Event, String> {
let (evt, re) = {
if let Some(ev) = Regex::new(r#"^(?P<time>[0-9_]+) <(?P<nick>[^>]+)> (?P<text>.*)$"#).unwrap().captures(s) {
(EventType::Text, ev)
} else if let Some(ev) = Regex::new(r#"^(?P<time>[0-9_]+) (?P<nick>[^ ]+) connected (?P<text>.*)$"#).unwrap().captures(s) {
(EventType::Connect, ev)
} else if let Some(ev) = Regex::new(r#"^(?P<time>[0-9_]+) (?P<nick>[^ ]+) disconnected (?P<text>.*)$"#).unwrap().captures(s) {
(EventType::Disconnect, ev)
} else if let Some(ev) = Regex::new(r#"^(?P<time>[0-9_]+) (?P<nick>[^ ]+) (?P<text>/.*)$"#).unwrap().captures(s) {
(EventType::Command, ev)
} else if let Some(ev) = Regex::new(r#"^(?P<time>[0-9_]+) >>> tshat server (?P<text>.*)"#).unwrap().captures(s) {
(EventType::Server, ev)
} else {
return Err(format!("cannot parse event: {}", s))
}
};
// Timestamps are stored in local time as YYYYmmdd_HHMMSS.
let time_parsed = &NaiveDateTime::parse_from_str(re.name("time").unwrap().as_str(), "%Y%m%d_%H%M%S")
.map_err(|_| format!("cannot parse event time : {}", s))?;
let time = Local.from_local_datetime(&time_parsed).unwrap();
// Server events carry no nick; default to the empty string.
let nick = match re.name("nick") {
Some(n) => n.as_str(),
None => "",
};
Ok(Event::new(evt, Some(time), nick.to_string(), re.name("text").unwrap().as_str()))
}
}
/// Wipes user-provided content from memory when an event is zeroized.
impl Zeroize for Event {
fn zeroize(&mut self) {
self.nick.zeroize();
self.text.zeroize();
}
}
impl fmt::Display for Event {
    /// Renders an event as one CRLF-terminated chat-log line, using the same
    /// `YYYYmmdd_HHMMSS` timestamp format that `Event::parse` reads back.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The timestamp prefix is common to every event type.
        let ts = self.time.format("%Y%m%d_%H%M%S");
        match self.evt {
            EventType::Text => write!(f, "{} <{}> {}\r\n", ts, self.nick, self.text),
            EventType::Connect => write!(f, "{} {} connected {}\r\n", ts, self.nick, self.text),
            EventType::Disconnect => write!(f, "{} {} disconnected {}\r\n", ts, self.nick, self.text),
            EventType::Command => write!(f, "{} {} {}\r\n", ts, self.nick, self.text),
            EventType::Server => write!(f, "{} >>> tshat server {}\r\n", ts, self.text),
        }
    }
}
impl Tshat {
/// Builds the server state: loads or generates the ed25519 host key,
/// records its fingerprints, prepares the user map, optionally replays
/// history from the log file, and assembles the thrussh configuration.
fn new(keypath: Option<&str>, users_map: HashMap<String, User>, auth_methods: thrussh::MethodSet, nohistory: bool, logfile: Option<&str>) -> Result<Tshat, String> {
/* generate or read server keys */
let secretkey = match keypath {
Some(keypath) => {
match File::open(keypath) {
Ok(mut keyfile) => {
info!("using existing ssh server key from {}", keypath);
let mut buf = [0; 64];
// NOTE(review): `read` may return fewer than 64 bytes without
// error — `read_exact` would be stricter; confirm key files
// are always exactly 64 bytes.
keyfile.read(&mut buf).expect("could not read from key file");
thrussh_libsodium::ed25519::SecretKey { key: buf }
},
Err(_) => {
// No key file yet: generate one and persist it mode 0600.
match OpenOptions::new().mode(0o600).write(true).create(true).open(keypath) {
Ok(mut keyfile) => {
info!("generating new ssh server key and writing to {}", keypath);
let (_, secretkey) = thrussh_libsodium::ed25519::keypair();
// NOTE(review): `write` may be partial — `write_all`
// would guarantee the whole key is persisted.
keyfile.write(&secretkey.key).expect("could not write to key file");
secretkey
}
Err(_why) => return Err(format!("could not open key file for writing generated key: {}", keypath)),
}
},
}
},
None => {
info!("generating temporary ssh server key");
let (_, secretkey) = thrussh_libsodium::ed25519::keypair();
secretkey
}
};
let keypair = thrussh_keys::key::KeyPair::Ed25519(secretkey.clone());
/* generate and store server key fingerprint */
let keyfp_sha256 = format!("SHA256:{}", keypair.clone_public_key().fingerprint());
let keyfp_md5 = format!("MD5:{}", keypair.clone_public_key().fingerprint_md5());
let keyfp_txt = format!("server ED25519 fingerprint {}\nserver ED25519 fingerprint {}", keyfp_sha256, keyfp_md5);
info!("{}", keyfp_txt);
// When the key is file-backed, also write the fingerprints to <key>.fp.
if let Some(keypath) = keypath {
let fppath = format!("{}.fp", keypath);
if let Ok(mut fpfile) = OpenOptions::new().mode(0o600).write(true).create(true).open(&fppath) {
let keyfp_txtn = format!("{}\n", keyfp_txt);
fpfile.write(keyfp_txtn.as_bytes()).expect("could not write to key fingerprint file");
} else {
return Err(format!("could not open key fingerprint file for writing: {}", fppath));
}
}
/* prepare users map */
let mut users = HashMap::new();
for (username, suser) in users_map {
users.insert(username, Arc::new(suser));
}
debug!("users: {:?}", users.keys());
/* initialize history */
let history = match nohistory {
true => None,
false => {
let mut history = History::new();
if let Some(logfile) = logfile {
history.load_log(logfile)?;
// Restore per-user state (lastseen, bell) from the replayed log.
history.update_userconf(&users);
}
history.push(Event::new(EventType::Server, None, "".to_string(), "startup"));
Some(history)
},
};
/* create ssh server configuration */
let mut config = thrussh::server::Config::default();
// NOTE(review): this second keypair duplicates the one built above for
// fingerprinting — likely redundant; confirm and deduplicate.
let keypair = thrussh_keys::key::KeyPair::Ed25519(secretkey.clone());
config.connection_timeout = None; // by default openssh does not send keepalives
config.auth_rejection_time = std::time::Duration::from_secs(2);
// Minimal banner so the server does not advertise software details.
config.server_id = "SSH-2.0--".to_string();
config.methods = auth_methods;
config.keys.push(keypair);
/* create Tshat */
Ok(Tshat {
users: users,
usersess_map: HashMap::new(),
history: history,
keyfp: (keyfp_sha256, keyfp_md5),
config: Arc::new(config),
})
}
/// attempts authentication of a user to the server, using password or public key
/// if the user is not found, the wildcard user is also tried if present
fn auth(&self, user: &str, password: Option<&str>, pubkey: Option<&thrussh_keys::key::PublicKey>) -> Result<Arc<User>, String> {
let suser = match self.users.contains_key(user) {
true => self.users.get(user),
false => self.users.get("*"),
};
match suser {
Some(suser) => {
match suser.auth(password, pubkey) {
Ok(_) => Ok(suser.clone()),
Err(e) => Err(e),
}
},
None => Err("user does not exist".to_string()),
}
}
/// returns an empty session number for users authenticated under a specific user name
/// it is used to create the nickname of a user when multiple users have the same name
fn get_user_session_num(&self, username: &str) -> usize {
// Collect the session numbers already taken for this username.
let used_nums: Vec<usize> = self.usersess_map.values().into_iter()
.filter_map(|usersess| {
let usersess = usersess.lock().unwrap();
match usersess.auth_username == username {
true => Some(usersess.user_session_num),
false => None,
}
}).collect();
// NOTE(review): `size_of::<usize>()` is 8 on 64-bit targets, capping
// concurrent same-name sessions at 8 before panicking — the intended
// bound was possibly the bit width or a larger constant; confirm.
for i in 0..mem::size_of::<usize>() {
if used_nums.iter().find(|&&x| i == x).is_none() {
return i;
}
}
panic!("exausted user_session_num");
}
}
impl History {
    /// Creates an empty history with no backing log file.
    fn new() -> History {
        History {
            events: Vec::new(),
            log: None,
        }
    }
    /// Opens (creating if needed, mode 0600) the chat log at `path`, replays
    /// every parseable line into memory, then keeps the file open for
    /// appending new events.
    fn load_log(&mut self, path: &str) -> Result<(), String> {
        debug!("History load_log");
        let log = match OpenOptions::new().mode(0o600).read(true).append(true).create(true).open(path) {
            Err(why) => return Err(format!("couldn't open log file {}: {}", path, why)),
            Ok(file) => file,
        };
        let mut reader = BufReader::new(log);
        let mut buffer = String::new();
        while let Ok(len) = reader.read_line(&mut buffer) {
            if len == 0 {
                break;
            }
            // Fix: the previous code sliced off the last byte unconditionally
            // (`&buffer[..len-1]`), which dropped a real character when the
            // final line had no trailing newline. Strip line endings instead.
            match Event::parse(buffer.trim_end_matches(|c| c == '\r' || c == '\n')) {
                Ok(event) => self.push(event),
                Err(e) => warn!("History load_log parse event failed: {}", e),
            }
            buffer.clear();
        }
        // Reclaim the File so later pushes append to the same handle.
        self.log = Some(reader.into_inner());
        Ok(())
    }
    /// Appends the event to the on-disk log (when enabled) and to memory.
    fn push(&mut self, event: Event) {
        // `&File` implements `Write`, so we write through a shared reference.
        if let Some(mut log) = self.log.as_ref() {
            log.write_all(event.to_string().as_bytes()).expect("History push write failed");
        };
        self.events.push(event);
    }
    /// read history log and update users configuration
    ///
    /// Restores per-user state from replayed events: last-seen timestamps,
    /// and the bell preference toggled by `/bell` and `/nobell` commands.
    fn update_userconf(&self, users: &HashMap<String, Arc<User>>) {
        for ev in &self.events {
            if let Some(user) = &users.get(&ev.nick) {
                let mut userconf = user.conf.lock().unwrap();
                /* update last seen */
                if ev.time > userconf.lastseen {
                    userconf.lastseen = ev.time;
                }
                /* update bell */
                if matches!(ev.evt, EventType::Command) {
                    match ev.text.as_str() {
                        "/bell" => userconf.bell = true,
                        "/nobell" => userconf.bell = false,
                        _ => (),
                    }
                }
            }
        }
    }
}
impl thrussh::server::Server for Handler {
type Handler = Self;
/// Called once per incoming connection: clones the shared template
/// handler (the clone keeps the current `conn_id`), then bumps the
/// template's counter so the next connection gets a fresh id.
fn new(&mut self, addr: Option<std::net::SocketAddr>) -> Self {
// XXX check conn_id overflow
let mut s = self.clone();
self.conn_id += 1;
s.client_addr = addr;
debug!("Handler new self.conn_id={} s.id={}", self.conn_id, s.conn_id);
s
}
}
impl thrussh::server::Handler for Handler {
type Error = anyhow::Error;
// All callbacks complete immediately, so ready-made futures suffice.
type FutureAuth = futures::future::Ready<Result<(Self, thrussh::server::Auth), anyhow::Error>>;
type FutureUnit = futures::future::Ready<Result<(Self, thrussh::server::Session), anyhow::Error>>;
type FutureBool = futures::future::Ready<Result<(Self, thrussh::server::Session, bool), anyhow::Error>>;
// Helpers wrapping finished values into the ready-future types above.
fn finished_auth(self, auth: thrussh::server::Auth) -> Self::FutureAuth {
futures::future::ready(Ok((self, auth)))
}
fn finished_bool(self, b: bool, s: thrussh::server::Session) -> Self::FutureBool {
futures::future::ready(Ok((self, s, b)))
}
fn finished(self, s: thrussh::server::Session) -> Self::FutureUnit {
futures::future::ready(Ok((self, s)))
}
/// Keyboard-interactive auth: succeeds only for accounts configured with
/// `*` (any password); submethods and response are logged but unused.
fn auth_keyboard_interactive(mut self, user: &str, submethods: &str, response: Option<thrussh::server::Response>) -> Self::FutureAuth {
debug!("XXX auth_keyboard_interactive user={} submethods={} response={:?}", user, submethods, response);
// Keep the global lock scope tight: release before finishing auth.
let res = {
let tshat = self.tshat.lock().expect("internal error: cannot lock on tshat");
tshat.auth(user, None, None)
};
match res {
Ok(suser) => {
debug!("auth_keyboard_interactive: authenticated successfully for user '{}'", user);
self.user = Some(suser);
self.auth_username = Some(user.to_string());
self.finished_auth(thrussh::server::Auth::Accept)
},
Err(e) => {
debug!("auth_keyboard_interactive: rejected user '{}' : {}", user, e);
self.finished_auth(thrussh::server::Auth::Reject)
},
}
}
fn auth_publickey(mut self, user: &str, pubkey: &thrussh_keys::key::PublicKey) -> Self::FutureAuth {
let res = {
let tshat = self.tshat.lock().expect("internal error: cannot lock on tshat");
tshat.auth(user, None, Some(pubkey))
};
match res {
Ok(suser) => {
debug!("auth_publickey: authenticated successfully for user '{}' pubkey '{}': {:?}", user, pubkey.fingerprint(), pubkey);
self.user = Some(suser);
self.auth_username = Some(user.to_string());
self.finished_auth(thrussh::server::Auth::Accept)
},
Err(e) => {
debug!("auth_publickey: rejected user '{}' pubkey '{}' {:?} : {}", user, pubkey.fingerprint(), pubkey, e);
self.finished_auth(thrussh::server::Auth::Reject)
},
}
}
fn auth_password(mut self, user: &str, password: &str) -> Self::FutureAuth {
let res = {
let tshat = self.tshat.lock().expect("internal error: cannot lock on tshat");
tshat.auth(user, Some(password), None)
};
match res {
Ok(suser) => {
debug!("auth_password: authenticated successfully for user '{}'", user);
self.user = Some(suser);
self.auth_username = Some(user.to_string());
self.finished_auth(thrussh::server::Auth::Accept)
},
Err(e) => {
debug!("auth_password: rejected user '{}' : {}", user, e);
self.finished_auth(thrussh::server::Auth::Reject)
},
}
}
fn channel_open_session(mut self, channel: thrussh::ChannelId, session: thrussh::server::Session) -> Self::FutureUnit {
match self.user {
Some(ref u) => {
debug!("channel_open_session: {} from {} : conn_id={} channel={:?}", u.name, self.client_addr.unwrap().to_string(), self.conn_id, channel);
if let Ok(mut tshat) = self.tshat.lock() {
if tshat.usersess_map.contains_key(&(self.conn_id, channel)) {
warn!("channel already open !");
} else {
if let Some(ref auth_username) = self.auth_username {
let mut usersess = UserSession::new(self.conn_id, self.client_addr.unwrap(), u.clone(), auth_username, tshat.get_user_session_num(auth_username), channel, session.handle());
let event = Event::new(EventType::Connect, None, usersess.nick(), &format!("from {}", usersess.client_addr.to_string()));
for ((_, _), us) in tshat.usersess_map.iter_mut() {
let mut us = us.lock().unwrap();
us.sendbuf_prompt_hide();
us.sendbuf_push_line(&event);
us.sendbuf_prompt_restore();
us.sendbuf_send();
}
if let Some(ref mut history) = tshat.history {
history.push(event);
}
usersess.connect();
usersess.sendbuf_push_welcome(&tshat.users);
let lastseen = {
match usersess.user.name.as_str() {
"*" => None,
_ => Some(usersess.user.conf.lock().unwrap().lastseen),
}
};
if let Some(ref mut history) = tshat.history {
usersess.sendbuf_push_history(&history, lastseen, true);
}
usersess.sendbuf_push_prompt();
usersess.sendbuf_send();
usersess.update_lastseen();
let usersess = Arc::new(Mutex::new(usersess));
self.usersess = Some(usersess.clone());
tshat.usersess_map.insert((self.conn_id, channel), usersess.clone());
} else {
warn!("channel_open_session auth_username not set");
}
}
} else {
panic!("cannot lock tshat");
}
},
None => {
warn!("channel_open_session has no user in session");
}
};
self.finished(session)
}
fn data(self, channel: thrussh::ChannelId, data: &[u8], session: thrussh::server::Session) -> Self::FutureUnit {
let mut did_broadcast = false;
debug!("data from user {:?} [{}-{:?}] : {:02X?} = {}", self.auth_username.as_ref(), self.conn_id, channel, data, str::from_utf8(data).unwrap().to_string());
if let Ok(mut tshat) = self.tshat.lock() {
if let Some(usersess) = self.usersess.as_ref() {
if let Ok(mut usersess) = usersess.lock() {
for c in data {
match *c {
0x20..=0x7e => {
/* printable character */
usersess.sendbuf.push(*c as char);
usersess.recv_buf.push(*c as char);
usersess.cursor += 1;
},
0x0d => {
/* enter */
if usersess.recv_buf.chars().nth(0) == Some('/') {
/* command */
let event = Event::new(EventType::Command, None, usersess.nick(), &usersess.recv_buf.clone());
if let Some(ref mut history) = tshat.history {
history.push(event);
}
match usersess.recv_buf.as_str() {
"/help" => {
if tshat.history.is_some() {
usersess.sendbuf.push_str("\r\n/history print all chat history");
}
if usersess.user.name != "*" {
usersess.sendbuf.push_str("\r\n/bell enable message bell notification");
usersess.sendbuf.push_str("\r\n/nobell disable message bell notification");
usersess.sendbuf.push_str("\r\n/conf show user configuration");
}
usersess.sendbuf.push_str("\r\n/users list allowed users and active connections");
usersess.sendbuf.push_str("\r\n/fp show server key fingerprint");
usersess.sendbuf.push_str("\r\n/quit exit chat (shortcut: ctrl-d)");
usersess.sendbuf.push_str("\r\n");
},
"/history" => {
usersess.sendbuf.push_str("\r\n");
if let Some(ref mut history) = tshat.history {
usersess.sendbuf_push_history(&history, None, false);
}
},
"/bell" => {
usersess.sendbuf.push_str("\r\n");
if usersess.user.name != "*" {
usersess.sendbuf.push_str("bell enabled\r\n");
let mut userconf = usersess.user.conf.lock().unwrap();
userconf.bell = true;
}
},
"/nobell" => {
usersess.sendbuf.push_str("\r\n");
if usersess.user.name != "*" {
usersess.sendbuf.push_str("bell disabled\r\n");
let mut userconf = usersess.user.conf.lock().unwrap();
userconf.bell = false;
}
},
"/conf" => {
usersess.sendbuf.push_str("\r\n");
if usersess.user.name != "*" {
let (bell, lastseen) = {
let userconf = usersess.user.conf.lock().unwrap();
(userconf.bell, userconf.lastseen)
};
let s = format!(concat!("user {} configuration:\r\n",
"bell: {}\r\n",
"lastseen: {}\r\n"), usersess.user.name, bell, lastseen.format("%Y%m%d_%H%M%S"));
usersess.sendbuf.push_str(&s);
}
},
"/users" => {
usersess.sendbuf.push_str("\r\n");
usersess.sendbuf_push_users(&tshat.users);
},
"/fp" => {
let s = format!(concat!("\r\nserver ED25519 fingerprint {}\r\n",
"server ED25519 fingerprint {}\r\n"), &tshat.keyfp.0, &tshat.keyfp.1);
usersess.sendbuf.push_str(&s);
},
"/quit" => {
usersess.sendbuf.push_str("\r\ngoodbye\r\n");
usersess.closing = true;
},
_ => {
usersess.sendbuf.push_str("\r\ncommand not understood\r\n");
},
}
} else {
/* normal text */
let event = Event::new(EventType::Text, None, usersess.nick(), &usersess.recv_buf.clone());
/* print text locally */
usersess.sendbuf_prompt_hide();
usersess.sendbuf_push_line(&event);
/* broadcast text */
for ((us_connid, us_channel), us) in tshat.usersess_map.iter_mut() {
if !(*us_connid == self.conn_id && *us_channel == channel) {
let mut us = us.lock().unwrap();
us.sendbuf_prompt_hide();
us.sendbuf_push_line(&event);
if us.user.conf.lock().unwrap().bell {
us.sendbuf_push_bell();
}
us.sendbuf_prompt_restore();
}
}
/* store text in history */
if let Some(ref mut history) = tshat.history {
history.push(event);
}
did_broadcast = true;
}
/* new prompt */
usersess.sendbuf_push_prompt();
},
0x7f => {
/* del */
if usersess.cursor > 2 {
usersess.sendbuf.push_str("\x08 \x08");
usersess.recv_buf.pop();
usersess.cursor -= 1;
}
},
0x03 => {
/* ctr-c */
usersess.sendbuf_prompt_hide();
usersess.sendbuf_push_prompt();
},
0x04 => {
/* ctr-d */
if usersess.cursor == 2 {
usersess.sendbuf.push_str("\r\ngoodbye\r\n");
usersess.closing = true;
}
},
_ => (),
}
}
usersess.sendbuf_send();
usersess.update_lastseen();
}
}
if did_broadcast {
for ((us_connid, us_channel), us) in tshat.usersess_map.iter_mut() {
if !(*us_connid == self.conn_id && *us_channel == channel) {
let mut us = us.lock().unwrap();
us.sendbuf_send();
}
}
}
} else {
panic!("cannot lock tshat");
}
self.finished(session)
}
fn extended_data(self, _channel: thrussh::ChannelId, _ext: u32, _data: &[u8], session: thrussh::server::Session) -> Self::FutureUnit {
debug!("extended_data from user {:?}", self.user.as_ref());
self.finished(session)
}
fn pty_request(
self,
channel: thrussh::ChannelId,
term: &str,
_col_width: u32,
_row_height: u32,
_pix_width: u32,
_pix_height: u32,
modes: &[(thrussh::Pty, u32)],
mut session: thrussh::server::Session,
) -> Self::FutureUnit {
debug!("pty_request {} {:?}", term, modes);
// we don't want to be rude and prevent ssh some client to connect
// (like Termius)
session.channel_success(channel);
self.finished(session)
}
fn shell_request(self, channel: thrussh::ChannelId, mut session: thrussh::server::Session) -> Self::FutureUnit {
debug!("shell_request");
// we don't want to be rude and prevent ssh some client to connect
// (like Termius)
session.channel_success(channel);
self.finished(session)
}
fn channel_close(mut self, channel: thrussh::ChannelId, session: thrussh::server::Session) -> Self::FutureUnit {
debug!("channel_close from user {:?} {} {:?}", self.user.as_ref(), self.conn_id, channel);
if let Ok(mut tshat) = self.tshat.lock() {
if let Some(usersess) = self.usersess {
let mut usersess = usersess.lock().unwrap();
let reason = match usersess.eof {
true => "(timeout)",
false => {
usersess.update_lastseen();
""
}
};
usersess.disconnect();
let event = Event::new(EventType::Disconnect, None, usersess.nick(), reason);
for ((us_connid, us_channel), us) in tshat.usersess_map.iter_mut() {
if !(*us_connid == self.conn_id && *us_channel == channel) {
let mut us = us.lock().unwrap();
us.sendbuf_prompt_hide();
us.sendbuf_push_line(&event);
us.sendbuf_prompt_restore();
us.sendbuf_send();
}
}
if let Some(ref mut history) = tshat.history {
history.push(event);
}
tshat.usersess_map.remove(&(self.conn_id, channel));
self.usersess = None;
} else {
warn!("channel_close() called on non-existent user session : {} {:?}", self.conn_id, channel);
}
} else {
panic!("cannot lock tshat");
}
self.finished(session)
}
fn channel_eof(self, _channel: thrussh::ChannelId, session: thrussh::server::Session) -> Self::FutureUnit {
debug!("channel_eof from user {:?}", self.user.as_ref());
if let Some(ref usersess) = self.usersess {
let mut usersess = usersess.lock().unwrap();
usersess.eof = true;
}
self.finished(session)
}
}
impl UserSession {
    /// Build the per-channel session state for a freshly opened SSH channel.
    fn new(_conn_id: usize, client_addr: std::net::SocketAddr, user: Arc<User>, auth_username: &str, user_session_num: usize, channel: thrussh::ChannelId, handle: thrussh::server::Handle) -> UserSession {
        UserSession {
            //conn_id: conn_id,
            client_addr: client_addr,
            user: user.clone(),
            auth_username: auth_username.to_string(),
            user_session_num: user_session_num,
            recv_buf: String::new(),
            cursor: 0,
            sendbuf: String::new(),
            channel: channel,
            handle: handle,
            closing: false,
            eof: false,
        }
    }
    /// generate a nickname for a connecting user:
    /// "*" users get a leading star, and additional simultaneous sessions of
    /// the same username get a "(n)" suffix.
    fn nick(&self) -> String {
        let mut nick = String::new();
        if self.user.name.as_str() == "*" {
            nick.push_str("*");
        };
        nick.push_str(&self.auth_username);
        if self.user_session_num > 0 {
            nick.push_str(&format!("({})", self.user_session_num));
        }
        nick
    }
    /// Record "now" (second precision) as the user's last-seen timestamp.
    fn update_lastseen(&self) {
        let mut userconf = self.user.conf.lock().unwrap();
        userconf.lastseen = Local::now().with_nanosecond(0).unwrap();
    }
    /// XXX very ugly way to send to other clients by getting entering the tokio runtime mannually to send data
    /// see https://nest.pijul.com/pijul/thrussh/discussions/38#69cdeb44-99b5-4b78-8a48-6ea2f9fcce8f
    //fn asyncsim_send(handle: &thrussh::server::Handle, channel: thrussh::ChannelId, bytes: &[u8], mut close_after_send: bool) {
    fn asyncsim_send(handle: &thrussh::server::Handle, channel: thrussh::ChannelId, buf: thrussh::CryptoVec, mut close_after_send: bool) {
        // Spawn a fire-and-forget task on the current tokio runtime; errors
        // during send escalate to closing the channel.
        let tokiohandle = tokio::runtime::Handle::current();
        let mut sess = handle.clone();
        tokiohandle.spawn(async move {
            match sess.data(channel, buf).await {
                Ok(_) => (),
                Err(_) => {
                    debug!("detected error while send, closing connection");
                    close_after_send = true;
                },
            }
            if close_after_send {
                match sess.close(channel).await {
                    Ok(_) => (),
                    Err(_) => warn!("detected error while close"),
                    // XXX communicate with Tshat to indicate that user did disconnect
                }
            }
        });
    }
    /// Queue one rendered event line in the send buffer.
    /// The Zeroizing wrapper wipes the temporary string copy after use,
    /// consistent with the zeroize() calls on the buffers themselves.
    fn sendbuf_push_line(&mut self, line: &Event) {
        self.sendbuf.push_str(&Zeroizing::new(line.to_string()));
    }
    /// Queue backspace-space-backspace sequences to erase the prompt and the
    /// partially typed line from the client's screen.
    /// NOTE(review): `cursor` already counts the 2-char "> " prompt (it is
    /// reset to 2 in sendbuf_push_prompt), so this erases cursor+2 cells —
    /// confirm the extra 2 is intentional and not a double count.
    fn sendbuf_prompt_hide(&mut self) {
        for _ in 0..(self.cursor+2) {
            self.sendbuf.push_str("\x08 \x08");
        }
    }
    /// Re-queue the prompt and the user's partially typed input after an
    /// interleaved broadcast line (inverse of sendbuf_prompt_hide).
    fn sendbuf_prompt_restore(&mut self) {
        self.sendbuf.push_str("> ");
        self.sendbuf.push_str(&self.recv_buf);
    }
    /// Queue history playback: a header, then all non-command events since
    /// `since` (or all of them when None), truncated to HISTORY_MAX entries
    /// when `truncate` is set.
    fn sendbuf_push_history(&mut self, history: &History, since: Option<DateTime<Local>>, truncate: bool) {
        /* print a header above history */
        // epoch (timestamp 0) is used as the sentinel for "never seen before"
        let intro = match since {
            Some(since) => {
                if since == Local.timestamp(0, 0) {
                    "history since server startup:\r\n".to_string()
                } else {
                    format!("history since last seen at {}:\r\n", since.format("%Y%m%d_%H%M%S"))
                }
            },
            None => format!("history since server startup:\r\n"),
        };
        self.sendbuf.push_str(&intro);
        /* build a list of events we should show */
        // command events (/bell etc.) are bookkeeping, never replayed
        let mut events = Vec::new();
        for ev in &history.events {
            if since.is_none() || (ev.time >= since.unwrap()) {
                match ev.evt {
                    EventType::Command => (),
                    _ => events.push(ev),
                }
            }
        }
        /* limit events count to HISTORY_MAX */
        let from = if truncate && events.len() >= HISTORY_MAX {
            events.len() - HISTORY_MAX
        } else {
            0
        };
        if from > 0 {
            let s = format!("[... {} events above ...]\r\n", from);
            self.sendbuf.push_str(&s);
        }
        /* show the events */
        for ev in &events[from..] {
            self.sendbuf.push_str(&Zeroizing::new(ev.to_string()));
        }
    }
    /// Queue the welcome banner: greeting, user list, and help hint.
    fn sendbuf_push_welcome(&mut self, users: &HashMap<String, Arc<User>>) {
        self.sendbuf.push_str(">>> welcome ");
        self.sendbuf.push_str(&self.nick());
        self.sendbuf.push_str("\r\n>>> ");
        self.sendbuf_push_users(users);
        self.sendbuf.push_str(">>> type /help to list available commands\r\n");
    }
    /// Queue the "name[active-count]" roster of all allowed users.
    fn sendbuf_push_users(&mut self, users: &HashMap<String, Arc<User>>) {
        let mut s = String::new();
        self.sendbuf.push_str("users allowed in the room: ");
        for (_, u) in users {
            let us = format!("{}[{}] ", u.name, u.clone().get_active());
            s.push_str(&us);
        }
        self.sendbuf.push_str(&s);
        self.sendbuf.push_str("\r\n");
    }
    /// Queue a terminal bell (BEL, 0x07).
    fn sendbuf_push_bell(&mut self) {
        self.sendbuf.push_str("\x07");
    }
    /// Reset the input line (wiping its previous content from memory) and
    /// queue a fresh "> " prompt; cursor 2 == empty line.
    fn sendbuf_push_prompt(&mut self) {
        self.recv_buf.zeroize();
        self.recv_buf.clear();
        self.sendbuf.push_str("> ");
        self.cursor = 2;
    }
    /// Flush the queued send buffer to the client (async, fire-and-forget),
    /// closing the channel afterwards when `closing` is set, then wipe the
    /// buffer contents from memory.
    fn sendbuf_send(&mut self) {
        if self.sendbuf.len() > 0 {
            UserSession::asyncsim_send(&self.handle, self.channel, thrussh::CryptoVec::from_slice(self.sendbuf.as_bytes()), self.closing);
            self.sendbuf.zeroize();
            self.sendbuf.clear();
        }
    }
    /// Bump the user's active-session counter.
    fn connect(&mut self) {
        let mut userconf = self.user.conf.lock().unwrap();
        userconf.active += 1;
    }
    /// Decrement the user's active-session counter.
    /// NOTE(review): underflows (debug panic) if ever called without a
    /// matching connect() — relies on the channel_open/close pairing.
    fn disconnect(&mut self) {
        let mut userconf = self.user.conf.lock().unwrap();
        userconf.active -= 1;
    }
}
|
mod editor;
mod env_editor;
|
use std::{thread, time};
use std::io::{self, Write};
/// Interface shared by the real printer and its proxy (Proxy pattern demo).
trait Printable {
    /// Replace the printer's display name.
    fn set_printer_name(&mut self, name: String);
    /// Return the printer's current display name.
    fn get_printer_name(&self) -> String;
    /// Print `string`, preceded by a banner carrying the printer's name.
    fn print(&mut self, string: String);
}
/// The "real subject": expensive to construct (simulated 5-second job in
/// `Printer::new`), which is why `PrinterProxy` defers creating it.
struct Printer {
    // display name shown in the print banner
    name: String,
}
impl Printer {
    /// Build a Printer, running the simulated expensive initialization first.
    fn new(name: String) -> Printer {
        Printer::heaby_job(format!("Printerのインスタンス({})を生成中", name));
        Printer { name }
    }
    /// Simulate slow work: print `msg`, then one dot per second for five
    /// seconds, flushing after each write so progress is visible.
    /// (NOTE: "heaby" looks like a typo for "heavy"; kept for compatibility.)
    fn heaby_job(msg: String) {
        print!("{}", msg);
        io::stdout().flush().unwrap();
        let one_second = time::Duration::from_millis(1000);
        for _ in 0..5 {
            thread::sleep(one_second);
            print!(".");
            io::stdout().flush().unwrap();
        }
        println!("完了。");
    }
}
impl Printable for Printer {
    /// Store the new name.
    fn set_printer_name(&mut self, name: String) {
        self.name = name;
    }
    /// Hand back an owned copy of the name.
    fn get_printer_name(&self) -> String {
        self.name.to_owned()
    }
    /// Emit a name banner followed by the payload.
    fn print(&mut self, string: String) {
        let banner = format!("=== {} ===", self.name);
        println!("{}", banner);
        println!("{}", string);
    }
}
struct PrinterProxy {
name: String,
real: Option<Box<Printable>>,
}
impl PrinterProxy {
    /// Create a proxy that has not yet built its real printer.
    fn new(name: String) -> PrinterProxy {
        PrinterProxy { name, real: None }
    }
    /// Instantiate the real printer on first need; later calls are no-ops.
    fn realize(&mut self) {
        if self.real.is_none() {
            let printer = Printer::new(self.name.clone());
            self.real = Some(Box::new(printer));
        }
    }
}
impl Printable for PrinterProxy {
    /// Forward the new name to the real printer if it already exists, and
    /// record it locally either way (so a later realize() uses it).
    fn set_printer_name(&mut self, name: String) {
        // The original wrapped this match in a redundant `is_some()` check;
        // `if let` expresses the same thing in one step.
        if let Some(real) = self.real.as_mut() {
            real.set_printer_name(name.clone());
        }
        self.name = name;
    }
    /// The name is always mirrored locally, so no realization is needed.
    fn get_printer_name(&self) -> String {
        self.name.clone()
    }
    /// Printing requires the real subject: realize it, then delegate.
    fn print(&mut self, string: String) {
        self.realize();
        if let Some(real) = self.real.as_mut() {
            real.print(string);
        }
    }
}
/// Demo driver: name operations are cheap on the proxy; only the final
/// print() pays the cost of constructing the real Printer.
fn main() {
    let mut proxy = PrinterProxy::new("Alice".to_string());
    println!("名前は現在{}です。", proxy.get_printer_name());
    proxy.set_printer_name("Bob".to_string());
    println!("名前は現在{}です。", proxy.get_printer_name());
    proxy.print("Hello, world.".to_string());
}
|
// Auto-generated (svd2rust-style) accessors for GTZC1_TZIC_IER3.
// One single-bit interrupt-enable field per protected peripheral, each with
// a reader (`*_R`) and writer (`*_W`) proxy type alias.
#[doc = "Register `GTZC1_TZIC_IER3` reader"]
pub type R = crate::R<GTZC1_TZIC_IER3_SPEC>;
#[doc = "Register `GTZC1_TZIC_IER3` writer"]
pub type W = crate::W<GTZC1_TZIC_IER3_SPEC>;
#[doc = "Field `LPTIM6IE` reader - illegal access interrupt enable for LPTIM6"]
pub type LPTIM6IE_R = crate::BitReader;
#[doc = "Field `LPTIM6IE` writer - illegal access interrupt enable for LPTIM6"]
pub type LPTIM6IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VREFBUFIE` reader - illegal access interrupt enable for VREFBUF"]
pub type VREFBUFIE_R = crate::BitReader;
#[doc = "Field `VREFBUFIE` writer - illegal access interrupt enable for VREFBUF"]
pub type VREFBUFIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CRCIE` reader - illegal access interrupt enable for CRC"]
pub type CRCIE_R = crate::BitReader;
#[doc = "Field `CRCIE` writer - illegal access interrupt enable for CRC"]
pub type CRCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CORDICIE` reader - illegal access interrupt enable for CORDIC"]
pub type CORDICIE_R = crate::BitReader;
#[doc = "Field `CORDICIE` writer - illegal access interrupt enable for CORDIC"]
pub type CORDICIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FMACIE` reader - illegal access interrupt enable for FMAC"]
pub type FMACIE_R = crate::BitReader;
#[doc = "Field `FMACIE` writer - illegal access interrupt enable for FMAC"]
pub type FMACIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ETHIE` reader - illegal access interrupt enable for register of ETH"]
pub type ETHIE_R = crate::BitReader;
#[doc = "Field `ETHIE` writer - illegal access interrupt enable for register of ETH"]
pub type ETHIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ICACHEIE` reader - illegal access interrupt enable for ICACHE"]
pub type ICACHEIE_R = crate::BitReader;
#[doc = "Field `ICACHEIE` writer - illegal access interrupt enable for ICACHE"]
pub type ICACHEIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DCACHEIE` reader - illegal access interrupt enable for DCACHE"]
pub type DCACHEIE_R = crate::BitReader;
#[doc = "Field `DCACHEIE` writer - illegal access interrupt enable for DCACHE"]
pub type DCACHEIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ADC12IE` reader - illegal access interrupt enable for ADC1 and ADC2"]
pub type ADC12IE_R = crate::BitReader;
#[doc = "Field `ADC12IE` writer - illegal access interrupt enable for ADC1 and ADC2"]
pub type ADC12IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DCMIIE` reader - illegal access interrupt enable for DCMI"]
pub type DCMIIE_R = crate::BitReader;
#[doc = "Field `DCMIIE` writer - illegal access interrupt enable for DCMI"]
pub type DCMIIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AESIE` reader - illegal access interrupt enable for AES"]
pub type AESIE_R = crate::BitReader;
#[doc = "Field `AESIE` writer - illegal access interrupt enable for AES"]
pub type AESIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HASHIE` reader - illegal access interrupt enable for HASH"]
pub type HASHIE_R = crate::BitReader;
#[doc = "Field `HASHIE` writer - illegal access interrupt enable for HASH"]
pub type HASHIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RNGIE` reader - illegal access interrupt enable for RNG"]
pub type RNGIE_R = crate::BitReader;
#[doc = "Field `RNGIE` writer - illegal access interrupt enable for RNG"]
pub type RNGIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SAESIE` reader - illegal access interrupt enable for SAES"]
pub type SAESIE_R = crate::BitReader;
#[doc = "Field `SAESIE` writer - illegal access interrupt enable for SAES"]
pub type SAESIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PKAIE` reader - illegal access interrupt enable for PKA"]
pub type PKAIE_R = crate::BitReader;
#[doc = "Field `PKAIE` writer - illegal access interrupt enable for PKA"]
pub type PKAIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SDMMC2IE` reader - illegal access interrupt enable for SDMMC2"]
pub type SDMMC2IE_R = crate::BitReader;
#[doc = "Field `SDMMC2IE` writer - illegal access interrupt enable for SDMMC2"]
pub type SDMMC2IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SDMMC1IE` reader - illegal access interrupt enable for SDMMC1"]
pub type SDMMC1IE_R = crate::BitReader;
#[doc = "Field `SDMMC1IE` writer - illegal access interrupt enable for SDMMC1"]
pub type SDMMC1IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FMCIE` reader - illegal access interrupt enable for FMC"]
pub type FMCIE_R = crate::BitReader;
#[doc = "Field `FMCIE` writer - illegal access interrupt enable for FMC"]
pub type FMCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OCTOSPI1IE` reader - illegal access interrupt enable for OCTOSPI1"]
pub type OCTOSPI1IE_R = crate::BitReader;
#[doc = "Field `OCTOSPI1IE` writer - illegal access interrupt enable for OCTOSPI1"]
pub type OCTOSPI1IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RAMCFGIE` reader - illegal access interrupt enable for RAMSCFG"]
pub type RAMCFGIE_R = crate::BitReader;
#[doc = "Field `RAMCFGIE` writer - illegal access interrupt enable for RAMSCFG"]
pub type RAMCFGIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    // Read-side accessors: each extracts one enable bit from the 32-bit
    // register value (bit positions 2..=7 and 25 are reserved on this part).
    #[doc = "Bit 0 - illegal access interrupt enable for LPTIM6"]
    #[inline(always)]
    pub fn lptim6ie(&self) -> LPTIM6IE_R {
        LPTIM6IE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - illegal access interrupt enable for VREFBUF"]
    #[inline(always)]
    pub fn vrefbufie(&self) -> VREFBUFIE_R {
        VREFBUFIE_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 8 - illegal access interrupt enable for CRC"]
    #[inline(always)]
    pub fn crcie(&self) -> CRCIE_R {
        CRCIE_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - illegal access interrupt enable for CORDIC"]
    #[inline(always)]
    pub fn cordicie(&self) -> CORDICIE_R {
        CORDICIE_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - illegal access interrupt enable for FMAC"]
    #[inline(always)]
    pub fn fmacie(&self) -> FMACIE_R {
        FMACIE_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - illegal access interrupt enable for register of ETH"]
    #[inline(always)]
    pub fn ethie(&self) -> ETHIE_R {
        ETHIE_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - illegal access interrupt enable for ICACHE"]
    #[inline(always)]
    pub fn icacheie(&self) -> ICACHEIE_R {
        ICACHEIE_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - illegal access interrupt enable for DCACHE"]
    #[inline(always)]
    pub fn dcacheie(&self) -> DCACHEIE_R {
        DCACHEIE_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - illegal access interrupt enable for ADC1 and ADC2"]
    #[inline(always)]
    pub fn adc12ie(&self) -> ADC12IE_R {
        ADC12IE_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - illegal access interrupt enable for DCMI"]
    #[inline(always)]
    pub fn dcmiie(&self) -> DCMIIE_R {
        DCMIIE_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 16 - illegal access interrupt enable for AES"]
    #[inline(always)]
    pub fn aesie(&self) -> AESIE_R {
        AESIE_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - illegal access interrupt enable for HASH"]
    #[inline(always)]
    pub fn hashie(&self) -> HASHIE_R {
        HASHIE_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - illegal access interrupt enable for RNG"]
    #[inline(always)]
    pub fn rngie(&self) -> RNGIE_R {
        RNGIE_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - illegal access interrupt enable for SAES"]
    #[inline(always)]
    pub fn saesie(&self) -> SAESIE_R {
        SAESIE_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - illegal access interrupt enable for PKA"]
    #[inline(always)]
    pub fn pkaie(&self) -> PKAIE_R {
        PKAIE_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - illegal access interrupt enable for SDMMC2"]
    #[inline(always)]
    pub fn sdmmc2ie(&self) -> SDMMC2IE_R {
        SDMMC2IE_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - illegal access interrupt enable for SDMMC1"]
    #[inline(always)]
    pub fn sdmmc1ie(&self) -> SDMMC1IE_R {
        SDMMC1IE_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 23 - illegal access interrupt enable for FMC"]
    #[inline(always)]
    pub fn fmcie(&self) -> FMCIE_R {
        FMCIE_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - illegal access interrupt enable for OCTOSPI1"]
    #[inline(always)]
    pub fn octospi1ie(&self) -> OCTOSPI1IE_R {
        OCTOSPI1IE_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 26 - illegal access interrupt enable for RAMSCFG"]
    #[inline(always)]
    pub fn ramcfgie(&self) -> RAMCFGIE_R {
        RAMCFGIE_R::new(((self.bits >> 26) & 1) != 0)
    }
}
impl W {
    // Write-side accessors: each returns a bit-writer proxy positioned at the
    // field's offset (const generic O) in the 32-bit register.
    #[doc = "Bit 0 - illegal access interrupt enable for LPTIM6"]
    #[inline(always)]
    #[must_use]
    pub fn lptim6ie(&mut self) -> LPTIM6IE_W<GTZC1_TZIC_IER3_SPEC, 0> {
        LPTIM6IE_W::new(self)
    }
    #[doc = "Bit 1 - illegal access interrupt enable for VREFBUF"]
    #[inline(always)]
    #[must_use]
    pub fn vrefbufie(&mut self) -> VREFBUFIE_W<GTZC1_TZIC_IER3_SPEC, 1> {
        VREFBUFIE_W::new(self)
    }
    #[doc = "Bit 8 - illegal access interrupt enable for CRC"]
    #[inline(always)]
    #[must_use]
    pub fn crcie(&mut self) -> CRCIE_W<GTZC1_TZIC_IER3_SPEC, 8> {
        CRCIE_W::new(self)
    }
    #[doc = "Bit 9 - illegal access interrupt enable for CORDIC"]
    #[inline(always)]
    #[must_use]
    pub fn cordicie(&mut self) -> CORDICIE_W<GTZC1_TZIC_IER3_SPEC, 9> {
        CORDICIE_W::new(self)
    }
    #[doc = "Bit 10 - illegal access interrupt enable for FMAC"]
    #[inline(always)]
    #[must_use]
    pub fn fmacie(&mut self) -> FMACIE_W<GTZC1_TZIC_IER3_SPEC, 10> {
        FMACIE_W::new(self)
    }
    #[doc = "Bit 11 - illegal access interrupt enable for register of ETH"]
    #[inline(always)]
    #[must_use]
    pub fn ethie(&mut self) -> ETHIE_W<GTZC1_TZIC_IER3_SPEC, 11> {
        ETHIE_W::new(self)
    }
    #[doc = "Bit 12 - illegal access interrupt enable for ICACHE"]
    #[inline(always)]
    #[must_use]
    pub fn icacheie(&mut self) -> ICACHEIE_W<GTZC1_TZIC_IER3_SPEC, 12> {
        ICACHEIE_W::new(self)
    }
    #[doc = "Bit 13 - illegal access interrupt enable for DCACHE"]
    #[inline(always)]
    #[must_use]
    pub fn dcacheie(&mut self) -> DCACHEIE_W<GTZC1_TZIC_IER3_SPEC, 13> {
        DCACHEIE_W::new(self)
    }
    #[doc = "Bit 14 - illegal access interrupt enable for ADC1 and ADC2"]
    #[inline(always)]
    #[must_use]
    pub fn adc12ie(&mut self) -> ADC12IE_W<GTZC1_TZIC_IER3_SPEC, 14> {
        ADC12IE_W::new(self)
    }
    #[doc = "Bit 15 - illegal access interrupt enable for DCMI"]
    #[inline(always)]
    #[must_use]
    pub fn dcmiie(&mut self) -> DCMIIE_W<GTZC1_TZIC_IER3_SPEC, 15> {
        DCMIIE_W::new(self)
    }
    #[doc = "Bit 16 - illegal access interrupt enable for AES"]
    #[inline(always)]
    #[must_use]
    pub fn aesie(&mut self) -> AESIE_W<GTZC1_TZIC_IER3_SPEC, 16> {
        AESIE_W::new(self)
    }
    #[doc = "Bit 17 - illegal access interrupt enable for HASH"]
    #[inline(always)]
    #[must_use]
    pub fn hashie(&mut self) -> HASHIE_W<GTZC1_TZIC_IER3_SPEC, 17> {
        HASHIE_W::new(self)
    }
    #[doc = "Bit 18 - illegal access interrupt enable for RNG"]
    #[inline(always)]
    #[must_use]
    pub fn rngie(&mut self) -> RNGIE_W<GTZC1_TZIC_IER3_SPEC, 18> {
        RNGIE_W::new(self)
    }
    #[doc = "Bit 19 - illegal access interrupt enable for SAES"]
    #[inline(always)]
    #[must_use]
    pub fn saesie(&mut self) -> SAESIE_W<GTZC1_TZIC_IER3_SPEC, 19> {
        SAESIE_W::new(self)
    }
    #[doc = "Bit 20 - illegal access interrupt enable for PKA"]
    #[inline(always)]
    #[must_use]
    pub fn pkaie(&mut self) -> PKAIE_W<GTZC1_TZIC_IER3_SPEC, 20> {
        PKAIE_W::new(self)
    }
    #[doc = "Bit 21 - illegal access interrupt enable for SDMMC2"]
    #[inline(always)]
    #[must_use]
    pub fn sdmmc2ie(&mut self) -> SDMMC2IE_W<GTZC1_TZIC_IER3_SPEC, 21> {
        SDMMC2IE_W::new(self)
    }
    #[doc = "Bit 22 - illegal access interrupt enable for SDMMC1"]
    #[inline(always)]
    #[must_use]
    pub fn sdmmc1ie(&mut self) -> SDMMC1IE_W<GTZC1_TZIC_IER3_SPEC, 22> {
        SDMMC1IE_W::new(self)
    }
    #[doc = "Bit 23 - illegal access interrupt enable for FMC"]
    #[inline(always)]
    #[must_use]
    pub fn fmcie(&mut self) -> FMCIE_W<GTZC1_TZIC_IER3_SPEC, 23> {
        FMCIE_W::new(self)
    }
    #[doc = "Bit 24 - illegal access interrupt enable for OCTOSPI1"]
    #[inline(always)]
    #[must_use]
    pub fn octospi1ie(&mut self) -> OCTOSPI1IE_W<GTZC1_TZIC_IER3_SPEC, 24> {
        OCTOSPI1IE_W::new(self)
    }
    #[doc = "Bit 26 - illegal access interrupt enable for RAMSCFG"]
    #[inline(always)]
    #[must_use]
    pub fn ramcfgie(&mut self) -> RAMCFGIE_W<GTZC1_TZIC_IER3_SPEC, 26> {
        RAMCFGIE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // unsafe per svd2rust convention: raw writes bypass the typed field
    // proxies, so the caller is responsible for a valid bit pattern.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "GTZC1 TZIC interrupt enable register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gtzc1_tzic_ier3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gtzc1_tzic_ier3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct GTZC1_TZIC_IER3_SPEC;
// Marker impls wiring this register into the generic svd2rust register API.
impl crate::RegisterSpec for GTZC1_TZIC_IER3_SPEC {
    // underlying register width: 32 bits
    type Ux = u32;
}
#[doc = "`read()` method returns [`gtzc1_tzic_ier3::R`](R) reader structure"]
impl crate::Readable for GTZC1_TZIC_IER3_SPEC {}
#[doc = "`write(|w| ..)` method takes [`gtzc1_tzic_ier3::W`](W) writer structure"]
impl crate::Writable for GTZC1_TZIC_IER3_SPEC {
    // no write-1-to-clear / write-0-to-clear fields in this register
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets GTZC1_TZIC_IER3 to value 0"]
impl crate::Resettable for GTZC1_TZIC_IER3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Auto-generated (svd2rust-style) accessors for the LPUART HWCFGR1 register:
// eight 4-bit hardware-configuration fields, each with reader/writer aliases.
#[doc = "Register `HWCFGR1` reader"]
pub type R = crate::R<HWCFGR1_SPEC>;
#[doc = "Register `HWCFGR1` writer"]
pub type W = crate::W<HWCFGR1_SPEC>;
#[doc = "Field `CFG1` reader - LUART hardware configuration 1"]
pub type CFG1_R = crate::FieldReader;
#[doc = "Field `CFG1` writer - LUART hardware configuration 1"]
pub type CFG1_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG2` reader - LUART hardware configuration 2"]
pub type CFG2_R = crate::FieldReader;
#[doc = "Field `CFG2` writer - LUART hardware configuration 2"]
pub type CFG2_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG3` reader - LUART hardware configuration 1"]
pub type CFG3_R = crate::FieldReader;
#[doc = "Field `CFG3` writer - LUART hardware configuration 1"]
pub type CFG3_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG4` reader - LUART hardware configuration 2"]
pub type CFG4_R = crate::FieldReader;
#[doc = "Field `CFG4` writer - LUART hardware configuration 2"]
pub type CFG4_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG5` reader - LUART hardware configuration 2"]
pub type CFG5_R = crate::FieldReader;
#[doc = "Field `CFG5` writer - LUART hardware configuration 2"]
pub type CFG5_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG6` reader - LUART hardware configuration 2"]
pub type CFG6_R = crate::FieldReader;
#[doc = "Field `CFG6` writer - LUART hardware configuration 2"]
pub type CFG6_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG7` reader - LUART hardware configuration 2"]
pub type CFG7_R = crate::FieldReader;
#[doc = "Field `CFG7` writer - LUART hardware configuration 2"]
pub type CFG7_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CFG8` reader - LUART hardware configuration 2"]
pub type CFG8_R = crate::FieldReader;
#[doc = "Field `CFG8` writer - LUART hardware configuration 2"]
pub type CFG8_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
impl R {
    /// Extracts the 4-bit configuration field that starts at bit `offset`.
    #[inline(always)]
    fn nibble(&self, offset: u32) -> u8 {
        ((self.bits >> offset) & 0x0f) as u8
    }
    #[doc = "Bits 0:3 - LUART hardware configuration 1"]
    #[inline(always)]
    pub fn cfg1(&self) -> CFG1_R {
        CFG1_R::new(self.nibble(0))
    }
    #[doc = "Bits 4:7 - LUART hardware configuration 2"]
    #[inline(always)]
    pub fn cfg2(&self) -> CFG2_R {
        CFG2_R::new(self.nibble(4))
    }
    #[doc = "Bits 8:11 - LUART hardware configuration 3"]
    #[inline(always)]
    pub fn cfg3(&self) -> CFG3_R {
        CFG3_R::new(self.nibble(8))
    }
    #[doc = "Bits 12:15 - LUART hardware configuration 4"]
    #[inline(always)]
    pub fn cfg4(&self) -> CFG4_R {
        CFG4_R::new(self.nibble(12))
    }
    #[doc = "Bits 16:19 - LUART hardware configuration 5"]
    #[inline(always)]
    pub fn cfg5(&self) -> CFG5_R {
        CFG5_R::new(self.nibble(16))
    }
    #[doc = "Bits 20:23 - LUART hardware configuration 6"]
    #[inline(always)]
    pub fn cfg6(&self) -> CFG6_R {
        CFG6_R::new(self.nibble(20))
    }
    #[doc = "Bits 24:27 - LUART hardware configuration 7"]
    #[inline(always)]
    pub fn cfg7(&self) -> CFG7_R {
        CFG7_R::new(self.nibble(24))
    }
    #[doc = "Bits 28:31 - LUART hardware configuration 8"]
    #[inline(always)]
    pub fn cfg8(&self) -> CFG8_R {
        CFG8_R::new(self.nibble(28))
    }
}
impl W {
    #[doc = "Bits 0:3 - LUART hardware configuration 1"]
    #[inline(always)]
    #[must_use]
    pub fn cfg1(&mut self) -> CFG1_W<HWCFGR1_SPEC, 0> {
        CFG1_W::new(self)
    }
    #[doc = "Bits 4:7 - LUART hardware configuration 2"]
    #[inline(always)]
    #[must_use]
    pub fn cfg2(&mut self) -> CFG2_W<HWCFGR1_SPEC, 4> {
        CFG2_W::new(self)
    }
    #[doc = "Bits 8:11 - LUART hardware configuration 3"]
    #[inline(always)]
    #[must_use]
    pub fn cfg3(&mut self) -> CFG3_W<HWCFGR1_SPEC, 8> {
        CFG3_W::new(self)
    }
    #[doc = "Bits 12:15 - LUART hardware configuration 4"]
    #[inline(always)]
    #[must_use]
    pub fn cfg4(&mut self) -> CFG4_W<HWCFGR1_SPEC, 12> {
        CFG4_W::new(self)
    }
    #[doc = "Bits 16:19 - LUART hardware configuration 5"]
    #[inline(always)]
    #[must_use]
    pub fn cfg5(&mut self) -> CFG5_W<HWCFGR1_SPEC, 16> {
        CFG5_W::new(self)
    }
    #[doc = "Bits 20:23 - LUART hardware configuration 6"]
    #[inline(always)]
    #[must_use]
    pub fn cfg6(&mut self) -> CFG6_W<HWCFGR1_SPEC, 20> {
        CFG6_W::new(self)
    }
    #[doc = "Bits 24:27 - LUART hardware configuration 7"]
    #[inline(always)]
    #[must_use]
    pub fn cfg7(&mut self) -> CFG7_W<HWCFGR1_SPEC, 24> {
        CFG7_W::new(self)
    }
    #[doc = "Bits 28:31 - LUART hardware configuration 8"]
    #[inline(always)]
    #[must_use]
    pub fn cfg8(&mut self) -> CFG8_W<HWCFGR1_SPEC, 28> {
        CFG8_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    // No field-level validation is performed here; the caller must ensure the
    // bit pattern is valid for this register (hence `unsafe`).
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "LPUART Hardware Configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hwcfgr1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct HWCFGR1_SPEC;
impl crate::RegisterSpec for HWCFGR1_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`hwcfgr1::R`](R) reader structure"]
impl crate::Readable for HWCFGR1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`hwcfgr1::W`](W) writer structure"]
impl crate::Writable for HWCFGR1_SPEC {
    // No write-one-to-clear / write-zero-to-set fields: a plain
    // read-modify-write preserves all other fields, so both bitmaps are empty.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets HWCFGR1 to value 0x3110_0000"]
impl crate::Resettable for HWCFGR1_SPEC {
    const RESET_VALUE: Self::Ux = 0x3110_0000;
}
|
// This file is part of dpdk. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/dpdk/master/COPYRIGHT. No part of dpdk, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2017 The developers of dpdk. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/dpdk/master/COPYRIGHT.
/// Per-device verbs state owned by one event-channel map entry: the device's
/// protection domain, its extended-reliable-connection (XRC) domain and a
/// boxed epoll context.
pub struct EventChannelVerbMapEntry<'a>
{
    protectionDomain: *mut ibv_pd,
    extendedReliableConnectionDomain: *mut ibv_xrcd,
    // NOTE(review): despite the name this field holds an `EPollContextChoice`,
    // not a raw completion channel — presumably the completion-channel variant
    // of that enum (see the matches in the inherent impl); confirm.
    completionChannel: Box<EPollContextChoice<'a>>,
}
impl<'a> Drop for EventChannelVerbMapEntry<'a>
{
    /// Destroys the owned verbs objects: the XRC domain first, then the
    /// protection domain.
    ///
    /// NOTE(review): `destroy()` on these raw pointers presumably wraps the
    /// corresponding `ibv_*` teardown calls; verify that no queue pairs or
    /// completion queues still reference either domain at drop time.
    #[inline(always)]
    fn drop(&mut self)
    {
        self.extendedReliableConnectionDomain.destroy();
        self.protectionDomain.destroy();
    }
}
impl<'a> VerbMapEntry<'a> for EventChannelVerbMapEntry<'a>
{
    type ConstructionParameters = EPoll<EPollContextChoice<'a>>;
    /// Creates an entry for the device context `verbs`: allocates a protection
    /// domain, creates an XRC domain without an inode, and opens a new
    /// completion channel registered with the supplied epoll instance.
    #[inline(always)]
    fn create(constructionParameters: &'a Self::ConstructionParameters, verbs: *mut ibv_context) -> Self
    {
        Self
        {
            protectionDomain: verbs.allocateProtectionDomain(),
            extendedReliableConnectionDomain: verbs.createExtendedReliableConnectionDomainWithoutInode(),
            // NOTE(review): `CompletionChannel::new` must yield the
            // `Box<EPollContextChoice>` this field requires — confirm its
            // return type, which is not visible in this file.
            completionChannel: CompletionChannel::new(constructionParameters, verbs),
        }
    }
}
impl<'a> EventChannelVerbMapEntry<'a>
{
    /// Creates a completion queue on the owned completion channel.
    ///
    /// Silently does nothing when the boxed context is not the
    /// `CompletionChannel` variant — NOTE(review): consider whether that case
    /// should be unreachable (panic) instead of a no-op.
    #[inline(always)]
    pub fn createCompletionQueue(&mut self, atLeastThisNumberOfCompletionQueueEvents: u32, completionQueueContext: *mut c_void)
    {
        match *self.completionChannel
        {
            EPollContextChoice::CompletionChannel(ref mut context) => context.createCompletionQueue(atLeastThisNumberOfCompletionQueueEvents, completionQueueContext),
            _ => (),
        }
    }
    /// As `createCompletionQueue`, but creates an extended completion queue
    /// with explicit work-completion flags; also a silent no-op for other
    /// context variants.
    #[inline(always)]
    pub fn createExtendedCompletionQueue(&mut self, atLeastThisNumberOfCompletionQueueEvents: u32, completionQueueContext: *mut c_void, workCompletionFlags: ibv_create_cq_wc_flags, lockLessButNotThreadSafe: bool)
    {
        match *self.completionChannel
        {
            EPollContextChoice::CompletionChannel(ref mut context) => context.createExtendedCompletionQueue(atLeastThisNumberOfCompletionQueueEvents, completionQueueContext, workCompletionFlags, lockLessButNotThreadSafe),
            _ => (),
        }
    }
    // TODO: Must call ibv_destroy_qp()
    /// Creates a reliably-connected (RC) extended queue pair on this entry's
    /// protection domain and XRC domain, returning the raw queue-pair pointer
    /// and the capabilities reported back in the attributes by the driver.
    /// Panics (via `panic_on_null!`) if `rust_ibv_create_qp_ex` returns null.
    ///
    /// `maximumTsoHeaderSize` is only supported by Mellanox mlx5 drivers;
    /// `scatterFrameCheckSequence` and `cvLanStripping` are suspected to be
    /// mlx5-only as well.
    #[inline(always)]
    pub fn createReliablyConnectedExtendedQueuePair(&mut self, queuePairContext: *mut c_void, sendCompletionQueue: *mut ibv_cq, receiveCompletionQueue: *mut ibv_cq, eachWorkRequestSubmittedToTheSendQueueGeneratesACompletionEntry: bool, queuePairCapabilities: &QueuePairCapabilities, scatterFrameCheckSequence: bool, cvLanStripping: bool, maximumTsoHeaderSize: u16) -> (*mut ibv_qp, QueuePairCapabilities)
    {
        debug_assert!(!sendCompletionQueue.is_null(), "sendCompletionQueue is null");
        debug_assert!(!receiveCompletionQueue.is_null(), "receiveCompletionQueue is null");
        let verbs = self.protectionDomain.verbs();
        // Values mirror the ibverbs `ibv_qp_create_flags` constants — TODO
        // confirm they match the bound ibverbs version.
        #[allow(dead_code)] const IBV_QP_CREATE_BLOCK_SELF_MCAST_LB: u32 = 2;
        const IBV_QP_CREATE_SCATTER_FCS: u32 = 256;
        const IBV_QP_CREATE_CVLAN_STRIPPING: u32 = 512;
        let mut creationFlags = if unlikely(scatterFrameCheckSequence)
        {
            IBV_QP_CREATE_SCATTER_FCS
        }
        else
        {
            0
        };
        if unlikely(cvLanStripping)
        {
            creationFlags |= IBV_QP_CREATE_CVLAN_STRIPPING;
        }
        // `comp_mask` bits tell the driver which optional attribute fields
        // below are valid (ibverbs `ibv_qp_init_attr_mask`).
        const IBV_QP_INIT_ATTR_PD: u32 = 1;
        const IBV_QP_INIT_ATTR_XRCD: u32 = 2;
        const IBV_QP_INIT_ATTR_CREATE_FLAGS: u32 = 4;
        const IBV_QP_INIT_ATTR_MAX_TSO_HEADER: u32 = 8;
        #[allow(dead_code)] const IBV_QP_INIT_ATTR_IND_TABLE: u32 = 16;
        #[allow(dead_code)] const IBV_QP_INIT_ATTR_RX_HASH: u32 = 32;
        let mut compMask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_XRCD | IBV_QP_INIT_ATTR_CREATE_FLAGS;
        if maximumTsoHeaderSize > 0
        {
            compMask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
        }
        let mut attributes = ibv_qp_init_attr_ex
        {
            qp_context: queuePairContext,
            send_cq: sendCompletionQueue,
            recv_cq: receiveCompletionQueue,
            srq: null_mut(),
            cap: queuePairCapabilities.as_ibv_qp_cap(),
            // 1 => every send work request generates a completion entry;
            // 0 => only work requests that ask for one do.
            sq_sig_all: if unlikely(eachWorkRequestSubmittedToTheSendQueueGeneratesACompletionEntry)
            {
                1
            }
            else
            {
                0
            },
            qp_type: ibv_qp_type::IBV_QPT_RC, // also IBV_QPT_XRC_SEND and IBV_QPT_XRC_RECV
            comp_mask: compMask,
            pd: self.protectionDomain,
            xrcd: self.extendedReliableConnectionDomain,
            create_flags: creationFlags,
            max_tso_header: maximumTsoHeaderSize,
            rwq_ind_tbl: null_mut(),
            // Zeroed and ignored: IBV_QP_INIT_ATTR_RX_HASH is not set in
            // `comp_mask` above.
            rx_hash_conf: ibv_rx_hash_conf
            {
                rx_hash_function: 0,
                rx_hash_key_len: 0,
                rx_hash_key: null_mut(),
                rx_hash_fields_mask: 0,
            },
        };
        let queuePairPointer = panic_on_null!(rust_ibv_create_qp_ex, verbs, &mut attributes);
        (queuePairPointer, QueuePairCapabilities::from(&attributes))
    }
}
|
#![no_std]
#![no_main]
extern crate memdmp;
// Unmangled entry point with a Windows-style wide-character `wmain` signature;
// both arguments are ignored and control is handed straight to the shellcode
// payload, whose result is returned as the process exit status.
// NOTE(review): assumes the loader provides a valid call frame — confirm the
// target's expected calling convention for this entry point.
#[no_mangle]
pub unsafe extern fn wmain(_argc: isize, _argv: *const *const u16) -> isize {
    memdmp::shellcode()
}
|
// A struct that owns heap resources (two boxed i32s) and implements `Clone`.
#[derive(Clone, Debug)]
struct Pair(Box<i32>, Box<i32>);
// A resource-free unit struct `Nil`; deriving `Copy` makes assignment
// duplicate it instead of moving it.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Nil;
fn main() {
copy();
move_pair();
clone();
}
/* Variable copying
 */
fn copy() {
    // Initialize a `Nil` value.
    let original = Nil;
    // Assignment copies rather than moves, because `Nil` is `Copy`.
    let duplicate = original;
    // Both bindings remain independently usable afterwards.
    assert_eq!(original, Nil);
    assert_eq!(duplicate, Nil);
}
/* Variable moving
 */
fn move_pair() {
    // Instantiate a `Pair` owning two heap allocations.
    let first = Pair(Box::new(1), Box::new(2));
    println!("original: {:?}", first);
    // Binding `first` to `second` moves ownership of the resources.
    let second = first;
    println!("copy: {:?}", second);
    // println!("{:?}", first);
    // Ownership has been transferred, so using `first` would not compile.
}
/// Demonstrates explicit cloning: the clone owns independent resources and
/// outlives the destruction of the original.
fn clone() {
    // Instantiate a `Pair`.
    let original = Pair(Box::new(1), Box::new(2));
    // Deep-copy it; `cloned` owns its own boxes.
    let cloned = original.clone();
    // Destroy the original with std::mem::drop.
    drop(original);
    // The clone is still fully usable.
    assert_eq!(*cloned.0, 1);
    assert_eq!(*cloned.1, 2);
}
/* Note: the drop() function comes from std::mem::drop and is used to destroy
   an object by taking ownership of it. */
|
#![allow(missing_docs)]
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use bytes::{Bytes, BytesMut};
use fallible_iterator::FallibleIterator;
use memchr::memchr;
use std::cmp;
use std::io::{self, Read};
use std::ops::Range;
use std::str;
use crate::{Lsn, Oid};
// top-level message tags
// Single-byte discriminants that introduce each backend message; `Message::parse`
// dispatches on these.
pub const PARSE_COMPLETE_TAG: u8 = b'1';
pub const BIND_COMPLETE_TAG: u8 = b'2';
pub const CLOSE_COMPLETE_TAG: u8 = b'3';
pub const NOTIFICATION_RESPONSE_TAG: u8 = b'A';
pub const COPY_DONE_TAG: u8 = b'c';
pub const COMMAND_COMPLETE_TAG: u8 = b'C';
pub const COPY_DATA_TAG: u8 = b'd';
pub const DATA_ROW_TAG: u8 = b'D';
pub const ERROR_RESPONSE_TAG: u8 = b'E';
pub const COPY_IN_RESPONSE_TAG: u8 = b'G';
pub const COPY_OUT_RESPONSE_TAG: u8 = b'H';
pub const COPY_BOTH_RESPONSE_TAG: u8 = b'W';
pub const EMPTY_QUERY_RESPONSE_TAG: u8 = b'I';
pub const BACKEND_KEY_DATA_TAG: u8 = b'K';
pub const NO_DATA_TAG: u8 = b'n';
pub const NOTICE_RESPONSE_TAG: u8 = b'N';
pub const AUTHENTICATION_TAG: u8 = b'R';
pub const PORTAL_SUSPENDED_TAG: u8 = b's';
pub const PARAMETER_STATUS_TAG: u8 = b'S';
pub const PARAMETER_DESCRIPTION_TAG: u8 = b't';
pub const ROW_DESCRIPTION_TAG: u8 = b'T';
pub const READY_FOR_QUERY_TAG: u8 = b'Z';
// replication message tags
// Used by `ReplicationMessage::parse` on streaming-replication payloads.
pub const XLOG_DATA_TAG: u8 = b'w';
pub const PRIMARY_KEEPALIVE_TAG: u8 = b'k';
// logical replication message tags
// Used by `LogicalReplicationMessage::parse`; note these values overlap the
// top-level tags — they live in a separate namespace.
const BEGIN_TAG: u8 = b'B';
const COMMIT_TAG: u8 = b'C';
const ORIGIN_TAG: u8 = b'O';
const RELATION_TAG: u8 = b'R';
const TYPE_TAG: u8 = b'Y';
const INSERT_TAG: u8 = b'I';
const UPDATE_TAG: u8 = b'U';
const DELETE_TAG: u8 = b'D';
const TRUNCATE_TAG: u8 = b'T';
const TUPLE_NEW_TAG: u8 = b'N';
const TUPLE_KEY_TAG: u8 = b'K';
const TUPLE_OLD_TAG: u8 = b'O';
const TUPLE_DATA_NULL_TAG: u8 = b'n';
const TUPLE_DATA_TOAST_TAG: u8 = b'u';
const TUPLE_DATA_TEXT_TAG: u8 = b't';
// replica identity tags
const REPLICA_IDENTITY_DEFAULT_TAG: u8 = b'd';
const REPLICA_IDENTITY_NOTHING_TAG: u8 = b'n';
const REPLICA_IDENTITY_FULL_TAG: u8 = b'f';
const REPLICA_IDENTITY_INDEX_TAG: u8 = b'i';
/// The five-byte prefix (tag byte plus big-endian i32 length) shared by all
/// backend messages.
#[derive(Debug, Copy, Clone)]
pub struct Header {
    tag: u8,
    len: i32,
}
#[allow(clippy::len_without_is_empty)]
impl Header {
    /// Attempts to decode a message header from the front of `buf`.
    ///
    /// Returns `Ok(None)` when fewer than five bytes are available, and an
    /// `InvalidData` error when the encoded length is below 4 (the length
    /// field counts itself).
    #[inline]
    pub fn parse(buf: &[u8]) -> io::Result<Option<Header>> {
        let (&tag, rest) = match buf.split_first() {
            Some(parts) if buf.len() >= 5 => parts,
            _ => return Ok(None),
        };
        let len = BigEndian::read_i32(rest);
        if len >= 4 {
            Ok(Some(Header { tag, len }))
        } else {
            Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid message length: header length < 4",
            ))
        }
    }
    /// The message's tag byte.
    #[inline]
    pub fn tag(self) -> u8 {
        self.tag
    }
    /// The length recorded in the header.
    #[inline]
    pub fn len(self) -> i32 {
        self.len
    }
}
/// An enum representing Postgres backend messages.
///
/// Each variant corresponds to one of the tag constants defined at the top of
/// this module; variants with payloads wrap a typed body with accessors.
#[non_exhaustive]
pub enum Message {
    AuthenticationCleartextPassword,
    AuthenticationGss,
    AuthenticationKerberosV5,
    AuthenticationMd5Password(AuthenticationMd5PasswordBody),
    AuthenticationOk,
    AuthenticationScmCredential,
    AuthenticationSspi,
    AuthenticationGssContinue(AuthenticationGssContinueBody),
    AuthenticationSasl(AuthenticationSaslBody),
    AuthenticationSaslContinue(AuthenticationSaslContinueBody),
    AuthenticationSaslFinal(AuthenticationSaslFinalBody),
    BackendKeyData(BackendKeyDataBody),
    BindComplete,
    CloseComplete,
    CommandComplete(CommandCompleteBody),
    CopyData(CopyDataBody),
    CopyDone,
    CopyInResponse(CopyInResponseBody),
    CopyOutResponse(CopyOutResponseBody),
    CopyBothResponse(CopyBothResponseBody),
    DataRow(DataRowBody),
    EmptyQueryResponse,
    ErrorResponse(ErrorResponseBody),
    NoData,
    NoticeResponse(NoticeResponseBody),
    NotificationResponse(NotificationResponseBody),
    ParameterDescription(ParameterDescriptionBody),
    ParameterStatus(ParameterStatusBody),
    ParseComplete,
    PortalSuspended,
    ReadyForQuery(ReadyForQueryBody),
    RowDescription(RowDescriptionBody),
}
impl Message {
    /// Parses one backend message off the front of `buf`.
    ///
    /// Returns `Ok(None)` when the buffer does not yet contain a complete
    /// message; in that case capacity for the missing bytes is reserved so
    /// the caller can continue reading. On success the message's bytes are
    /// consumed from `buf`.
    #[inline]
    pub fn parse(buf: &mut BytesMut) -> io::Result<Option<Message>> {
        // Need at least the tag (1 byte) plus the length field (4 bytes).
        if buf.len() < 5 {
            let to_read = 5 - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }
        let tag = buf[0];
        let len = (&buf[1..5]).read_u32::<BigEndian>().unwrap();
        // The length counts itself, so anything below 4 is corrupt.
        if len < 4 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: parsing u32",
            ));
        }
        // `len` does not include the tag byte.
        let total_len = len as usize + 1;
        if buf.len() < total_len {
            let to_read = total_len - buf.len();
            buf.reserve(to_read);
            return Ok(None);
        }
        // Detach the full message; reading starts after the 5-byte header.
        let mut buf = Buffer {
            bytes: buf.split_to(total_len).freeze(),
            idx: 5,
        };
        let message = match tag {
            PARSE_COMPLETE_TAG => Message::ParseComplete,
            BIND_COMPLETE_TAG => Message::BindComplete,
            CLOSE_COMPLETE_TAG => Message::CloseComplete,
            NOTIFICATION_RESPONSE_TAG => {
                let process_id = buf.read_i32::<BigEndian>()?;
                let channel = buf.read_cstr()?;
                let message = buf.read_cstr()?;
                Message::NotificationResponse(NotificationResponseBody {
                    process_id,
                    channel,
                    message,
                })
            }
            COPY_DONE_TAG => Message::CopyDone,
            COMMAND_COMPLETE_TAG => {
                let tag = buf.read_cstr()?;
                Message::CommandComplete(CommandCompleteBody { tag })
            }
            COPY_DATA_TAG => {
                let storage = buf.read_all();
                Message::CopyData(CopyDataBody { storage })
            }
            DATA_ROW_TAG => {
                // `len` here is the column count; the ranges are parsed lazily.
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::DataRow(DataRowBody { storage, len })
            }
            ERROR_RESPONSE_TAG => {
                let storage = buf.read_all();
                Message::ErrorResponse(ErrorResponseBody { storage })
            }
            COPY_IN_RESPONSE_TAG => {
                let format = buf.read_u8()?;
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::CopyInResponse(CopyInResponseBody {
                    format,
                    len,
                    storage,
                })
            }
            COPY_OUT_RESPONSE_TAG => {
                let format = buf.read_u8()?;
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::CopyOutResponse(CopyOutResponseBody {
                    format,
                    len,
                    storage,
                })
            }
            COPY_BOTH_RESPONSE_TAG => {
                let format = buf.read_u8()?;
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::CopyBothResponse(CopyBothResponseBody {
                    format,
                    len,
                    storage,
                })
            }
            EMPTY_QUERY_RESPONSE_TAG => Message::EmptyQueryResponse,
            BACKEND_KEY_DATA_TAG => {
                let process_id = buf.read_i32::<BigEndian>()?;
                let secret_key = buf.read_i32::<BigEndian>()?;
                Message::BackendKeyData(BackendKeyDataBody {
                    process_id,
                    secret_key,
                })
            }
            NO_DATA_TAG => Message::NoData,
            NOTICE_RESPONSE_TAG => {
                let storage = buf.read_all();
                Message::NoticeResponse(NoticeResponseBody { storage })
            }
            // All authentication messages share tag 'R'; an i32 sub-code
            // immediately follows the header and selects the variant.
            AUTHENTICATION_TAG => match buf.read_i32::<BigEndian>()? {
                0 => Message::AuthenticationOk,
                2 => Message::AuthenticationKerberosV5,
                3 => Message::AuthenticationCleartextPassword,
                5 => {
                    let mut salt = [0; 4];
                    buf.read_exact(&mut salt)?;
                    Message::AuthenticationMd5Password(AuthenticationMd5PasswordBody { salt })
                }
                6 => Message::AuthenticationScmCredential,
                7 => Message::AuthenticationGss,
                8 => {
                    let storage = buf.read_all();
                    Message::AuthenticationGssContinue(AuthenticationGssContinueBody(storage))
                }
                9 => Message::AuthenticationSspi,
                10 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSasl(AuthenticationSaslBody(storage))
                }
                11 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSaslContinue(AuthenticationSaslContinueBody(storage))
                }
                12 => {
                    let storage = buf.read_all();
                    Message::AuthenticationSaslFinal(AuthenticationSaslFinalBody(storage))
                }
                tag => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("unknown authentication tag `{}`", tag),
                    ));
                }
            },
            PORTAL_SUSPENDED_TAG => Message::PortalSuspended,
            PARAMETER_STATUS_TAG => {
                let name = buf.read_cstr()?;
                let value = buf.read_cstr()?;
                Message::ParameterStatus(ParameterStatusBody { name, value })
            }
            PARAMETER_DESCRIPTION_TAG => {
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::ParameterDescription(ParameterDescriptionBody { storage, len })
            }
            ROW_DESCRIPTION_TAG => {
                let len = buf.read_u16::<BigEndian>()?;
                let storage = buf.read_all();
                Message::RowDescription(RowDescriptionBody { storage, len })
            }
            READY_FOR_QUERY_TAG => {
                let status = buf.read_u8()?;
                Message::ReadyForQuery(ReadyForQueryBody { status })
            }
            tag => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("unknown message tag `{}`", tag),
                ));
            }
        };
        // Every arm must consume the message body exactly; leftover bytes
        // indicate a length mismatch.
        if !buf.is_empty() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: expected buffer to be empty",
            ));
        }
        Ok(Some(message))
    }
}
/// An enum representing Postgres backend replication messages.
#[non_exhaustive]
#[derive(Debug)]
pub enum ReplicationMessage<D> {
    /// A chunk of WAL data.
    XLogData(XLogDataBody<D>),
    /// A keepalive sent by the primary.
    PrimaryKeepAlive(PrimaryKeepAliveBody),
}
impl ReplicationMessage<Bytes> {
    /// Parses a streaming-replication message from `buf`.
    #[inline]
    pub fn parse(buf: &Bytes) -> io::Result<Self> {
        // `Bytes::clone` is a cheap reference-count bump; no data is copied.
        let mut buf = Buffer {
            bytes: buf.clone(),
            idx: 0,
        };
        let tag = buf.read_u8()?;
        let replication_message = match tag {
            XLOG_DATA_TAG => {
                let wal_start = buf.read_u64::<BigEndian>()?;
                let wal_end = buf.read_u64::<BigEndian>()?;
                let timestamp = buf.read_i64::<BigEndian>()?;
                // Everything after the fixed header is the WAL payload.
                let data = buf.read_all();
                ReplicationMessage::XLogData(XLogDataBody {
                    wal_start,
                    wal_end,
                    timestamp,
                    data,
                })
            }
            PRIMARY_KEEPALIVE_TAG => {
                let wal_end = buf.read_u64::<BigEndian>()?;
                let timestamp = buf.read_i64::<BigEndian>()?;
                let reply = buf.read_u8()?;
                ReplicationMessage::PrimaryKeepAlive(PrimaryKeepAliveBody {
                    wal_end,
                    timestamp,
                    reply,
                })
            }
            tag => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("unknown replication message tag `{}`", tag),
                ));
            }
        };
        Ok(replication_message)
    }
}
/// A cursor over a reference-counted byte buffer; reads advance `idx` instead
/// of copying or splitting the underlying storage.
struct Buffer {
    bytes: Bytes,
    idx: usize,
}
impl Buffer {
    /// The not-yet-consumed bytes.
    #[inline]
    fn slice(&self) -> &[u8] {
        &self.bytes[self.idx..]
    }
    /// `true` once every byte has been consumed.
    #[inline]
    fn is_empty(&self) -> bool {
        self.slice().is_empty()
    }
    /// Reads a NUL-terminated string, returning the bytes before the NUL and
    /// consuming the terminator as well.
    #[inline]
    fn read_cstr(&mut self) -> io::Result<Bytes> {
        match memchr(0, self.slice()) {
            None => Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "unexpected EOF",
            )),
            Some(pos) => {
                let terminator = self.idx + pos;
                let cstr = self.bytes.slice(self.idx..terminator);
                self.idx = terminator + 1;
                Ok(cstr)
            }
        }
    }
    /// Consumes and returns everything that remains.
    #[inline]
    fn read_all(&mut self) -> Bytes {
        let remainder = self.bytes.slice(self.idx..);
        self.idx = self.bytes.len();
        remainder
    }
}
impl Read for Buffer {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let source = self.slice();
        let n = cmp::min(source.len(), buf.len());
        buf[..n].copy_from_slice(&source[..n]);
        self.idx += n;
        Ok(n)
    }
}
/// Body of an `AuthenticationMD5Password` message.
pub struct AuthenticationMd5PasswordBody {
    salt: [u8; 4],
}
impl AuthenticationMd5PasswordBody {
    /// The 4-byte salt to use when hashing the password.
    #[inline]
    pub fn salt(&self) -> [u8; 4] {
        self.salt
    }
}
/// Body of an `AuthenticationGSSContinue` message.
pub struct AuthenticationGssContinueBody(Bytes);
impl AuthenticationGssContinueBody {
    /// The raw GSSAPI/SSPI authentication data.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.0
    }
}
/// Body of an `AuthenticationSASL` message.
pub struct AuthenticationSaslBody(Bytes);
impl AuthenticationSaslBody {
    /// Iterates over the SASL mechanism names offered by the server.
    #[inline]
    pub fn mechanisms(&self) -> SaslMechanisms<'_> {
        SaslMechanisms(&self.0)
    }
}
/// A fallible iterator over the NUL-separated SASL mechanism names in an
/// `AuthenticationSASL` body; the list ends with an empty string.
pub struct SaslMechanisms<'a>(&'a [u8]);
impl<'a> FallibleIterator for SaslMechanisms<'a> {
    type Item = &'a str;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<&'a str>> {
        let terminator = find_null(self.0, 0)?;
        if terminator > 0 {
            let mechanism = get_str(&self.0[..terminator])?;
            self.0 = &self.0[terminator + 1..];
            return Ok(Some(mechanism));
        }
        // An empty name marks the end; only its own NUL may remain.
        if self.0.len() == 1 {
            Ok(None)
        } else {
            Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "invalid message length: expected to be at end of iterator for sasl",
            ))
        }
    }
}
/// Body of an `AuthenticationSASLContinue` message.
pub struct AuthenticationSaslContinueBody(Bytes);
impl AuthenticationSaslContinueBody {
    /// The SASL challenge data to feed to the client's mechanism.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.0
    }
}
/// Body of an `AuthenticationSASLFinal` message.
pub struct AuthenticationSaslFinalBody(Bytes);
impl AuthenticationSaslFinalBody {
    /// The server's final message of the SASL exchange.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.0
    }
}
/// Body of a `BackendKeyData` message: the credentials needed to cancel
/// queries on this connection.
pub struct BackendKeyDataBody {
    process_id: i32,
    secret_key: i32,
}
impl BackendKeyDataBody {
    /// The backend's process ID.
    #[inline]
    pub fn process_id(&self) -> i32 {
        self.process_id
    }
    /// The secret key for cancellation requests.
    #[inline]
    pub fn secret_key(&self) -> i32 {
        self.secret_key
    }
}
/// Body of a `CommandComplete` message.
pub struct CommandCompleteBody {
    tag: Bytes,
}
impl CommandCompleteBody {
    /// The command tag (decoded via `get_str`, which may fail).
    #[inline]
    pub fn tag(&self) -> io::Result<&str> {
        get_str(&self.tag)
    }
}
/// Body of a `CopyData` message.
pub struct CopyDataBody {
    storage: Bytes,
}
impl CopyDataBody {
    /// Borrows the raw COPY payload.
    #[inline]
    pub fn data(&self) -> &[u8] {
        &self.storage
    }
    /// Consumes the body, returning the payload without copying.
    #[inline]
    pub fn into_bytes(self) -> Bytes {
        self.storage
    }
}
/// Body of a `CopyInResponse` message.
pub struct CopyInResponseBody {
    storage: Bytes,
    len: u16,
    format: u8,
}
impl CopyInResponseBody {
    /// The overall COPY format code.
    #[inline]
    pub fn format(&self) -> u8 {
        self.format
    }
    /// Iterates over the per-column format codes.
    #[inline]
    pub fn column_formats(&self) -> ColumnFormats<'_> {
        ColumnFormats {
            remaining: self.len,
            buf: &self.storage,
        }
    }
}
/// A fallible iterator over the per-column format codes of a COPY response.
pub struct ColumnFormats<'a> {
    buf: &'a [u8],
    remaining: u16,
}
impl<'a> FallibleIterator for ColumnFormats<'a> {
    type Item = u16;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<u16>> {
        if self.remaining > 0 {
            self.remaining -= 1;
            return self.buf.read_u16::<BigEndian>().map(Some);
        }
        // All declared formats consumed: the buffer must be fully drained.
        if self.buf.is_empty() {
            Ok(None)
        } else {
            Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: wrong column formats",
            ))
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = usize::from(self.remaining);
        (remaining, Some(remaining))
    }
}
/// Body of a `CopyOutResponse` message.
pub struct CopyOutResponseBody {
    storage: Bytes,
    len: u16,
    format: u8,
}
impl CopyOutResponseBody {
    /// The overall COPY format code.
    #[inline]
    pub fn format(&self) -> u8 {
        self.format
    }
    /// Iterates over the per-column format codes.
    #[inline]
    pub fn column_formats(&self) -> ColumnFormats<'_> {
        ColumnFormats {
            remaining: self.len,
            buf: &self.storage,
        }
    }
}
/// Body of a `CopyBothResponse` message (used for replication).
pub struct CopyBothResponseBody {
    storage: Bytes,
    len: u16,
    format: u8,
}
impl CopyBothResponseBody {
    /// The overall COPY format code.
    #[inline]
    pub fn format(&self) -> u8 {
        self.format
    }
    /// Iterates over the per-column format codes.
    #[inline]
    pub fn column_formats(&self) -> ColumnFormats<'_> {
        ColumnFormats {
            remaining: self.len,
            buf: &self.storage,
        }
    }
}
/// Body of a `DataRow` message: one row of a query result.
pub struct DataRowBody {
    storage: Bytes,
    len: u16,
}
impl DataRowBody {
    /// Iterates over the byte ranges of the row's fields; index the slice
    /// returned by `buffer` with them. `None` entries are NULL fields.
    #[inline]
    pub fn ranges(&self) -> DataRowRanges<'_> {
        DataRowRanges {
            buf: &self.storage,
            len: self.storage.len(),
            remaining: self.len,
        }
    }
    /// The raw bytes the ranges index into.
    #[inline]
    pub fn buffer(&self) -> &[u8] {
        &self.storage
    }
}
/// A fallible iterator yielding the byte range of each field in a `DataRow`
/// body; `None` means the field is NULL.
pub struct DataRowRanges<'a> {
    buf: &'a [u8],
    len: usize,
    remaining: u16,
}
impl<'a> FallibleIterator for DataRowRanges<'a> {
    type Item = Option<Range<usize>>;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<Option<Range<usize>>>> {
        if self.remaining == 0 {
            // All declared fields consumed: the buffer must be drained.
            return if self.buf.is_empty() {
                Ok(None)
            } else {
                Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: datarowrange is not empty",
                ))
            };
        }
        self.remaining -= 1;
        let field_len = self.buf.read_i32::<BigEndian>()?;
        // A negative length encodes NULL.
        if field_len < 0 {
            return Ok(Some(None));
        }
        let field_len = field_len as usize;
        if self.buf.len() < field_len {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "unexpected EOF",
            ));
        }
        // `len` is the total body length, so the amount already consumed is
        // the offset of the current field within the original buffer.
        let start = self.len - self.buf.len();
        self.buf = &self.buf[field_len..];
        Ok(Some(Some(start..start + field_len)))
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.remaining as usize;
        (remaining, Some(remaining))
    }
}
/// Body of an `ErrorResponse` message.
pub struct ErrorResponseBody {
    storage: Bytes,
}
impl ErrorResponseBody {
    /// Iterates over the error's fields.
    #[inline]
    pub fn fields(&self) -> ErrorFields<'_> {
        ErrorFields { buf: &self.storage }
    }
}
/// A fallible iterator over the typed fields of an error or notice body.
pub struct ErrorFields<'a> {
    buf: &'a [u8],
}
impl<'a> FallibleIterator for ErrorFields<'a> {
    type Item = ErrorField<'a>;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<ErrorField<'a>>> {
        let type_ = self.buf.read_u8()?;
        // A zero type byte terminates the list; nothing may follow it.
        if type_ == 0 {
            return if self.buf.is_empty() {
                Ok(None)
            } else {
                Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: error fields is not drained",
                ))
            };
        }
        let terminator = find_null(self.buf, 0)?;
        let value = get_str(&self.buf[..terminator])?;
        self.buf = &self.buf[terminator + 1..];
        Ok(Some(ErrorField { type_, value }))
    }
}
/// One typed field of an error or notice response.
pub struct ErrorField<'a> {
    type_: u8,
    value: &'a str,
}
impl<'a> ErrorField<'a> {
    /// The field's type code (severity, message, detail, …).
    #[inline]
    pub fn type_(&self) -> u8 {
        self.type_
    }
    /// The field's textual value.
    #[inline]
    pub fn value(&self) -> &str {
        self.value
    }
}
/// Body of a `NoticeResponse` message; shares its field layout with
/// `ErrorResponse`.
pub struct NoticeResponseBody {
    storage: Bytes,
}
impl NoticeResponseBody {
    /// Iterates over the notice's fields.
    #[inline]
    pub fn fields(&self) -> ErrorFields<'_> {
        ErrorFields { buf: &self.storage }
    }
}
/// Body of a `NotificationResponse` message (`LISTEN`/`NOTIFY`).
pub struct NotificationResponseBody {
    process_id: i32,
    channel: Bytes,
    message: Bytes,
}
impl NotificationResponseBody {
    /// The process ID of the notifying backend.
    #[inline]
    pub fn process_id(&self) -> i32 {
        self.process_id
    }
    /// The name of the channel the notification was sent on.
    #[inline]
    pub fn channel(&self) -> io::Result<&str> {
        get_str(&self.channel)
    }
    /// The notification payload.
    #[inline]
    pub fn message(&self) -> io::Result<&str> {
        get_str(&self.message)
    }
}
/// Body of a `ParameterDescription` message.
pub struct ParameterDescriptionBody {
    storage: Bytes,
    len: u16,
}
impl ParameterDescriptionBody {
    /// Iterates over the OIDs of the statement's parameter types.
    #[inline]
    pub fn parameters(&self) -> Parameters<'_> {
        Parameters {
            buf: &self.storage,
            remaining: self.len,
        }
    }
}
/// A fallible iterator over the parameter-type OIDs of a
/// `ParameterDescription` body.
pub struct Parameters<'a> {
    buf: &'a [u8],
    remaining: u16,
}
impl<'a> FallibleIterator for Parameters<'a> {
    type Item = Oid;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<Oid>> {
        if self.remaining > 0 {
            self.remaining -= 1;
            return self.buf.read_u32::<BigEndian>().map(Some);
        }
        // All declared parameters consumed: the buffer must be drained.
        if self.buf.is_empty() {
            Ok(None)
        } else {
            Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "invalid message length: parameters is not drained",
            ))
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = usize::from(self.remaining);
        (remaining, Some(remaining))
    }
}
/// Body of a `ParameterStatus` message: a run-time parameter report.
pub struct ParameterStatusBody {
    name: Bytes,
    value: Bytes,
}
impl ParameterStatusBody {
    /// The parameter's name.
    #[inline]
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
    /// The parameter's current value.
    #[inline]
    pub fn value(&self) -> io::Result<&str> {
        get_str(&self.value)
    }
}
/// Body of a `ReadyForQuery` message.
pub struct ReadyForQueryBody {
    status: u8,
}
impl ReadyForQueryBody {
    /// The backend's transaction-status indicator byte.
    #[inline]
    pub fn status(&self) -> u8 {
        self.status
    }
}
/// Body of a `RowDescription` message.
pub struct RowDescriptionBody {
    storage: Bytes,
    len: u16,
}
impl RowDescriptionBody {
    /// Iterates over the descriptions of the result's fields.
    #[inline]
    pub fn fields(&self) -> Fields<'_> {
        Fields {
            buf: &self.storage,
            remaining: self.len,
        }
    }
}
/// Body of an `XLogData` replication message: a section of the WAL stream.
#[derive(Debug)]
pub struct XLogDataBody<D> {
    wal_start: u64,
    wal_end: u64,
    timestamp: i64,
    data: D,
}
impl<D> XLogDataBody<D> {
    /// The WAL position at which this payload starts.
    #[inline]
    pub fn wal_start(&self) -> u64 {
        self.wal_start
    }
    /// The server's current end-of-WAL position.
    #[inline]
    pub fn wal_end(&self) -> u64 {
        self.wal_end
    }
    /// The server clock value when this message was sent.
    #[inline]
    pub fn timestamp(&self) -> i64 {
        self.timestamp
    }
    /// Borrows the payload.
    #[inline]
    pub fn data(&self) -> &D {
        &self.data
    }
    /// Consumes the body, returning the payload.
    #[inline]
    pub fn into_data(self) -> D {
        self.data
    }
    /// Transforms the payload with `f`, preserving the WAL metadata; fails if
    /// `f` fails.
    pub fn map_data<F, D2, E>(self, f: F) -> Result<XLogDataBody<D2>, E>
    where
        F: Fn(D) -> Result<D2, E>,
    {
        let data = f(self.data)?;
        Ok(XLogDataBody {
            wal_start: self.wal_start,
            wal_end: self.wal_end,
            timestamp: self.timestamp,
            data,
        })
    }
}
/// Body of a primary keepalive replication message.
#[derive(Debug)]
pub struct PrimaryKeepAliveBody {
    wal_end: u64,
    timestamp: i64,
    reply: u8,
}
impl PrimaryKeepAliveBody {
    /// The server's current end-of-WAL position.
    #[inline]
    pub fn wal_end(&self) -> u64 {
        self.wal_end
    }
    /// The server clock value when this message was sent.
    #[inline]
    pub fn timestamp(&self) -> i64 {
        self.timestamp
    }
    /// Nonzero when the server requests an immediate status reply.
    #[inline]
    pub fn reply(&self) -> u8 {
        self.reply
    }
}
#[non_exhaustive]
/// A message of the logical replication stream
#[derive(Debug)]
pub enum LogicalReplicationMessage {
    /// A BEGIN statement
    Begin(BeginBody),
    /// A COMMIT statement
    Commit(CommitBody),
    /// An Origin replication message
    /// Note that there can be multiple Origin messages inside a single transaction.
    Origin(OriginBody),
    /// A Relation replication message
    Relation(RelationBody),
    /// A Type replication message
    Type(TypeBody),
    /// An INSERT statement
    Insert(InsertBody),
    /// An UPDATE statement
    Update(UpdateBody),
    /// A DELETE statement
    Delete(DeleteBody),
    /// A TRUNCATE statement
    Truncate(TruncateBody),
}
impl LogicalReplicationMessage {
    /// Parses a logical replication message from `buf` (the payload carried
    /// inside an `XLogData` message).
    pub fn parse(buf: &Bytes) -> io::Result<Self> {
        // `Bytes::clone` is a cheap reference-count bump; no data is copied.
        let mut buf = Buffer {
            bytes: buf.clone(),
            idx: 0,
        };
        let tag = buf.read_u8()?;
        let logical_replication_message = match tag {
            BEGIN_TAG => Self::Begin(BeginBody {
                final_lsn: buf.read_u64::<BigEndian>()?,
                timestamp: buf.read_i64::<BigEndian>()?,
                xid: buf.read_u32::<BigEndian>()?,
            }),
            COMMIT_TAG => Self::Commit(CommitBody {
                flags: buf.read_i8()?,
                commit_lsn: buf.read_u64::<BigEndian>()?,
                end_lsn: buf.read_u64::<BigEndian>()?,
                timestamp: buf.read_i64::<BigEndian>()?,
            }),
            ORIGIN_TAG => Self::Origin(OriginBody {
                commit_lsn: buf.read_u64::<BigEndian>()?,
                name: buf.read_cstr()?,
            }),
            RELATION_TAG => {
                let rel_id = buf.read_u32::<BigEndian>()?;
                let namespace = buf.read_cstr()?;
                let name = buf.read_cstr()?;
                let replica_identity = match buf.read_u8()? {
                    REPLICA_IDENTITY_DEFAULT_TAG => ReplicaIdentity::Default,
                    REPLICA_IDENTITY_NOTHING_TAG => ReplicaIdentity::Nothing,
                    REPLICA_IDENTITY_FULL_TAG => ReplicaIdentity::Full,
                    REPLICA_IDENTITY_INDEX_TAG => ReplicaIdentity::Index,
                    tag => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("unknown replica identity tag `{}`", tag),
                        ));
                    }
                };
                // NOTE(review): a negative column count would over-allocate
                // via `as usize`; presumably the server never sends one —
                // consider validating.
                let column_len = buf.read_i16::<BigEndian>()?;
                let mut columns = Vec::with_capacity(column_len as usize);
                for _ in 0..column_len {
                    columns.push(Column::parse(&mut buf)?);
                }
                Self::Relation(RelationBody {
                    rel_id,
                    namespace,
                    name,
                    replica_identity,
                    columns,
                })
            }
            TYPE_TAG => Self::Type(TypeBody {
                id: buf.read_u32::<BigEndian>()?,
                namespace: buf.read_cstr()?,
                name: buf.read_cstr()?,
            }),
            INSERT_TAG => {
                let rel_id = buf.read_u32::<BigEndian>()?;
                let tag = buf.read_u8()?;
                // An INSERT always carries exactly one new ('N') tuple.
                let tuple = match tag {
                    TUPLE_NEW_TAG => Tuple::parse(&mut buf)?,
                    tag => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("unexpected tuple tag `{}`", tag),
                        ));
                    }
                };
                Self::Insert(InsertBody { rel_id, tuple })
            }
            UPDATE_TAG => {
                let rel_id = buf.read_u32::<BigEndian>()?;
                let tag = buf.read_u8()?;
                let mut key_tuple = None;
                let mut old_tuple = None;
                // An optional key ('K') or old ('O') tuple may precede the
                // mandatory new ('N') tuple.
                let new_tuple = match tag {
                    TUPLE_NEW_TAG => Tuple::parse(&mut buf)?,
                    TUPLE_OLD_TAG | TUPLE_KEY_TAG => {
                        if tag == TUPLE_OLD_TAG {
                            old_tuple = Some(Tuple::parse(&mut buf)?);
                        } else {
                            key_tuple = Some(Tuple::parse(&mut buf)?);
                        }
                        match buf.read_u8()? {
                            TUPLE_NEW_TAG => Tuple::parse(&mut buf)?,
                            tag => {
                                return Err(io::Error::new(
                                    io::ErrorKind::InvalidInput,
                                    format!("unexpected tuple tag `{}`", tag),
                                ));
                            }
                        }
                    }
                    tag => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("unknown tuple tag `{}`", tag),
                        ));
                    }
                };
                Self::Update(UpdateBody {
                    rel_id,
                    key_tuple,
                    old_tuple,
                    new_tuple,
                })
            }
            DELETE_TAG => {
                let rel_id = buf.read_u32::<BigEndian>()?;
                let tag = buf.read_u8()?;
                let mut key_tuple = None;
                let mut old_tuple = None;
                // A DELETE carries either the old tuple or just its key.
                match tag {
                    TUPLE_OLD_TAG => old_tuple = Some(Tuple::parse(&mut buf)?),
                    TUPLE_KEY_TAG => key_tuple = Some(Tuple::parse(&mut buf)?),
                    tag => {
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidInput,
                            format!("unknown tuple tag `{}`", tag),
                        ));
                    }
                }
                Self::Delete(DeleteBody {
                    rel_id,
                    key_tuple,
                    old_tuple,
                })
            }
            TRUNCATE_TAG => {
                // NOTE(review): as with RELATION, a negative count would
                // over-allocate via `as usize` — consider validating.
                let relation_len = buf.read_i32::<BigEndian>()?;
                let options = buf.read_i8()?;
                let mut rel_ids = Vec::with_capacity(relation_len as usize);
                for _ in 0..relation_len {
                    rel_ids.push(buf.read_u32::<BigEndian>()?);
                }
                Self::Truncate(TruncateBody { options, rel_ids })
            }
            tag => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("unknown replication message tag `{}`", tag),
                ));
            }
        };
        Ok(logical_replication_message)
    }
}
/// A row as it appears in the replication stream
#[derive(Debug)]
pub struct Tuple(Vec<TupleData>);
impl Tuple {
    #[inline]
    /// The tuple data of this tuple
    pub fn tuple_data(&self) -> &[TupleData] {
        &self.0
    }
}
impl Tuple {
    /// Parse a tuple off the wire: an Int16 column count followed by that
    /// many `TupleData` items.
    fn parse(buf: &mut Buffer) -> io::Result<Self> {
        let col_len = buf.read_i16::<BigEndian>()?;
        // The column count comes from untrusted input; a negative value cast
        // to `usize` would request an enormous capacity and panic, so reject
        // it explicitly instead.
        if col_len < 0 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("invalid column count `{}`", col_len),
            ));
        }
        let mut tuple = Vec::with_capacity(col_len as usize);
        for _ in 0..col_len {
            tuple.push(TupleData::parse(buf)?);
        }
        Ok(Tuple(tuple))
    }
}
/// A column as it appears in the replication stream
#[derive(Debug)]
pub struct Column {
    // 0 = no flags, 1 = part of the replica-identity key (see `flags()`).
    flags: i8,
    // Raw, unvalidated UTF-8 bytes; decoded lazily by `name()`.
    name: Bytes,
    // OID of the column's data type.
    type_id: i32,
    // `atttypmod` value for the column.
    type_modifier: i32,
}
impl Column {
    #[inline]
    /// Flags for the column. Currently can be either 0 for no flags or 1 which marks the column as
    /// part of the key.
    pub fn flags(&self) -> i8 {
        self.flags
    }
    #[inline]
    /// Name of the column.
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
    #[inline]
    /// ID of the column's data type.
    pub fn type_id(&self) -> i32 {
        self.type_id
    }
    #[inline]
    /// Type modifier of the column (`atttypmod`).
    pub fn type_modifier(&self) -> i32 {
        self.type_modifier
    }
}
impl Column {
    /// Parse one column definition off the wire: flag byte, NUL-terminated
    /// name, type OID, and type modifier, in that order.
    fn parse(buf: &mut Buffer) -> io::Result<Self> {
        Ok(Self {
            flags: buf.read_i8()?,
            name: buf.read_cstr()?,
            type_id: buf.read_i32::<BigEndian>()?,
            type_modifier: buf.read_i32::<BigEndian>()?,
        })
    }
}
/// The data of an individual column as it appears in the replication stream
#[derive(Debug)]
pub enum TupleData {
    /// Represents a NULL value
    Null,
    /// Represents an unchanged TOASTed value (the actual value is not sent).
    UnchangedToast,
    /// Column data as text formatted value.
    Text(Bytes),
}
impl TupleData {
    /// Parse one column's data: a one-byte type tag, optionally followed by
    /// an Int32 length and that many bytes of text-format data.
    fn parse(buf: &mut Buffer) -> io::Result<Self> {
        let type_tag = buf.read_u8()?;
        let tuple = match type_tag {
            TUPLE_DATA_NULL_TAG => TupleData::Null,
            TUPLE_DATA_TOAST_TAG => TupleData::UnchangedToast,
            TUPLE_DATA_TEXT_TAG => {
                let len = buf.read_i32::<BigEndian>()?;
                // The length is untrusted input; a negative value cast to
                // `usize` would request an enormous allocation, so reject it
                // before sizing the buffer.
                if len < 0 {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("invalid tuple data length `{}`", len),
                    ));
                }
                let mut data = vec![0; len as usize];
                buf.read_exact(&mut data)?;
                TupleData::Text(data.into())
            }
            tag => {
                // The original error text said "replication message tag",
                // but this byte tags tuple data, not a replication message.
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("unknown tuple data tag `{}`", tag),
                ));
            }
        };
        Ok(tuple)
    }
}
/// A BEGIN statement
#[derive(Debug)]
pub struct BeginBody {
    // LSN at which this transaction commits.
    final_lsn: u64,
    // Microseconds since the PostgreSQL epoch (2000-01-01).
    timestamp: i64,
    // Transaction id.
    xid: u32,
}
impl BeginBody {
    #[inline]
    /// Gets the final lsn of the transaction
    pub fn final_lsn(&self) -> Lsn {
        self.final_lsn
    }
    #[inline]
    /// Commit timestamp of the transaction. The value is in number of microseconds since PostgreSQL epoch (2000-01-01).
    pub fn timestamp(&self) -> i64 {
        self.timestamp
    }
    #[inline]
    /// Xid of the transaction.
    pub fn xid(&self) -> u32 {
        self.xid
    }
}
/// A COMMIT statement
#[derive(Debug)]
pub struct CommitBody {
    // Currently always 0; reserved by the protocol.
    flags: i8,
    // LSN of the commit record itself.
    commit_lsn: u64,
    // End LSN of the transaction.
    end_lsn: u64,
    // Microseconds since the PostgreSQL epoch (2000-01-01).
    timestamp: i64,
}
impl CommitBody {
    #[inline]
    /// The LSN of the commit.
    pub fn commit_lsn(&self) -> Lsn {
        self.commit_lsn
    }
    #[inline]
    /// The end LSN of the transaction.
    pub fn end_lsn(&self) -> Lsn {
        self.end_lsn
    }
    #[inline]
    /// Commit timestamp of the transaction. The value is in number of microseconds since PostgreSQL epoch (2000-01-01).
    pub fn timestamp(&self) -> i64 {
        self.timestamp
    }
    #[inline]
    /// Flags; currently unused (will be 0).
    pub fn flags(&self) -> i8 {
        self.flags
    }
}
/// An Origin replication message
///
/// Note that there can be multiple Origin messages inside a single transaction.
#[derive(Debug)]
pub struct OriginBody {
    // LSN of the commit on the origin server.
    commit_lsn: u64,
    // Raw, unvalidated UTF-8 bytes; decoded lazily by `name()`.
    name: Bytes,
}
impl OriginBody {
    #[inline]
    /// The LSN of the commit on the origin server.
    pub fn commit_lsn(&self) -> Lsn {
        self.commit_lsn
    }
    #[inline]
    /// Name of the origin.
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
}
/// Describes the REPLICA IDENTITY setting of a table
// NOTE(review): variants appear to correspond to the single-byte codes
// 'd'/'n'/'f'/'i' in the Relation message — confirm against the parser.
#[derive(Debug)]
pub enum ReplicaIdentity {
    /// default selection for replica identity (primary key or nothing)
    Default,
    /// no replica identity is logged for this relation
    Nothing,
    /// all columns are logged as replica identity
    Full,
    /// An explicitly chosen candidate key's columns are used as replica identity.
    /// Note this will still be set if the index has been dropped; in that case it
    /// has the same meaning as 'd'.
    Index,
}
/// A Relation replication message
#[derive(Debug)]
pub struct RelationBody {
    // OID of the relation.
    rel_id: u32,
    // Schema name bytes; empty for pg_catalog.
    namespace: Bytes,
    // Relation name bytes; decoded lazily by `name()`.
    name: Bytes,
    replica_identity: ReplicaIdentity,
    columns: Vec<Column>,
}
impl RelationBody {
    #[inline]
    /// ID of the relation.
    pub fn rel_id(&self) -> u32 {
        self.rel_id
    }
    #[inline]
    /// Namespace (empty string for pg_catalog).
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn namespace(&self) -> io::Result<&str> {
        get_str(&self.namespace)
    }
    #[inline]
    /// Relation name.
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
    #[inline]
    /// Replica identity setting for the relation
    pub fn replica_identity(&self) -> &ReplicaIdentity {
        &self.replica_identity
    }
    #[inline]
    /// The column definitions of this relation
    pub fn columns(&self) -> &[Column] {
        &self.columns
    }
}
/// A Type replication message
#[derive(Debug)]
pub struct TypeBody {
    // OID of the data type.
    id: u32,
    // Schema name bytes; empty for pg_catalog.
    namespace: Bytes,
    // Type name bytes; decoded lazily by `name()`.
    name: Bytes,
}
impl TypeBody {
    #[inline]
    /// ID of the data type.
    pub fn id(&self) -> Oid {
        self.id
    }
    #[inline]
    /// Namespace (empty string for pg_catalog).
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn namespace(&self) -> io::Result<&str> {
        get_str(&self.namespace)
    }
    #[inline]
    /// Name of the data type.
    ///
    /// Fails if the stored bytes are not valid UTF-8.
    pub fn name(&self) -> io::Result<&str> {
        get_str(&self.name)
    }
}
/// An INSERT statement
#[derive(Debug)]
pub struct InsertBody {
    // OID of the relation the row was inserted into.
    rel_id: u32,
    // The newly inserted row.
    tuple: Tuple,
}
impl InsertBody {
    #[inline]
    /// ID of the relation corresponding to the ID in the relation message.
    pub fn rel_id(&self) -> u32 {
        self.rel_id
    }
    #[inline]
    /// The inserted tuple
    pub fn tuple(&self) -> &Tuple {
        &self.tuple
    }
}
/// An UPDATE statement
#[derive(Debug)]
pub struct UpdateBody {
    rel_id: u32,
    // At most one of `old_tuple`/`key_tuple` is set, depending on the
    // table's REPLICA IDENTITY setting.
    old_tuple: Option<Tuple>,
    key_tuple: Option<Tuple>,
    new_tuple: Tuple,
}
impl UpdateBody {
    #[inline]
    /// ID of the relation corresponding to the ID in the relation message.
    pub fn rel_id(&self) -> u32 {
        self.rel_id
    }
    #[inline]
    /// This field is optional and is only present if the update changed data in any of the
    /// column(s) that are part of the REPLICA IDENTITY index.
    pub fn key_tuple(&self) -> Option<&Tuple> {
        self.key_tuple.as_ref()
    }
    #[inline]
    /// This field is optional and is only present if table in which the update happened has
    /// REPLICA IDENTITY set to FULL.
    pub fn old_tuple(&self) -> Option<&Tuple> {
        self.old_tuple.as_ref()
    }
    #[inline]
    /// The new tuple
    pub fn new_tuple(&self) -> &Tuple {
        &self.new_tuple
    }
}
/// A DELETE statement
#[derive(Debug)]
pub struct DeleteBody {
    rel_id: u32,
    // At most one of `old_tuple`/`key_tuple` is set, depending on the
    // table's REPLICA IDENTITY setting.
    old_tuple: Option<Tuple>,
    key_tuple: Option<Tuple>,
}
impl DeleteBody {
    #[inline]
    /// ID of the relation corresponding to the ID in the relation message.
    pub fn rel_id(&self) -> u32 {
        self.rel_id
    }
    #[inline]
    /// This field is present if the table in which the delete has happened uses an index as
    /// REPLICA IDENTITY.
    pub fn key_tuple(&self) -> Option<&Tuple> {
        self.key_tuple.as_ref()
    }
    #[inline]
    /// This field is present if the table in which the delete has happened has REPLICA IDENTITY
    /// set to FULL.
    pub fn old_tuple(&self) -> Option<&Tuple> {
        self.old_tuple.as_ref()
    }
}
/// A TRUNCATE statement
#[derive(Debug)]
pub struct TruncateBody {
    // Bit flags: 1 = CASCADE, 2 = RESTART IDENTITY.
    options: i8,
    // OIDs of every relation that was truncated.
    rel_ids: Vec<u32>,
}
impl TruncateBody {
    #[inline]
    /// The IDs of the relations corresponding to the ID in the relation messages
    pub fn rel_ids(&self) -> &[u32] {
        &self.rel_ids
    }
    #[inline]
    /// Option bits for TRUNCATE: 1 for CASCADE, 2 for RESTART IDENTITY
    pub fn options(&self) -> i8 {
        self.options
    }
}
/// A fallible iterator over the field descriptions of a row-description
/// message body.
pub struct Fields<'a> {
    // Remaining unparsed bytes of the message body.
    buf: &'a [u8],
    // Number of fields still expected in `buf`.
    remaining: u16,
}
impl<'a> FallibleIterator for Fields<'a> {
    type Item = Field<'a>;
    type Error = io::Error;
    #[inline]
    fn next(&mut self) -> io::Result<Option<Field<'a>>> {
        if self.remaining == 0 {
            if self.buf.is_empty() {
                return Ok(None);
            } else {
                // All declared fields were consumed but bytes remain: the
                // message length and the payload disagree.
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid message length: field is not drained",
                ));
            }
        }
        self.remaining -= 1;
        // Per-field layout: NUL-terminated name, then six fixed-width
        // big-endian integers.
        let name_end = find_null(self.buf, 0)?;
        let name = get_str(&self.buf[..name_end])?;
        self.buf = &self.buf[name_end + 1..];
        let table_oid = self.buf.read_u32::<BigEndian>()?;
        let column_id = self.buf.read_i16::<BigEndian>()?;
        let type_oid = self.buf.read_u32::<BigEndian>()?;
        let type_size = self.buf.read_i16::<BigEndian>()?;
        let type_modifier = self.buf.read_i32::<BigEndian>()?;
        let format = self.buf.read_i16::<BigEndian>()?;
        Ok(Some(Field {
            name,
            table_oid,
            column_id,
            type_oid,
            type_size,
            type_modifier,
            format,
        }))
    }
}
/// A single field (column) description borrowed from a row-description
/// message body.
pub struct Field<'a> {
    name: &'a str,
    table_oid: Oid,
    column_id: i16,
    type_oid: Oid,
    type_size: i16,
    type_modifier: i32,
    format: i16,
}
impl<'a> Field<'a> {
    #[inline]
    /// Name of the field.
    pub fn name(&self) -> &'a str {
        self.name
    }
    #[inline]
    /// OID of the table the field belongs to (0 if not a table column).
    pub fn table_oid(&self) -> Oid {
        self.table_oid
    }
    #[inline]
    /// Attribute number of the column within its table.
    pub fn column_id(&self) -> i16 {
        self.column_id
    }
    #[inline]
    /// OID of the field's data type.
    pub fn type_oid(&self) -> Oid {
        self.type_oid
    }
    #[inline]
    /// Size of the data type (negative means variable-length).
    pub fn type_size(&self) -> i16 {
        self.type_size
    }
    #[inline]
    /// Type modifier of the field.
    pub fn type_modifier(&self) -> i32 {
        self.type_modifier
    }
    #[inline]
    /// Format code of the field (0 = text, 1 = binary).
    pub fn format(&self) -> i16 {
        self.format
    }
}
#[inline]
/// Find the index of the first NUL byte at or after `start`, or fail with
/// `UnexpectedEof` if the buffer contains no terminator.
fn find_null(buf: &[u8], start: usize) -> io::Result<usize> {
    memchr(0, &buf[start..])
        .map(|offset| start + offset)
        .ok_or_else(|| io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected EOF"))
}
#[inline]
/// Decode `buf` as UTF-8, mapping a decode failure to an `InvalidInput`
/// I/O error.
fn get_str(buf: &[u8]) -> io::Result<&str> {
    match str::from_utf8(buf) {
        Ok(s) => Ok(s),
        Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
    }
}
|
//! Helpers for IO related tasks.
//!
//! These types are often used in combination with hyper or reqwest, as they
//! allow converting between a hyper [`Body`] and [`AsyncRead`].
//!
//! [`Body`]: https://docs.rs/hyper/0.13/hyper/struct.Body.html
//! [`AsyncRead`]: tokio::io::AsyncRead
mod read_buf;
mod reader_stream;
mod stream_reader;
pub use self::read_buf::read_buf;
pub use self::reader_stream::ReaderStream;
pub use self::stream_reader::StreamReader;
pub use crate::util::{poll_read_buf, poll_write_buf};
|
use pnet::datalink::{self, NetworkInterface};
use pnet::datalink::Channel;
// TODO: set Config.channel_type = Layer3
/// Open a datalink channel on `interface` using the default configuration.
///
/// # Panics
/// Panics if the channel cannot be created (for example, insufficient
/// privileges or a vanished interface).
pub fn get_connection(interface: &NetworkInterface) -> Channel {
    // `interface` is already a reference; the original passed `&interface`
    // (a `&&NetworkInterface`) and relied on auto-deref.
    match datalink::channel(interface, Default::default()) {
        Ok(ether) => ether,
        Err(e) => {
            panic!(
                "An error occurred when creating the datalink channel: {}",
                e
            )
        }
    }
}
|
struct Solution;
impl Solution {
    /// Count the structurally unique binary search trees with `n` nodes
    /// (the n-th Catalan number) via the standard DP:
    /// `G[i] = sum over j in 1..=i of G[j-1] * G[i-j]`.
    ///
    /// Returns 1 for `n <= 0` (the empty tree). The original indexed
    /// `g[1]` unconditionally and panicked when `n == 0`.
    pub fn num_trees(n: i32) -> i32 {
        if n <= 0 {
            return 1;
        }
        let n = n as usize;
        let mut g = vec![0; n + 1];
        g[0] = 1;
        g[1] = 1;
        for i in 2..=n {
            for j in 1..=i {
                // j is the root: j-1 nodes form the left subtree,
                // i-j nodes form the right subtree.
                g[i] += g[j - 1] * g[i - j];
            }
        }
        g[n]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Verify the first few Catalan numbers; the original test only
    /// covered n = 3.
    #[test]
    fn test_num_trees() {
        assert_eq!(Solution::num_trees(1), 1);
        assert_eq!(Solution::num_trees(2), 2);
        assert_eq!(Solution::num_trees(3), 5);
        assert_eq!(Solution::num_trees(4), 14);
    }
}
|
//! Module for database interaction with relation to login
use crate::env::Env;
use rusqlite::named_params;
use crate::api::oauth::LoginData;
use crate::{Result, unwrap_db_err};
/// Save login data to the database
///
/// ## Errors
/// - When a database operation fails
pub fn save_to_database(login_data: &LoginData, env: &Env) -> Result<()> {
let conn = unwrap_db_err!(env.get_conn());
if login_data.refresh_token.is_some() {
unwrap_db_err!(conn.execute("DELETE FROM user", named_params! {}));
}
let expiry_time = chrono::Utc::now().timestamp() + login_data.expires_in;
unwrap_db_err!(if login_data.refresh_token.is_some() {
conn.execute("INSERT INTO user (refresh_token, access_token, expiry) VALUES (:refresh_token, :access_token, :expiry)", named_params! {
":refresh_token": &login_data.refresh_token.as_ref().unwrap(),
":access_token": &login_data.access_token,
":expiry": expiry_time
})
} else {
conn.execute("UPDATE user SET access_token = :access_token, expiry = :expiry", named_params! {
":access_token": &login_data.access_token,
":expiry": expiry_time
})
});
Ok(())
} |
//! The `bank` module tracks client accounts and the progress of smart
//! contracts. It offers a high-level API that signs transactions
//! on behalf of the caller, and a low-level API for when they have
//! already been signed and verified.
use bincode::deserialize;
use bincode::serialize;
use budget_program::BudgetState;
use budget_transaction::BudgetTransaction;
use counter::Counter;
use entry::Entry;
use hash::{hash, Hash};
use itertools::Itertools;
use jsonrpc_macros::pubsub::Sink;
use leader_scheduler::LeaderScheduler;
use ledger::Block;
use log::Level;
use mint::Mint;
use native_loader;
use payment_plan::Payment;
use poh_recorder::PohRecorder;
use rayon::prelude::*;
use rpc::RpcSignatureStatus;
use signature::Keypair;
use signature::Signature;
use solana_program_interface::account::{Account, KeyedAccount};
use solana_program_interface::pubkey::Pubkey;
use std;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, RwLock};
use std::time::Instant;
use storage_program::StorageProgram;
use system_program::SystemProgram;
use system_transaction::SystemTransaction;
use tictactoe_dashboard_program::TicTacToeDashboardProgram;
use tictactoe_program::TicTacToeProgram;
use timing::{duration_as_us, timestamp};
use token_program::TokenProgram;
use tokio::prelude::Future;
use transaction::Transaction;
use window::WINDOW_SIZE;
/// Number of PoH ticks per second — TODO(review): confirm against the
/// network's tick configuration.
pub const NUM_TICKS_PER_SECOND: usize = 10;
/// The number of most recent `last_id` values that the bank will track the signatures
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
/// but requires clients to update its `last_id` more frequently. Raising the value
/// lengthens the time a client must wait to be certain a missing transaction will
/// not be processed by the network.
pub const MAX_ENTRY_IDS: usize = NUM_TICKS_PER_SECOND * 120;
/// Batch size used when verifying entries — TODO(review): confirm usage at
/// the ledger-verification call site (not visible here).
pub const VERIFY_BLOCK_SIZE: usize = 16;
/// Reasons a transaction might be rejected.
///
/// Variants carrying a `u8` record the index of the offending instruction
/// within the transaction.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BankError {
    /// This Pubkey is being processed in another transaction
    AccountInUse,
    /// Attempt to debit from `Pubkey`, but found no record of a prior credit.
    AccountNotFound,
    /// The from `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction
    InsufficientFundsForFee,
    /// The bank has seen `Signature` before. This can occur under normal operation
    /// when a UDP packet is duplicated, as a user error from a client not updating
    /// its `last_id`, or as a double-spend attack.
    DuplicateSignature,
    /// The bank has not seen the given `last_id` or the transaction is too old and
    /// the `last_id` has been discarded.
    LastIdNotFound,
    /// The bank has not seen a transaction with the given `Signature` or the transaction is
    /// too old and has been discarded.
    SignatureNotFound,
    /// Proof of History verification failed.
    LedgerVerificationFailed,
    /// Contract's transaction token balance does not equal the balance after the transaction
    UnbalancedTransaction(u8),
    /// Contract's transactions resulted in an account with a negative balance
    /// The difference from InsufficientFundsForFee is that the transaction was executed by the
    /// contract
    ResultWithNegativeTokens(u8),
    /// Contract id is unknown
    UnknownContractId(u8),
    /// Contract modified an account's contract id
    ModifiedContractId(u8),
    /// Contract spent the tokens of an account that doesn't belong to it
    ExternalAccountTokenSpend(u8),
    /// The program returned an error
    ProgramRuntimeError(u8),
    /// Recording into PoH failed
    RecordFailure,
    /// Loader call chain too deep
    CallChainTooDeep,
}
/// Convenience alias: bank operations fail with [`BankError`].
pub type Result<T> = result::Result<T, BankError>;
/// Maps a transaction signature to the status of its execution.
type SignatureStatusMap = HashMap<Signature, Result<()>>;
/// Tallies of rejection causes accumulated while locking and loading
/// accounts, reported via the counter macros.
#[derive(Default)]
struct ErrorCounters {
    account_not_found: usize,
    account_in_use: usize,
    last_id_not_found: usize,
    reserve_last_id: usize,
    insufficient_funds: usize,
    duplicate_signature: usize,
}
/// Bookkeeping for the most recently registered entry ids and the
/// signatures processed under each of them.
pub struct LastIds {
    /// A FIFO queue of `last_id` items, where each item is a set of signatures
    /// that have been processed using that `last_id`. Rejected `last_id`
    /// values are so old that the `last_id` has been pulled out of the queue.
    /// updated whenever an id is registered
    nth: isize,
    /// last id to be registered
    last: Option<Hash>,
    /// Mapping of hashes to signature sets along with timestamp and what nth
    /// was when the id was added. The bank uses this data to
    /// reject transactions with signatures it's seen before and to reject
    /// transactions that are too old (nth is too small)
    sigs: HashMap<Hash, (SignatureStatusMap, u64, isize)>,
}
impl Default for LastIds {
fn default() -> Self {
LastIds {
nth: 0,
last: None,
sigs: HashMap::new(),
}
}
}
/// The state of all accounts and contracts after processing its entries.
pub struct Bank {
    /// A map of account public keys to the balance in that account.
    accounts: RwLock<HashMap<Pubkey, Account>>,
    /// set of accounts which are currently in the pipeline
    account_locks: Mutex<HashSet<Pubkey>>,
    /// FIFO queue of `last_id` items
    last_ids: RwLock<LastIds>,
    /// The number of transactions the bank has processed without error since the
    /// start of the ledger.
    transaction_count: AtomicUsize,
    // The latest finality time for the network
    finality_time: AtomicUsize,
    // Mapping of account ids to Subscriber ids and sinks to notify on userdata update
    account_subscriptions: RwLock<HashMap<Pubkey, HashMap<Pubkey, Sink<Account>>>>,
    // Mapping of signatures to Subscriber ids and sinks to notify on confirmation
    signature_subscriptions: RwLock<HashMap<Signature, HashMap<Pubkey, Sink<RpcSignatureStatus>>>>,
}
impl Default for Bank {
fn default() -> Self {
Bank {
accounts: RwLock::new(HashMap::new()),
account_locks: Mutex::new(HashSet::new()),
last_ids: RwLock::new(LastIds::default()),
transaction_count: AtomicUsize::new(0),
finality_time: AtomicUsize::new(std::usize::MAX),
account_subscriptions: RwLock::new(HashMap::new()),
signature_subscriptions: RwLock::new(HashMap::new()),
}
}
}
impl Bank {
/// Create an Bank using a deposit.
pub fn new_from_deposit(deposit: &Payment) -> Self {
let bank = Self::default();
{
let mut accounts = bank.accounts.write().unwrap();
let account = accounts.entry(deposit.to).or_insert_with(Account::default);
Self::apply_payment(deposit, account);
}
bank
}
/// Create an Bank with only a Mint. Typically used by unit tests.
pub fn new(mint: &Mint) -> Self {
let deposit = Payment {
to: mint.pubkey(),
tokens: mint.tokens,
};
let bank = Self::new_from_deposit(&deposit);
bank.register_entry_id(&mint.last_id());
bank
}
    /// Commit funds to the given account
    ///
    /// Credits only; no balance or overflow checks are performed here.
    fn apply_payment(payment: &Payment, account: &mut Account) {
        trace!("apply payments {}", payment.tokens);
        account.tokens += payment.tokens;
    }
    /// Return the last entry ID registered.
    ///
    /// # Panics
    /// Panics if no id has ever been registered via `register_entry_id`.
    pub fn last_id(&self) -> Hash {
        self.last_ids
            .read()
            .unwrap()
            .last
            .expect("no last_id has been set")
    }
/// Store the given signature. The bank will reject any transaction with the same signature.
fn reserve_signature(signatures: &mut SignatureStatusMap, signature: &Signature) -> Result<()> {
if let Some(_result) = signatures.get(signature) {
return Err(BankError::DuplicateSignature);
}
signatures.insert(*signature, Ok(()));
Ok(())
}
/// Forget all signatures. Useful for benchmarking.
pub fn clear_signatures(&self) {
for sigs in &mut self.last_ids.write().unwrap().sigs.values_mut() {
sigs.0.clear();
}
}
/// Check if the age of the entry_id is within the max_age
/// return false for any entries with an age equal to or above max_age
fn check_entry_id_age(last_ids: &LastIds, entry_id: Hash, max_age: usize) -> bool {
let entry = last_ids.sigs.get(&entry_id);
match entry {
Some(entry) => ((last_ids.nth - entry.2) as usize) < max_age,
_ => false,
}
}
    /// Record `sig` under `last_id`, rejecting duplicates.
    ///
    /// Returns `LastIdNotFound` when `last_id` is unknown or older than
    /// `MAX_ENTRY_IDS` registrations.
    // NOTE(review): this uses `<= MAX_ENTRY_IDS` while `check_entry_id_age`
    // uses a strict `<` — confirm the off-by-one difference is intentional.
    fn reserve_signature_with_last_id(
        last_ids: &mut LastIds,
        last_id: &Hash,
        sig: &Signature,
    ) -> Result<()> {
        if let Some(entry) = last_ids.sigs.get_mut(last_id) {
            if ((last_ids.nth - entry.2) as usize) <= MAX_ENTRY_IDS {
                return Self::reserve_signature(&mut entry.0, sig);
            }
        }
        Err(BankError::LastIdNotFound)
    }
#[cfg(test)]
fn reserve_signature_with_last_id_test(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
let mut last_ids = self.last_ids.write().unwrap();
Self::reserve_signature_with_last_id(&mut last_ids, last_id, sig)
}
fn update_signature_status(
signatures: &mut SignatureStatusMap,
signature: &Signature,
result: &Result<()>,
) {
let entry = signatures.entry(*signature).or_insert(Ok(()));
*entry = result.clone();
}
    /// Record `result` for `signature` under `last_id`, if that id is still
    /// tracked; silently does nothing for ids that have been pruned.
    fn update_signature_status_with_last_id(
        last_ids_sigs: &mut HashMap<Hash, (SignatureStatusMap, u64, isize)>,
        signature: &Signature,
        result: &Result<()>,
        last_id: &Hash,
    ) {
        if let Some(entry) = last_ids_sigs.get_mut(last_id) {
            Self::update_signature_status(&mut entry.0, signature, result);
        }
    }
    /// Record each transaction's execution status under its `last_id` and
    /// notify any subscribers waiting on those signatures.
    fn update_transaction_statuses(&self, txs: &[Transaction], res: &[Result<()>]) {
        let mut last_ids = self.last_ids.write().unwrap();
        for (i, tx) in txs.iter().enumerate() {
            Self::update_signature_status_with_last_id(
                &mut last_ids.sigs,
                &tx.signature,
                &res[i],
                &tx.last_id,
            );
            if res[i] != Err(BankError::SignatureNotFound) {
                let status = match res[i] {
                    Ok(_) => RpcSignatureStatus::Confirmed,
                    Err(BankError::ProgramRuntimeError(_)) => {
                        RpcSignatureStatus::ProgramRuntimeError
                    }
                    Err(_) => RpcSignatureStatus::GenericFailure,
                };
                // NOTE(review): `status` can only be one of the three values
                // produced by the match above, so this guard can never be
                // false — confirm whether it is vestigial.
                if status != RpcSignatureStatus::SignatureNotFound {
                    self.check_signature_subscriptions(&tx.signature, status);
                }
            }
        }
    }
/// Look through the last_ids and find all the valid ids
/// This is batched to avoid holding the lock for a significant amount of time
///
/// Return a vec of tuple of (valid index, timestamp)
/// index is into the passed ids slice to avoid copying hashes
pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> {
let last_ids = self.last_ids.read().unwrap();
let mut ret = Vec::new();
for (i, id) in ids.iter().enumerate() {
if let Some(entry) = last_ids.sigs.get(id) {
if ((last_ids.nth - entry.2) as usize) <= MAX_ENTRY_IDS {
ret.push((i, entry.1));
}
}
}
ret
}
    /// Tell the bank which Entry IDs exist on the ledger. This function
    /// assumes subsequent calls correspond to later entries, and will boot
    /// the oldest ones once its internal cache is full. Once boot, the
    /// bank will reject transactions using that `last_id`.
    pub fn register_entry_id(&self, last_id: &Hash) {
        let mut last_ids = self.last_ids.write().unwrap();
        // Copy `nth` out first: the `retain` closure below cannot borrow
        // `last_ids` while `last_ids.sigs` is mutably borrowed.
        let last_ids_nth = last_ids.nth;
        // this clean up can be deferred until sigs gets larger
        // because we verify entry.nth every place we check for validity
        if last_ids.sigs.len() >= MAX_ENTRY_IDS {
            last_ids
                .sigs
                .retain(|_, (_, _, nth)| ((last_ids_nth - *nth) as usize) <= MAX_ENTRY_IDS);
        }
        last_ids
            .sigs
            .insert(*last_id, (HashMap::new(), timestamp(), last_ids_nth));
        last_ids.nth += 1;
        last_ids.last = Some(*last_id);
        inc_new_counter_info!("bank-register_entry_id-registered", 1);
    }
/// Process a Transaction. This is used for unit tests and simply calls the vector Bank::process_transactions method.
pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
let txs = vec![tx.clone()];
match self.process_transactions(&txs)[0] {
Err(ref e) => {
info!("process_transaction error: {:?}", e);
Err((*e).clone())
}
Ok(_) => Ok(()),
}
}
fn lock_account(
account_locks: &mut HashSet<Pubkey>,
keys: &[Pubkey],
error_counters: &mut ErrorCounters,
) -> Result<()> {
// Copy all the accounts
for k in keys {
if account_locks.contains(k) {
error_counters.account_in_use += 1;
return Err(BankError::AccountInUse);
}
}
for k in keys {
account_locks.insert(*k);
}
Ok(())
}
fn unlock_account(tx: &Transaction, result: &Result<()>, account_locks: &mut HashSet<Pubkey>) {
match result {
Err(BankError::AccountInUse) => (),
_ => for k in &tx.account_keys {
account_locks.remove(k);
},
}
}
fn load_account(
&self,
tx: &Transaction,
accounts: &HashMap<Pubkey, Account>,
last_ids: &mut LastIds,
max_age: usize,
error_counters: &mut ErrorCounters,
) -> Result<Vec<Account>> {
// Copy all the accounts
if accounts.get(&tx.account_keys[0]).is_none() {
error_counters.account_not_found += 1;
Err(BankError::AccountNotFound)
} else if accounts.get(&tx.account_keys[0]).unwrap().tokens < tx.fee {
error_counters.insufficient_funds += 1;
Err(BankError::InsufficientFundsForFee)
} else {
if !Self::check_entry_id_age(&last_ids, tx.last_id, max_age) {
error_counters.last_id_not_found += 1;
return Err(BankError::LastIdNotFound);
}
// There is no way to predict what contract will execute without an error
// If a fee can pay for execution then the contract will be scheduled
let err = Self::reserve_signature_with_last_id(last_ids, &tx.last_id, &tx.signature);
if let Err(BankError::LastIdNotFound) = err {
error_counters.reserve_last_id += 1;
} else if let Err(BankError::DuplicateSignature) = err {
error_counters.duplicate_signature += 1;
}
err?;
let mut called_accounts: Vec<Account> = tx
.account_keys
.iter()
.map(|key| accounts.get(key).cloned().unwrap_or_default())
.collect();
called_accounts[0].tokens -= tx.fee;
Ok(called_accounts)
}
}
    /// This function will prevent multiple threads from modifying the same account state at the
    /// same time
    ///
    /// Returns one result per transaction; `AccountInUse` marks the ones
    /// whose keys overlapped an already-locked account.
    #[must_use]
    fn lock_accounts(&self, txs: &[Transaction]) -> Vec<Result<()>> {
        let mut account_locks = self.account_locks.lock().unwrap();
        let mut error_counters = ErrorCounters::default();
        let rv = txs
            .iter()
            .map(|tx| Self::lock_account(&mut account_locks, &tx.account_keys, &mut error_counters))
            .collect();
        if error_counters.account_in_use != 0 {
            inc_new_counter_info!(
                "bank-process_transactions-account_in_use",
                error_counters.account_in_use
            );
        }
        rv
    }
/// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
fn unlock_accounts(&self, txs: &[Transaction], results: &[Result<()>]) {
debug!("bank unlock accounts");
let mut account_locks = self.account_locks.lock().unwrap();
txs.iter()
.zip(results.iter())
.for_each(|(tx, result)| Self::unlock_account(tx, result, &mut account_locks));
}
fn load_accounts(
&self,
txs: &[Transaction],
results: Vec<Result<()>>,
max_age: usize,
error_counters: &mut ErrorCounters,
) -> Vec<(Result<Vec<Account>>)> {
let accounts = self.accounts.read().unwrap();
let mut last_ids = self.last_ids.write().unwrap();
txs.iter()
.zip(results.into_iter())
.map(|etx| match etx {
(tx, Ok(())) => {
self.load_account(tx, &accounts, &mut last_ids, max_age, error_counters)
}
(_, Err(e)) => Err(e),
}).collect()
}
    /// Verify one account's state after an instruction ran: the owner may
    /// only change via the system program, only the owning contract may
    /// reduce a balance, and balances may never go negative.
    pub fn verify_transaction(
        instruction_index: usize,
        tx_program_id: &Pubkey,
        pre_program_id: &Pubkey,
        pre_tokens: i64,
        account: &Account,
    ) -> Result<()> {
        // Verify the transaction
        // Make sure that program_id is still the same or this was just assigned by the system call contract
        if *pre_program_id != account.program_id && !SystemProgram::check_id(&tx_program_id) {
            return Err(BankError::ModifiedContractId(instruction_index as u8));
        }
        // For accounts unassigned to the contract, the individual balance of each accounts cannot decrease.
        if *tx_program_id != account.program_id && pre_tokens > account.tokens {
            return Err(BankError::ExternalAccountTokenSpend(
                instruction_index as u8,
            ));
        }
        if account.tokens < 0 {
            return Err(BankError::ResultWithNegativeTokens(instruction_index as u8));
        }
        Ok(())
    }
    /// Execute a function with a subset of accounts as writable references.
    /// Since the subset can point to the same references, in any order there is no way
    /// for the borrow checker to track them with regards to the original set.
    // NOTE(review): nothing here prevents `ixes` from containing duplicate
    // indices, which would alias two `&mut` to the same account — confirm
    // callers guarantee uniqueness.
    fn with_subset<F, A>(accounts: &mut [Account], ixes: &[u8], func: F) -> A
    where
        F: Fn(&mut [&mut Account]) -> A,
    {
        let mut subset: Vec<&mut Account> = ixes
            .iter()
            .map(|ix| {
                // SAFETY-style argument given by the original authors:
                let ptr = &mut accounts[*ix as usize] as *mut Account;
                // lifetime of this unsafe is only within the scope of the closure
                // there is no way to reorder them without breaking borrow checker rules
                unsafe { &mut *ptr }
            }).collect();
        func(&mut subset)
    }
    /// Execute an instruction
    /// This method calls the instruction's program entry pont method and verifies that the result of
    /// the call does not violate the bank's accounting rules.
    /// The accounts are committed back to the bank only if this function returns Ok(_).
    ///
    /// Flow: snapshot pre-state, dispatch to the matching built-in program
    /// (or walk the loader chain for native programs), then verify owner /
    /// balance invariants and that total tokens are conserved.
    fn execute_instruction(
        &self,
        tx: &Transaction,
        instruction_index: usize,
        program_accounts: &mut [&mut Account],
    ) -> Result<()> {
        let tx_program_id = tx.program_id(instruction_index);
        // TODO: the runtime should be checking read/write access to memory
        // we are trusting the hard coded contracts not to clobber or allocate
        let pre_total: i64 = program_accounts.iter().map(|a| a.tokens).sum();
        // Snapshot (owner, balance) per account for post-execution checks.
        let pre_data: Vec<_> = program_accounts
            .iter_mut()
            .map(|a| (a.program_id, a.tokens))
            .collect();
        // Check account subscriptions before storing data for notifications
        let subscriptions = self.account_subscriptions.read().unwrap();
        let pre_userdata: Vec<_> = tx
            .account_keys
            .iter()
            .enumerate()
            .zip(program_accounts.iter_mut())
            .filter(|((_, pubkey), _)| subscriptions.get(&pubkey).is_some())
            .map(|((i, pubkey), a)| ((i, pubkey), a.userdata.clone()))
            .collect();
        // Call the contract method
        // It's up to the contract to implement its own rules on moving funds
        if SystemProgram::check_id(&tx_program_id) {
            if SystemProgram::process_transaction(&tx, instruction_index, program_accounts).is_err()
            {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else if BudgetState::check_id(&tx_program_id) {
            if BudgetState::process_transaction(&tx, instruction_index, program_accounts).is_err() {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else if StorageProgram::check_id(&tx_program_id) {
            if StorageProgram::process_transaction(&tx, instruction_index, program_accounts)
                .is_err()
            {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else if TicTacToeProgram::check_id(&tx_program_id) {
            if TicTacToeProgram::process_transaction(&tx, instruction_index, program_accounts)
                .is_err()
            {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else if TicTacToeDashboardProgram::check_id(&tx_program_id) {
            if TicTacToeDashboardProgram::process_transaction(
                &tx,
                instruction_index,
                program_accounts,
            ).is_err()
            {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else if TokenProgram::check_id(&tx_program_id) {
            if TokenProgram::process_transaction(&tx, instruction_index, program_accounts).is_err()
            {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        } else {
            // Not a built-in: follow the loader chain up to the native
            // loader, collecting each loader account along the way.
            let mut depth = 0;
            let mut keys = Vec::new();
            let mut accounts = Vec::new();
            let mut program_id = tx.program_ids[instruction_index];
            loop {
                if native_loader::check_id(&program_id) {
                    // at the root of the chain, ready to dispatch
                    break;
                }
                if depth >= 5 {
                    return Err(BankError::CallChainTooDeep);
                }
                depth += 1;
                let program = match self.get_account(&program_id) {
                    Some(program) => program,
                    None => return Err(BankError::AccountNotFound),
                };
                if !program.executable || program.loader_program_id == Pubkey::default() {
                    return Err(BankError::AccountNotFound);
                }
                // add loader to chain
                keys.insert(0, program_id);
                accounts.insert(0, program.clone());
                program_id = program.loader_program_id;
            }
            // Loader-chain accounts first, then the instruction's accounts.
            let mut keyed_accounts: Vec<_> = (&keys)
                .into_iter()
                .zip(accounts.iter_mut())
                .map(|(key, account)| KeyedAccount { key, account })
                .collect();
            let mut keyed_accounts2: Vec<_> = (&tx.instructions[instruction_index].accounts)
                .into_iter()
                .zip(program_accounts.iter_mut())
                .map(|(index, account)| KeyedAccount {
                    key: &tx.account_keys[*index as usize],
                    account,
                }).collect();
            keyed_accounts.append(&mut keyed_accounts2);
            if !native_loader::process_transaction(
                &mut keyed_accounts,
                &tx.instructions[instruction_index].userdata,
            ) {
                return Err(BankError::ProgramRuntimeError(instruction_index as u8));
            }
        }
        // Verify the transaction
        for ((pre_program_id, pre_tokens), post_account) in
            pre_data.iter().zip(program_accounts.iter())
        {
            Self::verify_transaction(
                instruction_index,
                &tx_program_id,
                pre_program_id,
                *pre_tokens,
                post_account,
            )?;
        }
        // Send notifications
        for ((i, pubkey), userdata) in &pre_userdata {
            let account = &program_accounts[*i];
            if userdata != &account.userdata {
                self.check_account_subscriptions(&pubkey, &account);
            }
        }
        // The total sum of all the tokens in all the pages cannot change.
        let post_total: i64 = program_accounts.iter().map(|a| a.tokens).sum();
        if pre_total != post_total {
            Err(BankError::UnbalancedTransaction(instruction_index as u8))
        } else {
            Ok(())
        }
    }
/// Execute a transaction.
///
/// Runs every instruction of `tx` in order against the loaded accounts,
/// stopping at the first failing instruction. The caller commits the
/// accounts back to the bank only when every instruction succeeds.
fn execute_transaction(&self, tx: &Transaction, tx_accounts: &mut [Account]) -> Result<()> {
    tx.instructions
        .iter()
        .enumerate()
        .try_for_each(|(instruction_index, instruction)| {
            // Narrow the account set to just the ones this instruction names.
            Self::with_subset(tx_accounts, &instruction.accounts, |program_accounts| {
                self.execute_instruction(tx, instruction_index, program_accounts)
            })
        })
}
/// Commit post-execution account states back to the bank.
///
/// Only transactions whose load result (`loaded`) and execution result
/// (`res`) are both `Ok` are stored. Accounts whose token balance dropped
/// to zero are purged from the bank rather than stored.
pub fn store_accounts(
    &self,
    txs: &[Transaction],
    res: &[Result<()>],
    loaded: &[Result<Vec<Account>>],
) {
    let mut accounts = self.accounts.write().unwrap();
    for (i, racc) in loaded.iter().enumerate() {
        // Skip transactions that failed to load or failed to execute.
        if res[i].is_err() || racc.is_err() {
            continue;
        }
        let tx = &txs[i];
        let acc = racc.as_ref().unwrap();
        for (key, account) in tx.account_keys.iter().zip(acc.iter()) {
            //purge if 0
            if account.tokens == 0 {
                accounts.remove(key);
            } else {
                // Single-lookup insert. Replaces the previous
                // entry().or_insert_with() + assignment + assert_eq, which
                // hashed the key twice and asserted a tautology (it compared
                // the just-stored clone's tokens against itself).
                accounts.insert(*key, account.clone());
            }
        }
    }
}
/// Lock, execute, PoH-record, and unlock a batch of transactions.
///
/// Fails only when recording to the PoH stream fails; per-transaction
/// failures are captured in the internal results and do not abort the batch.
/// Each pipeline stage is individually timed for the debug log.
pub fn process_and_record_transactions(
    &self,
    txs: &[Transaction],
    poh: &PohRecorder,
) -> Result<()> {
    let now = Instant::now();
    // Once accounts are locked, other threads cannot encode transactions that will modify the
    // same account state
    let locked_accounts = self.lock_accounts(txs);
    let lock_time = now.elapsed();
    let now = Instant::now();
    // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
    // the likelihood of any single thread getting starved and processing old ids.
    // TODO: Banking stage threads should be prioritized to complete faster than this queue
    // expires.
    let results = self.execute_and_commit_transactions(txs, locked_accounts, MAX_ENTRY_IDS / 2);
    let process_time = now.elapsed();
    let now = Instant::now();
    self.record_transactions(txs, &results, poh)?;
    let record_time = now.elapsed();
    let now = Instant::now();
    // Once the accounts are unlocked new transactions can enter the pipeline to process them
    self.unlock_accounts(&txs, &results);
    let unlock_time = now.elapsed();
    debug!(
        "lock: {}us process: {}us record: {}us unlock: {}us txs_len={}",
        duration_as_us(&lock_time),
        duration_as_us(&process_time),
        duration_as_us(&record_time),
        duration_as_us(&unlock_time),
        txs.len(),
    );
    Ok(())
}
/// Record the successfully executed transactions (per `results`) into the
/// PoH stream as a single hashed batch.
///
/// Transactions whose result is `Err` are skipped (and logged). Returns
/// `Err(BankError::RecordFailure)` only when the PoH recorder itself fails.
fn record_transactions(
    &self,
    txs: &[Transaction],
    results: &[Result<()>],
    poh: &PohRecorder,
) -> Result<()> {
    let processed_transactions: Vec<_> = results
        .iter()
        .zip(txs.iter())
        .filter_map(|(r, x)| match r {
            Ok(_) => Some(x.clone()),
            Err(ref e) => {
                debug!("process transaction failed {:?}", e);
                None
            }
        }).collect();
    // unlock all the accounts with errors which are filtered by the above `filter_map`
    if !processed_transactions.is_empty() {
        let hash = Transaction::hash(&processed_transactions);
        debug!("processed ok: {} {}", processed_transactions.len(), hash);
        // record and unlock will unlock all the successful transactions
        poh.record(hash, processed_transactions).map_err(|e| {
            warn!("record failure: {:?}", e);
            BankError::RecordFailure
        })?;
    }
    Ok(())
}
/// Process a batch of transactions.
///
/// Loads the referenced accounts (honoring the per-transaction lock results
/// in `locked_accounts`), executes every transaction whose accounts loaded,
/// commits the resulting account states, and updates signature statuses and
/// metric counters. Returns one `Result` per input transaction, in order.
/// `max_age` bounds how old a transaction's `last_id` may be before it is
/// rejected during loading.
#[must_use]
pub fn execute_and_commit_transactions(
    &self,
    txs: &[Transaction],
    locked_accounts: Vec<Result<()>>,
    max_age: usize,
) -> Vec<Result<()>> {
    debug!("processing transactions: {}", txs.len());
    let mut error_counters = ErrorCounters::default();
    let now = Instant::now();
    let mut loaded_accounts =
        self.load_accounts(txs, locked_accounts, max_age, &mut error_counters);
    let load_elapsed = now.elapsed();
    let now = Instant::now();
    // Execute only the transactions whose accounts loaded; load errors are
    // passed through unchanged.
    let executed: Vec<Result<()>> = loaded_accounts
        .iter_mut()
        .zip(txs.iter())
        .map(|(acc, tx)| match acc {
            Err(e) => Err(e.clone()),
            Ok(ref mut accounts) => self.execute_transaction(tx, accounts),
        }).collect();
    let execution_elapsed = now.elapsed();
    let now = Instant::now();
    self.store_accounts(txs, &executed, &loaded_accounts);
    // once committed there is no way to unroll
    let write_elapsed = now.elapsed();
    debug!(
        "load: {}us execute: {}us store: {}us txs_len={}",
        duration_as_us(&load_elapsed),
        duration_as_us(&execution_elapsed),
        duration_as_us(&write_elapsed),
        txs.len(),
    );
    self.update_transaction_statuses(txs, &executed);
    let mut tx_count = 0;
    let mut err_count = 0;
    for (r, tx) in executed.iter().zip(txs.iter()) {
        if r.is_ok() {
            tx_count += 1;
        } else {
            // Log only the first error to avoid flooding the log.
            if err_count == 0 {
                info!("tx error: {:?} {:?}", r, tx);
            }
            err_count += 1;
        }
    }
    if err_count > 0 {
        info!("{} errors of {} txs", err_count, err_count + tx_count);
        inc_new_counter_info!(
            "bank-process_transactions-account_not_found",
            error_counters.account_not_found
        );
        inc_new_counter_info!("bank-process_transactions-error_count", err_count);
    }
    // Only successful transactions are counted toward transaction_count().
    self.transaction_count
        .fetch_add(tx_count, Ordering::Relaxed);
    inc_new_counter_info!("bank-process_transactions-txs", tx_count);
    if 0 != error_counters.last_id_not_found {
        inc_new_counter_info!(
            "bank-process_transactions-error-last_id_not_found",
            error_counters.last_id_not_found
        );
    }
    if 0 != error_counters.reserve_last_id {
        inc_new_counter_info!(
            "bank-process_transactions-error-reserve_last_id",
            error_counters.reserve_last_id
        );
    }
    if 0 != error_counters.duplicate_signature {
        inc_new_counter_info!(
            "bank-process_transactions-error-duplicate_signature",
            error_counters.duplicate_signature
        );
    }
    if 0 != error_counters.insufficient_funds {
        inc_new_counter_info!(
            "bank-process_transactions-error-insufficient_funds",
            error_counters.insufficient_funds
        );
    }
    executed
}
/// Process a batch of transactions end-to-end: lock the referenced
/// accounts, execute and commit, then release the locks. Returns one
/// `Result` per input transaction, in order.
#[must_use]
pub fn process_transactions(&self, txs: &[Transaction]) -> Vec<Result<()>> {
    let lock_results = self.lock_accounts(txs);
    let commit_results =
        self.execute_and_commit_transactions(txs, lock_results, MAX_ENTRY_IDS);
    self.unlock_accounts(txs, &commit_results);
    commit_results
}
/// Process a single ledger entry.
///
/// A tick entry bumps `tick_height` and registers its id as a valid
/// `last_id`; a transaction entry executes its transactions, failing fast
/// on the first bad one. Votes are forwarded to the leader scheduler after
/// a successful entry either way.
pub fn process_entry(
    &self,
    entry: &Entry,
    tick_height: &mut u64,
    leader_scheduler: &mut LeaderScheduler,
) -> Result<()> {
    if entry.is_tick() {
        *tick_height += 1;
        self.register_entry_id(&entry.id);
    } else {
        for result in self.process_transactions(&entry.transactions) {
            result?;
        }
    }
    self.process_entry_votes(entry, *tick_height, leader_scheduler);
    Ok(())
}
/// Feed any votes contained in `entry` into the leader scheduler's active
/// set, then let the scheduler react to the new tick height.
fn process_entry_votes(
    &self,
    entry: &Entry,
    tick_height: u64,
    leader_scheduler: &mut LeaderScheduler,
) {
    entry
        .transactions
        .iter()
        .filter(|tx| tx.vote().is_some())
        .for_each(|tx| leader_scheduler.push_vote(*tx.from(), tick_height));
    leader_scheduler.update_height(tick_height, self);
}
/// Process an ordered list of entries, populating a circular buffer "tail"
/// as we go.
///
/// `tail`/`tail_idx` act as a ring of capacity `WINDOW_SIZE`: once the
/// vector has grown past `tail_idx`, new entries overwrite in place and the
/// index wraps. Returns the number of entries processed; `tick_height` is
/// advanced by `process_entry` for each tick encountered.
fn process_entries_tail(
    &self,
    entries: &[Entry],
    tail: &mut Vec<Entry>,
    tail_idx: &mut usize,
    tick_height: &mut u64,
    leader_scheduler: &mut LeaderScheduler,
) -> Result<u64> {
    let mut entry_count = 0;
    for entry in entries {
        // Overwrite in place once the buffer reaches this slot, otherwise
        // keep growing it toward WINDOW_SIZE.
        if tail.len() > *tail_idx {
            tail[*tail_idx] = entry.clone();
        } else {
            tail.push(entry.clone());
        }
        *tail_idx = (*tail_idx + 1) % WINDOW_SIZE as usize;
        entry_count += 1;
        // TODO: We prepare for implementing voting contract by making the associated
        // process_entries functions aware of the vote-tracking structure inside
        // the leader scheduler. Next we will extract the vote tracking structure
        // out of the leader scheduler, and into the bank, and remove the leader
        // scheduler from these banking functions.
        self.process_entry(entry, tick_height, leader_scheduler)?;
    }
    Ok(entry_count)
}
/// Process an ordered list of entries.
///
/// Thin public wrapper that delegates to the parallel implementation, so
/// callers do not depend on the execution strategy.
pub fn process_entries(&self, entries: &[Entry]) -> Result<()> {
    self.par_process_entries(entries)
}
/// Return a clone of the first `Err` in `results`, or `Ok(())` when every
/// result succeeded. The input slice is left untouched.
pub fn first_err(results: &[Result<()>]) -> Result<()> {
    match results.iter().find(|r| r.is_err()) {
        Some(err) => err.clone(),
        None => Ok(()),
    }
}
/// Execute a group of entries in parallel (one rayon task per entry), using
/// the pre-acquired account-lock results paired with each entry.
///
/// Each task commits its entry's transactions and then unlocks their
/// accounts. Returns the first error produced by any entry, or `Ok(())`.
pub fn par_execute_entries(&self, entries: &[(&Entry, Vec<Result<()>>)]) -> Result<()> {
    inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
    let results: Vec<Result<()>> = entries
        .into_par_iter()
        .map(|(e, locks)| {
            // `to_vec` clones the lock results because
            // execute_and_commit_transactions takes them by value.
            let results = self.execute_and_commit_transactions(
                &e.transactions,
                locks.to_vec(),
                MAX_ENTRY_IDS,
            );
            self.unlock_accounts(&e.transactions, &results);
            Self::first_err(&results)
        }).collect();
    Self::first_err(&results)
}
/// Process entries in parallel.
/// 1. In order, lock accounts for each entry while the lock succeeds, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available, goto 1
pub fn par_process_entries(&self, entries: &[Entry]) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut mt_group = vec![];
    for entry in entries {
        if entry.is_tick() {
            // if its a tick, execute the group and register the tick
            self.par_execute_entries(&mt_group)?;
            self.register_entry_id(&entry.id);
            mt_group = vec![];
            continue;
        }
        // try to lock the accounts
        let locked = self.lock_accounts(&entry.transactions);
        // if any of the locks error out
        // execute the current group
        if Self::first_err(&locked).is_err() {
            // A lock conflict means this entry touches an account already
            // held by an earlier entry in the group: flush the group first,
            // then the locks can be re-acquired cleanly.
            self.par_execute_entries(&mt_group)?;
            mt_group = vec![];
            //reset the lock and push the entry
            let locked = self.lock_accounts(&entry.transactions);
            mt_group.push((entry, locked));
        } else {
            // push the entry to the mt_group
            mt_group.push((entry, locked));
        }
    }
    // Flush whatever remains after the final entry.
    self.par_execute_entries(&mt_group)?;
    Ok(())
}
/// Append entry blocks to the ledger, verifying them along the way.
///
/// Streams `entries` in chunks of `VERIFY_BLOCK_SIZE`, verifies each
/// chunk's proof-of-history chain against the running `id`, and processes
/// it into the `tail` ring buffer. Returns the final
/// `(tick_height, entry_height)` on success.
fn process_blocks<I>(
    &self,
    start_hash: Hash,
    entries: I,
    tail: &mut Vec<Entry>,
    tail_idx: &mut usize,
    leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, u64)>
where
    I: IntoIterator<Item = Entry>,
{
    // Ledger verification needs to be parallelized, but we can't pull the whole
    // thing into memory. We therefore chunk it.
    let mut entry_height = *tail_idx as u64;
    let mut tick_height = 0;
    // Seed the tick count from the entries already sitting in the tail.
    for entry in &tail[0..*tail_idx] {
        tick_height += entry.is_tick() as u64
    }
    let mut id = start_hash;
    for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
        let block: Vec<_> = block.collect();
        if !block.verify(&id) {
            warn!("Ledger proof of history failed at entry: {}", entry_height);
            return Err(BankError::LedgerVerificationFailed);
        }
        // The last id of each verified block seeds verification of the next.
        id = block.last().unwrap().id;
        let entry_count = self.process_entries_tail(
            &block,
            tail,
            tail_idx,
            &mut tick_height,
            leader_scheduler,
        )?;
        entry_height += entry_count;
    }
    Ok((tick_height, entry_height))
}
/// Process a full ledger.
///
/// Consumes the two genesis entries (the first seeds the ledger id; the
/// second carries the initial mint deposit), applies the deposit, then
/// verifies and processes the remainder via `process_blocks`. Returns
/// `(tick_height, entry_height, tail)` where `tail` holds the last
/// up-to-`WINDOW_SIZE` entries in ledger order.
///
/// Panics (`expect`/`assert`) when the ledger is malformed.
pub fn process_ledger<I>(
    &self,
    entries: I,
    leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, u64, Vec<Entry>)>
where
    I: IntoIterator<Item = Entry>,
{
    let mut entries = entries.into_iter();
    // The first item in the ledger is required to be an entry with zero num_hashes,
    // which implies its id can be used as the ledger's seed.
    let entry0 = entries.next().expect("invalid ledger: empty");
    // The second item in the ledger is a special transaction where the to and from
    // fields are the same. That entry should be treated as a deposit, not a
    // transfer to oneself.
    let entry1 = entries
        .next()
        .expect("invalid ledger: need at least 2 entries");
    {
        let tx = &entry1.transactions[0];
        assert!(SystemProgram::check_id(tx.program_id(0)), "Invalid ledger");
        let instruction: SystemProgram = deserialize(tx.userdata(0)).unwrap();
        let deposit = if let SystemProgram::Move { tokens } = instruction {
            Some(tokens)
        } else {
            None
        }.expect("invalid ledger, needs to start with a contract");
        {
            // Apply the genesis deposit directly to the account store.
            let mut accounts = self.accounts.write().unwrap();
            let account = accounts
                .entry(tx.account_keys[0])
                .or_insert_with(Account::default);
            account.tokens += deposit;
            trace!("applied genesis payment {:?} => {:?}", deposit, account);
        }
    }
    self.register_entry_id(&entry0.id);
    self.register_entry_id(&entry1.id);
    let entry1_id = entry1.id;
    let mut tail = Vec::with_capacity(WINDOW_SIZE as usize);
    tail.push(entry0);
    tail.push(entry1);
    let mut tail_idx = 2;
    let (tick_height, entry_height) = self.process_blocks(
        entry1_id,
        entries,
        &mut tail,
        &mut tail_idx,
        leader_scheduler,
    )?;
    // check if we need to rotate tail: once the ring filled and wrapped,
    // rotating by tail_idx restores oldest-to-newest order.
    if tail.len() == WINDOW_SIZE as usize {
        tail.rotate_left(tail_idx)
    }
    Ok((tick_height, entry_height, tail))
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
/// Returns the signature of the processed transaction.
pub fn transfer(
    &self,
    n: i64,
    keypair: &Keypair,
    to: Pubkey,
    last_id: Hash,
) -> Result<Signature> {
    let tx = Transaction::system_new(keypair, to, n, last_id);
    let signature = tx.signature;
    self.process_transaction(&tx)?;
    Ok(signature)
}
/// Extract the user-visible token balance from an account, dispatching on
/// which on-chain program owns it. Unknown programs fall back to the raw
/// token count.
pub fn read_balance(account: &Account) -> i64 {
    if SystemProgram::check_id(&account.program_id) {
        return SystemProgram::get_balance(account);
    }
    if BudgetState::check_id(&account.program_id) {
        return BudgetState::get_balance(account);
    }
    account.tokens
}
/// Each contract would need to be able to introspect its own state;
/// this is hard coded to the budget contract language. Accounts the bank
/// has never seen report a balance of zero.
pub fn get_balance(&self, pubkey: &Pubkey) -> i64 {
    match self.get_account(pubkey) {
        Some(account) => Self::read_balance(&account),
        None => 0,
    }
}
/// Fetch a copy of the account stored under `pubkey`, if any.
pub fn get_account(&self, pubkey: &Pubkey) -> Option<Account> {
    self.accounts
        .read()
        .expect("'accounts' read lock in get_balance")
        .get(pubkey)
        .cloned()
}
/// Number of transactions this bank has executed successfully; failed
/// transactions are not counted (see `execute_and_commit_transactions`).
pub fn transaction_count(&self) -> usize {
    self.transaction_count.load(Ordering::Relaxed)
}
/// Look up the recorded execution status of `signature` across all tracked
/// `last_id` generations, or `Err(SignatureNotFound)` when no generation
/// has seen it.
pub fn get_signature_status(&self, signature: &Signature) -> Result<()> {
    let last_ids = self.last_ids.read().unwrap();
    last_ids
        .sigs
        .values()
        .filter_map(|(signatures, _, _)| signatures.get(signature))
        .next()
        .cloned()
        .unwrap_or(Err(BankError::SignatureNotFound))
}
/// True when the bank has any record of `signature`, whether it succeeded
/// or failed.
pub fn has_signature(&self, signature: &Signature) -> bool {
    match self.get_signature_status(signature) {
        Err(BankError::SignatureNotFound) => false,
        _ => true,
    }
}
/// Status of `signature` within the generation identified by `last_id`, or
/// `None` when that pairing was never recorded.
pub fn get_signature(&self, last_id: &Hash, signature: &Signature) -> Option<Result<()>> {
    let last_ids = self.last_ids.read().unwrap();
    let generation = last_ids.sigs.get(last_id)?;
    generation.0.get(signature).cloned()
}
/// Hash the `accounts` HashMap. This represents a validator's interpretation
/// of the ledger up to the `last_id`, to be sent back to the leader when voting.
pub fn hash_internal_state(&self) -> Hash {
    // BTreeMap iteration is sorted by pubkey, giving a canonical
    // serialization regardless of HashMap ordering.
    let ordered_accounts: BTreeMap<_, _> = self
        .accounts
        .read()
        .unwrap()
        .iter()
        .map(|(pubkey, account)| (*pubkey, account.clone()))
        .collect();
    hash(&serialize(&ordered_accounts).unwrap())
}
/// Latest finality measurement recorded via `set_finality`; remains at the
/// initial value (usize::MAX per `test_finality`) until one is stored.
pub fn finality(&self) -> usize {
    self.finality_time.load(Ordering::Relaxed)
}
/// Record a finality measurement for later retrieval via `finality`.
pub fn set_finality(&self, finality: usize) {
    self.finality_time.store(finality, Ordering::Relaxed);
}
/// Register `sink` to be notified whenever the account at `pubkey`
/// changes, keyed by `bank_sub_id` so it can be removed later.
pub fn add_account_subscription(
    &self,
    bank_sub_id: Pubkey,
    pubkey: Pubkey,
    sink: Sink<Account>,
) {
    let mut subscriptions = self.account_subscriptions.write().unwrap();
    // Entry API: one hash lookup instead of the previous
    // get_mut-then-insert double lookup; behavior is identical.
    subscriptions
        .entry(pubkey)
        .or_insert_with(HashMap::new)
        .insert(bank_sub_id, sink);
}
/// Drop the subscription registered under (`bank_sub_id`, `pubkey`).
/// Returns true when a subscription was removed. When the last sink for
/// `pubkey` goes away, the whole per-pubkey map is removed as well.
pub fn remove_account_subscription(&self, bank_sub_id: &Pubkey, pubkey: &Pubkey) -> bool {
    let mut subscriptions = self.account_subscriptions.write().unwrap();
    match subscriptions.get_mut(pubkey) {
        // Exactly one sink left: fall through and drop the whole entry below.
        // NOTE(review): the entry is removed even if its sole key is not
        // `bank_sub_id` — confirm this is intended.
        Some(ref current_hashmap) if current_hashmap.len() == 1 => {}
        Some(current_hashmap) => {
            return current_hashmap.remove(bank_sub_id).is_some();
        }
        None => {
            return false;
        }
    }
    subscriptions.remove(pubkey).is_some()
}
/// Push the new `account` state to every sink subscribed to `pubkey`.
fn check_account_subscriptions(&self, pubkey: &Pubkey, account: &Account) {
    let subscriptions = self.account_subscriptions.read().unwrap();
    if let Some(sinks) = subscriptions.get(pubkey) {
        for sink in sinks.values() {
            sink.notify(Ok(account.clone())).wait().unwrap();
        }
    }
}
/// Register `sink` to be notified when the status of `signature` changes,
/// keyed by `bank_sub_id` so it can be removed later.
pub fn add_signature_subscription(
    &self,
    bank_sub_id: Pubkey,
    signature: Signature,
    sink: Sink<RpcSignatureStatus>,
) {
    let mut subscriptions = self.signature_subscriptions.write().unwrap();
    // Entry API: one hash lookup instead of the previous
    // get_mut-then-insert double lookup; behavior is identical.
    subscriptions
        .entry(signature)
        .or_insert_with(HashMap::new)
        .insert(bank_sub_id, sink);
}
/// Drop the subscription registered under (`bank_sub_id`, `signature`).
/// Returns true when a subscription was removed. When the last sink for
/// `signature` goes away, the whole per-signature map is removed as well.
pub fn remove_signature_subscription(
    &self,
    bank_sub_id: &Pubkey,
    signature: &Signature,
) -> bool {
    let mut subscriptions = self.signature_subscriptions.write().unwrap();
    match subscriptions.get_mut(signature) {
        // Exactly one sink left: fall through and drop the whole entry below.
        // NOTE(review): the entry is removed even if its sole key is not
        // `bank_sub_id` — confirm this is intended.
        Some(ref current_hashmap) if current_hashmap.len() == 1 => {}
        Some(current_hashmap) => {
            return current_hashmap.remove(bank_sub_id).is_some();
        }
        None => {
            return false;
        }
    }
    subscriptions.remove(signature).is_some()
}
/// One-shot notification: push `status` to every sink subscribed to
/// `signature`, then drop the subscription entry entirely.
fn check_signature_subscriptions(&self, signature: &Signature, status: RpcSignatureStatus) {
    let mut subscriptions = self.signature_subscriptions.write().unwrap();
    // Take the entry out up front: signature subscriptions are one-shot, so
    // this performs a single map lookup instead of the previous
    // get-then-remove pair (which also passed a redundant `&&Signature`).
    if let Some(sinks) = subscriptions.remove(signature) {
        for (_bank_sub_id, sink) in sinks.iter() {
            sink.notify(Ok(status)).wait().unwrap();
        }
    }
}
#[cfg(test)]
// Used to access accounts for things like controlling stake to control
// the eligible set of nodes for leader selection. Test-only escape hatch
// into the bank's raw account store; not compiled into release builds.
pub fn accounts(&self) -> &RwLock<HashMap<Pubkey, Account>> {
    &self.accounts
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use budget_program::BudgetState;
use entry::next_entry;
use entry::Entry;
use entry_writer::{self, EntryWriter};
use hash::hash;
use jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
use leader_scheduler::LeaderScheduler;
use ledger;
use logger;
use signature::Keypair;
use signature::{GenKeys, KeypairUtil};
use std;
use std::io::{BufReader, Cursor, Seek, SeekFrom};
use system_transaction::SystemTransaction;
use tokio::prelude::{Async, Stream};
use transaction::Instruction;
#[test]
fn test_bank_new() {
    // A freshly created bank credits the mint with the full token supply.
    let mint = Mint::new(10_000);
    assert_eq!(Bank::new(&mint).get_balance(&mint.pubkey()), 10_000);
}
#[test]
fn test_two_payments_to_one_party() {
    // Two successive transfers to the same recipient accumulate, and each
    // successful transfer bumps the transaction count.
    let mint = Mint::new(10_000);
    let recipient = Keypair::new().pubkey();
    let bank = Bank::new(&mint);
    assert_eq!(bank.last_id(), mint.last_id());
    for (amount, expected) in &[(1_000, 1_000), (500, 1_500)] {
        bank.transfer(*amount, &mint.keypair(), recipient, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&recipient), *expected);
    }
    assert_eq!(bank.transaction_count(), 2);
}
#[test]
fn test_one_source_two_tx_one_batch() {
    // Two transactions spending from the same source within one batch: the
    // second must fail with AccountInUse because the first holds the lock.
    let mint = Mint::new(1);
    let key1 = Keypair::new().pubkey();
    let key2 = Keypair::new().pubkey();
    let bank = Bank::new(&mint);
    assert_eq!(bank.last_id(), mint.last_id());
    let t1 = Transaction::system_move(&mint.keypair(), key1, 1, mint.last_id(), 0);
    let t2 = Transaction::system_move(&mint.keypair(), key2, 1, mint.last_id(), 0);
    let res = bank.process_transactions(&vec![t1.clone(), t2.clone()]);
    assert_eq!(res.len(), 2);
    assert_eq!(res[0], Ok(()));
    assert_eq!(res[1], Err(BankError::AccountInUse));
    // Only the first transfer landed.
    assert_eq!(bank.get_balance(&mint.pubkey()), 0);
    assert_eq!(bank.get_balance(&key1), 1);
    assert_eq!(bank.get_balance(&key2), 0);
    assert_eq!(bank.get_signature(&t1.last_id, &t1.signature), Some(Ok(())));
    // TODO: Transactions that fail to pay a fee could be dropped silently
    assert_eq!(
        bank.get_signature(&t2.last_id, &t2.signature),
        Some(Err(BankError::AccountInUse))
    );
}
#[test]
fn test_one_tx_two_out_atomic_fail() {
    // One transaction with two Move instructions, each spending the mint's
    // single token: the whole transaction must fail atomically, leaving
    // every balance untouched.
    let mint = Mint::new(1);
    let key1 = Keypair::new().pubkey();
    let key2 = Keypair::new().pubkey();
    let bank = Bank::new(&mint);
    let spend = SystemProgram::Move { tokens: 1 };
    let instructions = vec![
        Instruction {
            program_ids_index: 0,
            userdata: serialize(&spend).unwrap(),
            accounts: vec![0, 1],
        },
        Instruction {
            program_ids_index: 0,
            userdata: serialize(&spend).unwrap(),
            accounts: vec![0, 2],
        },
    ];
    let t1 = Transaction::new_with_instructions(
        &mint.keypair(),
        &[key1, key2],
        mint.last_id(),
        0,
        vec![SystemProgram::id()],
        instructions,
    );
    let res = bank.process_transactions(&vec![t1.clone()]);
    assert_eq!(res.len(), 1);
    // The second Move overdraws, so the error names instruction index 1.
    assert_eq!(res[0], Err(BankError::ResultWithNegativeTokens(1)));
    assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    assert_eq!(bank.get_balance(&key1), 0);
    assert_eq!(bank.get_balance(&key2), 0);
    assert_eq!(
        bank.get_signature(&t1.last_id, &t1.signature),
        Some(Err(BankError::ResultWithNegativeTokens(1)))
    );
}
#[test]
fn test_one_tx_two_out_atomic_pass() {
    // A single transaction paying two recipients succeeds atomically when
    // the source covers both outputs exactly.
    let mint = Mint::new(2);
    let payees = [Keypair::new().pubkey(), Keypair::new().pubkey()];
    let bank = Bank::new(&mint);
    let tx = Transaction::system_move_many(
        &mint.keypair(),
        &[(payees[0], 1), (payees[1], 1)],
        mint.last_id(),
        0,
    );
    let res = bank.process_transactions(&[tx.clone()]);
    assert_eq!(res.len(), 1);
    assert_eq!(res[0], Ok(()));
    assert_eq!(bank.get_balance(&mint.pubkey()), 0);
    assert_eq!(bank.get_balance(&payees[0]), 1);
    assert_eq!(bank.get_balance(&payees[1]), 1);
    assert_eq!(bank.get_signature(&tx.last_id, &tx.signature), Some(Ok(())));
}
#[test]
fn test_negative_tokens() {
    // Transfers with a negative amount are rejected and not counted.
    logger::setup();
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let recipient = Keypair::new().pubkey();
    let res = bank.transfer(-1, &mint.keypair(), recipient, mint.last_id());
    println!("{:?}", bank.get_account(&recipient));
    assert_matches!(res, Err(BankError::ResultWithNegativeTokens(0)));
    assert_eq!(bank.transaction_count(), 0);
}
// TODO: This test demonstrates that fees are not paid when a program fails.
// See github issue 1157 (https://github.com/solana-labs/solana/issues/1157)
#[test]
fn test_detect_failed_duplicate_transactions_issue_1157() {
    // Creating an account costing 2 tokens from a mint holding only 1 must
    // fail — but the signature must still be registered so that a
    // resubmitted duplicate is detected.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let dest = Keypair::new();
    // source with 0 contract context
    let tx = Transaction::system_create(
        &mint.keypair(),
        dest.pubkey(),
        mint.last_id(),
        2,
        0,
        Pubkey::default(),
        1,
    );
    let signature = tx.signature;
    assert!(!bank.has_signature(&signature));
    let res = bank.process_transaction(&tx);
    // Result failed, but signature is registered
    assert!(res.is_err());
    assert!(bank.has_signature(&signature));
    assert_matches!(
        bank.get_signature_status(&signature),
        Err(BankError::ResultWithNegativeTokens(0))
    );
    // The tokens didn't move, but the from address paid the transaction fee.
    assert_eq!(bank.get_balance(&dest.pubkey()), 0);
    // BUG: This should be the original balance minus the transaction fee.
    //assert_eq!(bank.get_balance(&mint.pubkey()), 0);
}
#[test]
fn test_account_not_found() {
    // Spending from a keypair the bank has never funded fails cleanly and
    // is not counted as a transaction.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let unknown = Keypair::new();
    assert_eq!(
        bank.transfer(1, &unknown, mint.pubkey(), mint.last_id()),
        Err(BankError::AccountNotFound)
    );
    assert_eq!(bank.transaction_count(), 0);
}
#[test]
fn test_insufficient_funds() {
    // Overdrawing fails atomically, leaves balances untouched, and is not
    // counted as a transaction.
    let mint = Mint::new(11_000);
    let bank = Bank::new(&mint);
    let pubkey = Keypair::new().pubkey();
    bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
        .unwrap();
    assert_eq!(bank.transaction_count(), 1);
    assert_eq!(bank.get_balance(&pubkey), 1_000);
    // 10_001 exceeds the mint's remaining 10_000.
    assert_matches!(
        bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
        Err(BankError::ResultWithNegativeTokens(0))
    );
    assert_eq!(bank.transaction_count(), 1);
    let mint_pubkey = mint.keypair().pubkey();
    assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
    assert_eq!(bank.get_balance(&pubkey), 1_000);
}
#[test]
fn test_transfer_to_newb() {
    // A transfer implicitly creates the destination account.
    let mint = Mint::new(10_000);
    let bank = Bank::new(&mint);
    let newcomer = Keypair::new().pubkey();
    bank.transfer(500, &mint.keypair(), newcomer, mint.last_id())
        .unwrap();
    assert_eq!(bank.get_balance(&newcomer), 500);
}
#[test]
fn test_duplicate_transaction_signature() {
    // Reserving the same signature twice under one last_id must fail.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let signature = Signature::default();
    let first = bank.reserve_signature_with_last_id_test(&signature, &mint.last_id());
    assert_eq!(first, Ok(()));
    let second = bank.reserve_signature_with_last_id_test(&signature, &mint.last_id());
    assert_eq!(second, Err(BankError::DuplicateSignature));
}
#[test]
fn test_clear_signatures() {
    // clear_signatures forgets reservations, allowing re-reservation.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let signature = Signature::default();
    bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
        .unwrap();
    bank.clear_signatures();
    let reserved_again =
        bank.reserve_signature_with_last_id_test(&signature, &mint.last_id());
    assert_eq!(reserved_again, Ok(()));
}
#[test]
fn test_get_signature_status() {
    // A reserved (not yet failed) signature reports a status of Ok.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let signature = Signature::default();
    bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
        .expect("reserve signature");
    assert_eq!(bank.get_signature_status(&signature), Ok(()));
}
#[test]
fn test_has_signature() {
    // A reserved signature is visible via has_signature.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let signature = Signature::default();
    bank.reserve_signature_with_last_id_test(&signature, &mint.last_id())
        .expect("reserve signature");
    assert!(bank.has_signature(&signature));
}
#[test]
fn test_reject_old_last_id() {
    // Registering MAX_ENTRY_IDS fresh ids evicts the mint's original
    // last_id from the recent-id window.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let signature = Signature::default();
    for i in 0..MAX_ENTRY_IDS {
        let last_id = hash(&serialize(&i).unwrap()); // Unique hash
        bank.register_entry_id(&last_id);
    }
    // Assert we're no longer able to use the oldest entry ID.
    assert_eq!(
        bank.reserve_signature_with_last_id_test(&signature, &mint.last_id()),
        Err(BankError::LastIdNotFound)
    );
}
#[test]
fn test_count_valid_ids() {
    // count_valid_ids reports entries only for ids the bank still tracks;
    // unknown or evicted ids are omitted.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let ids: Vec<_> = (0..MAX_ENTRY_IDS)
        .map(|i| {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);
            last_id
        }).collect();
    assert_eq!(bank.count_valid_ids(&[]).len(), 0);
    // The mint's id was evicted by the MAX_ENTRY_IDS registrations above.
    assert_eq!(bank.count_valid_ids(&[mint.last_id()]).len(), 0);
    // Each returned pair carries the index of its id in the input slice.
    for (i, id) in bank.count_valid_ids(&ids).iter().enumerate() {
        assert_eq!(id.0, i);
    }
}
#[test]
fn test_debits_before_credits() {
    // tx1 spends tokens that only exist if tx0's credit lands first; since
    // debits are applied before credits within a batch, tx1 must fail.
    let mint = Mint::new(2);
    let bank = Bank::new(&mint);
    let payee = Keypair::new();
    let tx0 = Transaction::system_new(&mint.keypair(), payee.pubkey(), 2, mint.last_id());
    let tx1 = Transaction::system_new(&payee, mint.pubkey(), 1, mint.last_id());
    let results = bank.process_transactions(&[tx0, tx1]);
    assert!(results[1].is_err());
    // Assert bad transactions aren't counted.
    assert_eq!(bank.transaction_count(), 1);
}
#[test]
fn test_process_empty_entry_is_registered() {
    // Even an entry carrying no transactions must register its id as a
    // valid last_id once processed.
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let keypair = Keypair::new();
    let entry = next_entry(&mint.last_id(), 1, vec![]);
    let tx = Transaction::system_new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
    // First, ensure the TX is rejected because of the unregistered last ID
    assert_eq!(
        bank.process_transaction(&tx),
        Err(BankError::LastIdNotFound)
    );
    // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
    bank.process_entries(&[entry]).unwrap();
    assert_eq!(bank.process_transaction(&tx), Ok(()));
}
#[test]
fn test_process_genesis() {
    // Replaying just the genesis entries funds the mint account.
    let mint = Mint::new(1);
    let bank = Bank::default();
    bank.process_ledger(mint.create_entries(), &mut LeaderScheduler::default())
        .unwrap();
    assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
/// Build a block paying 1 token from the mint to each keypair, with a tick
/// after every payment so each subsequent payment uses a fresh last_id.
fn create_sample_block_with_next_entries_using_keypairs(
    mint: &Mint,
    keypairs: &[Keypair],
) -> impl Iterator<Item = Entry> {
    let mut last_id = mint.last_id();
    let mut hash = mint.last_id();
    let mut entries: Vec<Entry> = vec![];
    let num_hashes = 1;
    for k in keypairs {
        let txs = vec![Transaction::system_new(
            &mint.keypair(),
            k.pubkey(),
            1,
            last_id,
        )];
        let mut e = ledger::next_entries(&hash, 0, txs);
        entries.append(&mut e);
        hash = entries.last().unwrap().id;
        // The tick's id becomes the last_id for the next payment.
        let tick = Entry::new(&hash, num_hashes, vec![]);
        hash = tick.id;
        last_id = hash;
        entries.push(tick);
    }
    entries.into_iter()
}
// create a ledger with tick entries every `ticks` entries
// Each transaction entry pays 1 token from the mint to a throwaway key;
// tick ids become the last_id used by subsequent transactions.
fn create_sample_block_with_ticks(
    mint: &Mint,
    length: usize,
    ticks: usize,
) -> impl Iterator<Item = Entry> {
    let mut entries = Vec::with_capacity(length);
    let mut hash = mint.last_id();
    let mut last_id = mint.last_id();
    let num_hashes = 1;
    for i in 0..length {
        let keypair = Keypair::new();
        let tx = Transaction::system_new(&mint.keypair(), keypair.pubkey(), 1, last_id);
        let entry = Entry::new(&hash, num_hashes, vec![tx]);
        hash = entry.id;
        entries.push(entry);
        // Insert a tick after every `ticks` transaction entries.
        if (i + 1) % ticks == 0 {
            let tick = Entry::new(&hash, num_hashes, vec![]);
            hash = tick.id;
            last_id = hash;
            entries.push(tick);
        }
    }
    entries.into_iter()
}
/// Build a full sample ledger (genesis + `length` payments with a trailing
/// tick), returning the entry iterator and the mint's pubkey.
fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, Pubkey) {
    let mint = Mint::new(length as i64 + 1);
    let pubkey = mint.pubkey();
    let block = create_sample_block_with_ticks(&mint, length, length);
    (mint.create_entries().into_iter().chain(block), pubkey)
}
/// Prepend the mint's genesis entries to a block paying each keypair,
/// yielding one contiguous entry iterator.
fn create_sample_ledger_with_mint_and_keypairs(
    mint: &Mint,
    keypairs: &[Keypair],
) -> impl Iterator<Item = Entry> {
    let block = create_sample_block_with_next_entries_using_keypairs(mint, keypairs);
    mint.create_entries().into_iter().chain(block)
}
#[test]
fn test_process_ledger_simple() {
    // Process a minimal generated ledger and check heights, balances, and
    // that the tail mirrors the whole ledger (it fits within WINDOW_SIZE).
    let (ledger, pubkey) = create_sample_ledger(1);
    let (ledger, dup) = ledger.tee();
    let bank = Bank::default();
    let (tick_height, ledger_height, tail) = bank
        .process_ledger(ledger, &mut LeaderScheduler::default())
        .unwrap();
    assert_eq!(bank.get_balance(&pubkey), 1);
    // 4 = genesis entries (presumably 2) + 1 transaction entry + 1 tick.
    assert_eq!(ledger_height, 4);
    assert_eq!(tick_height, 2);
    assert_eq!(tail.len(), 4);
    assert_eq!(tail, dup.collect_vec());
    let last_entry = &tail[tail.len() - 1];
    // last entry is a tick
    assert_eq!(0, last_entry.transactions.len());
    // tick is registered
    assert_eq!(bank.last_id(), last_entry.id);
}
#[test]
fn test_process_ledger_around_window_size() {
    // Exercise ledgers sized just below and just above WINDOW_SIZE so the
    // tail ring buffer's wrap-around path is covered.
    // TODO: put me back in when Criterion is up
    // for _ in 0..10 {
    //     let (ledger, _) = create_sample_ledger(WINDOW_SIZE as usize);
    //     let bank = Bank::default();
    //     let (_, _) = bank.process_ledger(ledger).unwrap();
    // }
    let window_size = WINDOW_SIZE as usize;
    for entry_count in window_size - 3..window_size + 2 {
        let (ledger, pubkey) = create_sample_ledger(entry_count);
        let bank = Bank::default();
        let (tick_height, ledger_height, tail) = bank
            .process_ledger(ledger, &mut LeaderScheduler::default())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey), 1);
        // entry_count transactions + genesis entries + trailing tick.
        assert_eq!(ledger_height, entry_count as u64 + 3);
        assert_eq!(tick_height, 2);
        // The tail never exceeds the window.
        assert!(tail.len() <= window_size);
        let last_entry = &tail[tail.len() - 1];
        assert_eq!(bank.last_id(), last_entry.id);
    }
}
// Write the given entries to a file and then return a file iterator to them.
fn to_file_iter(entries: impl Iterator<Item = Entry>) -> impl Iterator<Item = Entry> {
    // Round-trip through an in-memory "file" to exercise the serialized path.
    let mut file = Cursor::new(vec![]);
    EntryWriter::write_entries(&mut file, entries).unwrap();
    file.seek(SeekFrom::Start(0)).unwrap();
    entry_writer::read_entries(BufReader::new(file)).map(|entry| entry.unwrap())
}
#[test]
fn test_process_ledger_from_file() {
    // A ledger that round-tripped through serialization processes to the
    // same balances as one processed directly.
    let (ledger, pubkey) = create_sample_ledger(1);
    let bank = Bank::default();
    bank.process_ledger(to_file_iter(ledger), &mut LeaderScheduler::default())
        .unwrap();
    assert_eq!(bank.get_balance(&pubkey), 1);
}
#[test]
fn test_process_ledger_from_files() {
    // Genesis and block may arrive as two separately serialized streams.
    let mint = Mint::new(2);
    let genesis_stream = to_file_iter(mint.create_entries().into_iter());
    let block_stream = to_file_iter(create_sample_block_with_ticks(&mint, 1, 1));
    let bank = Bank::default();
    bank.process_ledger(
        genesis_stream.chain(block_stream),
        &mut LeaderScheduler::default(),
    ).unwrap();
    assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
#[test]
fn test_hash_internal_state() {
    // Two banks fed identical ledgers must agree on their state hash; a
    // divergent transfer changes the hash, and mirroring that transfer on
    // the other bank brings the hashes back in line.
    let mint = Mint::new(2_000);
    let seed = [0u8; 32];
    let mut rnd = GenKeys::new(seed);
    let keypairs = rnd.gen_n_keypairs(5);
    let ledger0 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);
    let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);
    let bank0 = Bank::default();
    bank0
        .process_ledger(ledger0, &mut LeaderScheduler::default())
        .unwrap();
    let bank1 = Bank::default();
    bank1
        .process_ledger(ledger1, &mut LeaderScheduler::default())
        .unwrap();
    let initial_state = bank0.hash_internal_state();
    assert_eq!(bank1.hash_internal_state(), initial_state);
    let pubkey = keypairs[0].pubkey();
    bank0
        .transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
        .unwrap();
    assert_ne!(bank0.hash_internal_state(), initial_state);
    bank1
        .transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
        .unwrap();
    assert_eq!(bank0.hash_internal_state(), bank1.hash_internal_state());
}
#[test]
fn test_finality() {
    // Finality starts at usize::MAX (unset) and reflects the stored value.
    let bank = Bank::default();
    assert_eq!(bank.finality(), std::usize::MAX);
    bank.set_finality(90);
    assert_eq!(bank.finality(), 90);
}
#[test]
fn test_interleaving_locks() {
    // While a batch holds account locks, interleaved transfers touching the
    // same accounts must fail with AccountInUse until the batch unlocks.
    let mint = Mint::new(3);
    let bank = Bank::new(&mint);
    let alice = Keypair::new();
    let bob = Keypair::new();
    let tx1 = Transaction::system_new(&mint.keypair(), alice.pubkey(), 1, mint.last_id());
    let pay_alice = vec![tx1];
    let locked_alice = bank.lock_accounts(&pay_alice);
    let results_alice =
        bank.execute_and_commit_transactions(&pay_alice, locked_alice, MAX_ENTRY_IDS);
    assert_eq!(results_alice[0], Ok(()));
    // try executing an interleaved transfer twice
    assert_eq!(
        bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()),
        Err(BankError::AccountInUse)
    );
    // the second time should fail as well
    // this verifies that `unlock_accounts` doesn't unlock `AccountInUse` accounts
    assert_eq!(
        bank.transfer(1, &mint.keypair(), bob.pubkey(), mint.last_id()),
        Err(BankError::AccountInUse)
    );
    bank.unlock_accounts(&pay_alice, &results_alice);
    assert_matches!(
        bank.transfer(2, &mint.keypair(), bob.pubkey(), mint.last_id()),
        Ok(_)
    );
}
#[test]
// Account subscriptions: registering a sink stores it keyed by the
// watched pubkey, check_account_subscriptions pushes a JSON-RPC
// accountNotification to the sink, and removal drops the key.
fn test_bank_account_subscribe() {
    let mint = Mint::new(100);
    let bank = Bank::new(&mint);
    let alice = Keypair::new();
    let bank_sub_id = Keypair::new().pubkey();
    let last_id = bank.last_id();
    let tx = Transaction::system_create(
        &mint.keypair(),
        alice.pubkey(),
        last_id,
        1,
        16,
        BudgetState::id(),
        0,
    );
    bank.process_transaction(&tx).unwrap();
    let (subscriber, _id_receiver, mut transport_receiver) =
        Subscriber::new_test("accountNotification");
    let sub_id = SubscriptionId::Number(0 as u64);
    let sink = subscriber.assign_id(sub_id.clone()).unwrap();
    bank.add_account_subscription(bank_sub_id, alice.pubkey(), sink);
    assert!(
        bank.account_subscriptions
            .write()
            .unwrap()
            .contains_key(&alice.pubkey())
    );
    let account = bank.get_account(&alice.pubkey()).unwrap();
    bank.check_account_subscriptions(&alice.pubkey(), &account);
    let string = transport_receiver.poll();
    assert!(string.is_ok());
    // When a notification is ready, it must match the expected payload.
    if let Async::Ready(Some(response)) = string.unwrap() {
        let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
        assert_eq!(expected, response);
    }
    bank.remove_account_subscription(&bank_sub_id, &alice.pubkey());
    assert!(
        !bank
            .account_subscriptions
            .write()
            .unwrap()
            .contains_key(&alice.pubkey())
    );
}
#[test]
// Signature subscriptions: registering a sink stores it keyed by the
// signature, check_signature_subscriptions pushes a signatureNotification
// with the status, and removal drops the key.
fn test_bank_signature_subscribe() {
    let mint = Mint::new(100);
    let bank = Bank::new(&mint);
    let alice = Keypair::new();
    let bank_sub_id = Keypair::new().pubkey();
    let last_id = bank.last_id();
    let tx = Transaction::system_move(&mint.keypair(), alice.pubkey(), 20, last_id, 0);
    let signature = tx.signature;
    bank.process_transaction(&tx).unwrap();
    let (subscriber, _id_receiver, mut transport_receiver) =
        Subscriber::new_test("signatureNotification");
    let sub_id = SubscriptionId::Number(0 as u64);
    let sink = subscriber.assign_id(sub_id.clone()).unwrap();
    bank.add_signature_subscription(bank_sub_id, signature, sink);
    assert!(
        bank.signature_subscriptions
            .write()
            .unwrap()
            .contains_key(&signature)
    );
    bank.check_signature_subscriptions(&signature, RpcSignatureStatus::Confirmed);
    let string = transport_receiver.poll();
    assert!(string.is_ok());
    if let Async::Ready(Some(response)) = string.unwrap() {
        let expected = format!(r#"{{"jsonrpc":"2.0","method":"signatureNotification","params":{{"result":"Confirmed","subscription":0}}}}"#);
        assert_eq!(expected, response);
    }
    bank.remove_signature_subscription(&bank_sub_id, &signature);
    assert!(
        !bank
            .signature_subscriptions
            .write()
            .unwrap()
            .contains_key(&signature)
    );
}
#[test]
// first_err returns Ok for all-Ok input and otherwise the error of the
// earliest failed result in slice order (not severity order).
fn test_first_err() {
    assert_eq!(Bank::first_err(&[Ok(())]), Ok(()));
    assert_eq!(
        Bank::first_err(&[Ok(()), Err(BankError::DuplicateSignature)]),
        Err(BankError::DuplicateSignature)
    );
    assert_eq!(
        Bank::first_err(&[
            Ok(()),
            Err(BankError::DuplicateSignature),
            Err(BankError::AccountInUse)
        ]),
        Err(BankError::DuplicateSignature)
    );
    assert_eq!(
        Bank::first_err(&[
            Ok(()),
            Err(BankError::AccountInUse),
            Err(BankError::DuplicateSignature)
        ]),
        Err(BankError::AccountInUse)
    );
    assert_eq!(
        Bank::first_err(&[
            Err(BankError::AccountInUse),
            Ok(()),
            Err(BankError::DuplicateSignature)
        ]),
        Err(BankError::AccountInUse)
    );
}
#[test]
// A lone tick entry is processed and registers its id as the new last_id.
fn test_par_process_entries_tick() {
    let mint = Mint::new(1000);
    let bank = Bank::new(&mint);
    // ensure bank can process a tick
    let tick = next_entry(&mint.last_id(), 1, vec![]);
    assert_eq!(bank.par_process_entries(&[tick.clone()]), Ok(()));
    assert_eq!(bank.last_id(), tick.id);
}
#[test]
// Two entries whose transactions share the mint account must both apply;
// with no tick in between, last_id stays unchanged.
fn test_par_process_entries_2_entries_collision() {
    let mint = Mint::new(1000);
    let bank = Bank::new(&mint);
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let last_id = bank.last_id();
    // ensure bank can process 2 entries that have a common account and no tick is registered
    let tx = Transaction::system_new(&mint.keypair(), keypair1.pubkey(), 2, bank.last_id());
    let entry_1 = next_entry(&last_id, 1, vec![tx]);
    let tx = Transaction::system_new(&mint.keypair(), keypair2.pubkey(), 2, bank.last_id());
    let entry_2 = next_entry(&entry_1.id, 1, vec![tx]);
    assert_eq!(bank.par_process_entries(&[entry_1, entry_2]), Ok(()));
    assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
    assert_eq!(bank.last_id(), last_id);
}
#[test]
// Two entries with disjoint account sets (parallelizable) both apply;
// with no tick in between, last_id stays unchanged.
fn test_par_process_entries_2_entries_par() {
    let mint = Mint::new(1000);
    let bank = Bank::new(&mint);
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();
    //load accounts
    let tx = Transaction::system_new(&mint.keypair(), keypair1.pubkey(), 1, bank.last_id());
    assert_eq!(bank.process_transaction(&tx), Ok(()));
    let tx = Transaction::system_new(&mint.keypair(), keypair2.pubkey(), 1, bank.last_id());
    assert_eq!(bank.process_transaction(&tx), Ok(()));
    // ensure bank can process 2 entries that do not have a common account and no tick is registered
    let last_id = bank.last_id();
    let tx = Transaction::system_new(&keypair1, keypair3.pubkey(), 1, bank.last_id());
    let entry_1 = next_entry(&last_id, 1, vec![tx]);
    let tx = Transaction::system_new(&keypair2, keypair4.pubkey(), 1, bank.last_id());
    let entry_2 = next_entry(&entry_1.id, 1, vec![tx]);
    assert_eq!(bank.par_process_entries(&[entry_1, entry_2]), Ok(()));
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
    assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
    assert_eq!(bank.last_id(), last_id);
}
#[test]
// Disjoint entries separated by a tick all apply, the tick id becomes
// last_id, and replaying a stale entry fails (its funding account has
// already been drained).
fn test_par_process_entries_2_entries_tick() {
    let mint = Mint::new(1000);
    let bank = Bank::new(&mint);
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();
    let keypair3 = Keypair::new();
    let keypair4 = Keypair::new();
    //load accounts
    let tx = Transaction::system_new(&mint.keypair(), keypair1.pubkey(), 1, bank.last_id());
    assert_eq!(bank.process_transaction(&tx), Ok(()));
    let tx = Transaction::system_new(&mint.keypair(), keypair2.pubkey(), 1, bank.last_id());
    assert_eq!(bank.process_transaction(&tx), Ok(()));
    let last_id = bank.last_id();
    // ensure bank can process 2 entries that do not have a common account and tick is registered
    let tx = Transaction::system_new(&keypair2, keypair3.pubkey(), 1, bank.last_id());
    let entry_1 = next_entry(&last_id, 1, vec![tx]);
    let new_tick = next_entry(&entry_1.id, 1, vec![]);
    let tx = Transaction::system_new(&keypair1, keypair4.pubkey(), 1, new_tick.id);
    let entry_2 = next_entry(&new_tick.id, 1, vec![tx]);
    assert_eq!(
        bank.par_process_entries(&[entry_1.clone(), new_tick.clone(), entry_2]),
        Ok(())
    );
    assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
    assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
    assert_eq!(bank.last_id(), new_tick.id);
    // ensure that errors are returned
    assert_eq!(
        bank.par_process_entries(&[entry_1]),
        Err(BankError::AccountNotFound)
    );
}
}
|
mod commands;
#[cfg_attr(test, macro_use)]
mod db;
mod model;
mod server;
mod snek;
use serenity::framework::standard::StandardFramework;
use serenity::prelude::*;
use simplelog::{Config, LogLevelFilter, SimpleLogger};
use failure::*;
use log::*;
use std::env;
use std::fs::File;
use std::io::Read;
use std::sync::Arc;
use std::thread;
use crate::db::*;
use crate::server::RealServerConnection;
use commands::servers::CacheEntry;
use evmap;
use chrono::{DateTime, Utc};
/// Write half of the evmap-backed details cache:
/// alias -> (time of last refresh, cached entry if any).
pub struct CacheWriteHandle(
    pub evmap::WriteHandle<String, Box<(DateTime<Utc>, Option<CacheEntry>)>>,
);
/// Read half of the details cache; a factory so each thread can mint its
/// own read handle.
pub struct CacheReadHandle(
    pub evmap::ReadHandleFactory<String, Box<(DateTime<Utc>, Option<CacheEntry>)>>,
);
impl CacheReadHandle {
    /// Look up the cached entry for `alias`, cloning it out of the map.
    ///
    /// Returns `None` when the alias is absent. Panics if the map holds
    /// more than one value for the key — the writer is expected to keep
    /// exactly one value per alias.
    fn get_clone(&self, alias: &str) -> Option<Option<CacheEntry>> {
        self.0.handle().get_and(alias, |values| {
            if values.len() != 1 {
                panic!()
            } else {
                (*values[0]).1.clone()
            }
        })
    }
}
/// Discord event handler with no custom hooks; serenity defaults apply.
struct Handler;
impl EventHandler for Handler {}
fn main() {
if let Err(e) = do_main() {
info!("server crashed with error {:?}", e)
}
}
fn do_main() -> Result<(), Error> {
SimpleLogger::init(LogLevelFilter::Info, Config::default())?;
info!("Logger initialised");
let mut discord_client = create_discord_client().context("Creating discord client")?;
if let Err(why) = discord_client.start() {
error!("Client error: {:?}", why);
}
Ok(())
}
fn read_token() -> Result<String, Error> {
let mut token_file = File::open("resources/token").context("Opening file 'resources/token'")?;
let mut temp_token = String::new();
token_file
.read_to_string(&mut temp_token)
.context("Reading contents of file")?;
info!("Read discord bot token");
Ok(temp_token)
}
/// typemap key under which the cache read handle is stored in serenity's
/// shared client data.
struct DetailsReadHandleKey;
impl typemap::Key for DetailsReadHandleKey {
    type Value = CacheReadHandle;
}
/// Build and fully configure the serenity client: open the database,
/// wire the evmap details cache into shared data, install the command
/// framework, and spawn the background cache-refresh thread.
fn create_discord_client() -> Result<Client, Error> {
    let token = read_token().context("Reading token file")?;
    let path = env::current_dir()?;
    let path = path.join("resources/dom5bot.db");
    let db_conn =
        DbConnection::new(&path).context(format!("Opening database '{}'", path.display()))?;
    info!("Opened database connection");
    // Lock-free read/write handle pair for the details cache.
    let (reader, write) = evmap::new();
    let mut discord_client = Client::new(&token, Handler).map_err(SyncFailure::new)?;
    info!("Created discord client");
    {
        // Scoped so the data lock is dropped before further configuration.
        let mut data = discord_client.data.lock();
        data.insert::<DbConnectionKey>(db_conn.clone());
        data.insert::<DetailsReadHandleKey>(CacheReadHandle(reader.factory()));
    }
    use crate::commands::servers::WithServersCommands;
    use crate::commands::WithSearchCommands;
    discord_client.with_framework(
        StandardFramework::new()
            .configure(|c| c.prefix("!"))
            .simple_bucket("simple", 1)
            .with_search_commands("simple")
            .with_servers_commands::<RealServerConnection>("simple")
            .help(|_, msg, _, _, _| commands::help(msg))
            .before(|_, msg, _| {
                info!("received message {:?}", msg);
                !msg.author.bot // ignore bots
            })
            // On command failure, reply to the user with the error text.
            .after(|_ctx, msg, _cmd_name, result| {
                if let Err(err) = result {
                    print!("command error: ");
                    let text = format!("ERROR: {}", err.0);
                    info!("replying with {}", text);
                    let _ = msg.reply(&text);
                }
            }),
    );
    info!("Configured discord client");
    let writer_mutex = Arc::new(Mutex::new(CacheWriteHandle(write)));
    let writer_mutex_clone = writer_mutex.clone();
    // Background loop keeps the details cache fresh.
    thread::spawn(move || {
        crate::commands::servers::turn_check::update_details_cache_loop(
            db_conn.clone(),
            writer_mutex_clone,
        );
    });
    // thread::spawn(move || {
    //     crate::commands::servers::turn_check::remove_old_entries_from_cache_loop(writer_mutex);
    // });
    // start listening for events by starting a single shard
    Ok(discord_client)
}
|
use nom::IResult;
use nom::be_u32;
use nom::be_u64;
/// Big-endian magic number that begins every flattened devicetree blob.
pub const HEADER_MAGIC: [u8;4] = [0xd0, 0x0d, 0xfe, 0xed];
/// Devicetree blob format version this parser targets.
pub const VERSION: u32 = 17;
/// Oldest blob version this format is backward compatible with.
pub const LAST_COMP_VERSION: u32 = 16;
/// Flattened devicetree (FDT) header; all fields are stored big-endian
/// in the blob and decoded by `parse_header`.
#[derive(Debug,PartialEq,Eq,Copy,Clone)]
pub struct Header {
    pub magic: [u8;4],
    /// Total size of the blob in bytes.
    pub totalsize: u32,
    /// Byte offset of the structure block.
    pub off_dt_struct: u32,
    /// Byte offset of the strings block.
    pub off_dt_strings: u32,
    /// Byte offset of the memory reservation block.
    pub off_mem_rsvmap: u32,
    pub version: u32,
    pub last_comp_version: u32,
    /// Physical id of the boot CPU.
    pub boot_cpuid_phys: u32,
    /// Size of the strings block in bytes.
    pub size_dt_strings: u32,
    /// Size of the structure block in bytes.
    pub size_dt_struct: u32,
}
/// Parse the 40-byte FDT header, verifying the magic number first.
pub fn parse_header(i: &[u8]) -> IResult<&[u8], Header> {
    do_parse!(i,
        // Reject anything that does not start with the DTB magic.
        tag!(&HEADER_MAGIC) >>
        totalsize: be_u32 >>
        off_dt_struct: be_u32 >>
        off_dt_strings: be_u32 >>
        off_mem_rsvmap: be_u32 >>
        version: be_u32 >>
        last_comp_version: be_u32 >>
        boot_cpuid_phys: be_u32 >>
        size_dt_strings: be_u32 >>
        size_dt_struct: be_u32 >>
        (Header {
            magic: HEADER_MAGIC,
            // Field-init shorthand replaces the redundant `name: name` pairs
            // (clippy::redundant_field_names).
            totalsize,
            off_dt_struct,
            off_dt_strings,
            off_mem_rsvmap,
            version,
            last_comp_version,
            boot_cpuid_phys,
            size_dt_strings,
            size_dt_struct,
        })
    )
}
/// Number of padding bytes needed to advance `unaligned` to the next
/// 4-byte boundary (0 when already aligned).
fn residue_to_align(unaligned: usize) -> usize {
    match unaligned % 4 {
        0 => 0,
        rem => 4 - rem,
    }
}
/// Structure-block tokens of a flattened devicetree.
#[derive(Debug,PartialEq,Eq,Copy,Clone)]
pub enum Token<'a> {
    /// Start of a node; `name` is the node name without its NUL terminator.
    BeginNode{name: &'a [u8]},
    /// End of the current node.
    EndNode,
    /// Property: `len` bytes of `value`, name at `nameoff` in the strings block.
    Prop{len: u32, nameoff: u32, value: &'a [u8]},
    /// Padding token carrying no data.
    Nop,
    /// End of the structure block.
    End,
}
/// Parse the payload of a BEGIN_NODE token: a NUL-terminated name
/// followed by padding up to the next 4-byte boundary.
fn parse_token_begin_node(i: &[u8]) -> IResult<&[u8], Token> {
    do_parse!(i,
        name: take_until_and_consume!("\x00") >>
        // Payload is padded to 4-byte alignment; the +1 accounts for the
        // NUL consumed above.
        take!(residue_to_align(name.len() + 1)) >>
        (Token::BeginNode {
            name: name,
        })
    )
}
/// END_NODE carries no payload: consume nothing, yield the token.
fn parse_token_end_node(i: &[u8]) -> IResult<&[u8], Token> {
    Ok((i, Token::EndNode))
}
/// Parse a PROP token payload: value length, strings-block name offset,
/// the raw value bytes, then padding to 4-byte alignment.
fn parse_token_prop(i: &[u8]) -> IResult<&[u8], Token> {
    do_parse!(i,
        len: be_u32 >>
        nameoff: be_u32 >>
        value: take!(len) >>
        take!(residue_to_align(len as usize)) >>
        (Token::Prop {
            len: len,
            nameoff: nameoff,
            value: value,
        })
    )
}
/// NOP carries no payload: consume nothing, yield the token.
fn parse_token_nop(i: &[u8]) -> IResult<&[u8], Token> {
    Ok((i, Token::Nop))
}
/// END carries no payload: consume nothing, yield the token.
fn parse_token_end(i: &[u8]) -> IResult<&[u8], Token> {
    Ok((i, Token::End))
}
/// Parse one structure-block token by dispatching on its 32-bit
/// big-endian id; unknown ids produce a nom error via `switch!`.
pub fn parse_token(i: &[u8]) -> IResult<&[u8], Token> {
    switch!(i, be_u32,
        0x0000_0001 => call!(parse_token_begin_node) |
        0x0000_0002 => call!(parse_token_end_node) |
        0x0000_0003 => call!(parse_token_prop) |
        0x0000_0004 => call!(parse_token_nop) |
        0x0000_0009 => call!(parse_token_end)
    )
}
/// One entry of the memory reservation block: a reserved physical range.
#[derive(Debug,PartialEq,Eq,Copy,Clone)]
pub struct ReserveEntry {
    pub address: u64,
    pub size: u64,
}
/// Parse one reservation entry: two big-endian u64s (address, size).
pub fn parse_reserve_entry(i: &[u8]) -> IResult<&[u8], ReserveEntry> {
    do_parse!(i,
        address: be_u64 >>
        size: be_u64 >>
        (ReserveEntry {
            address: address,
            size: size,
        })
    )
}
/// Iterator over the memory reservation block; the block is terminated
/// by an all-zero entry (see `next`).
pub struct ReserveEntryIterator<'a> {
    slice: &'a [u8],
}
impl<'a> ReserveEntryIterator<'a> {
    /// Start iterating at the header's memory-reservation-block offset.
    fn new(fdt: &'a Fdt<'a>) -> ReserveEntryIterator<'a> {
        let base = fdt.header.off_mem_rsvmap as usize;
        ReserveEntryIterator {
            slice: &fdt.mem[base..],
        }
    }
}
impl<'a> Iterator for ReserveEntryIterator<'a> {
    type Item = ReserveEntry;
    /// Yield entries until the all-zero terminator entry or a parse error.
    fn next(&mut self) -> Option<ReserveEntry> {
        match parse_reserve_entry(self.slice) {
            Ok((remainder, entry)) => {
                if (entry.address == 0) && (entry.size == 0) {
                    //We intentionally don't ratchet up the slice here so that
                    // subsequent calls continue to return None
                    return None;
                }
                self.slice = remainder;
                Some(entry)
            },
            // Malformed/truncated input ends iteration quietly.
            _ => None,
        }
    }
}
/// Iterator over structure-block tokens, bounded by the header's
/// `size_dt_struct`.
pub struct TokenIterator<'a> {
    slice: &'a [u8],
}
impl<'a> TokenIterator<'a> {
    /// Restrict iteration to exactly the structure block's byte range.
    fn new(fdt: &'a Fdt<'a>) -> TokenIterator<'a> {
        let base = fdt.header.off_dt_struct as usize;
        let end = base + (fdt.header.size_dt_struct as usize);
        TokenIterator {
            slice: &fdt.mem[base..end],
        }
    }
}
impl<'a> Iterator for TokenIterator<'a> {
    type Item = Token<'a>;
    /// Yield tokens until the slice is exhausted or a parse error occurs.
    fn next(&mut self) -> Option<Token<'a>> {
        // Not strictly required as this will be covered by IResult::Incomplete,
        // but I don't really like going down exceptional pathways in the
        // the normal code flow
        if self.slice.is_empty() {
            return None;
        }
        match parse_token(self.slice) {
            Ok((remainder, token)) => {
                self.slice = remainder;
                Some(token)
            },
            // Malformed input ends iteration quietly.
            _ => None
        }
    }
}
/// Errors raised while constructing an `Fdt` view.
#[derive(Debug,PartialEq,Eq)]
pub enum FdtParseError {
    /// The header's `totalsize` exceeds the supplied backing slice.
    TotalsizeLargerThanMem,
}
/// Errors raised while resolving a string from the strings block.
#[derive(Debug,PartialEq,Eq)]
pub enum StringMarshalError {
    /// The bytes were not valid UTF-8.
    ValidationError(::std::str::Utf8Error),
    /// The offset (or the string) runs past the end of the strings block.
    OutOfRange,
}
/// Allow `?` on `from_utf8` results inside string marshalling code.
impl From<::std::str::Utf8Error> for StringMarshalError {
    fn from(error: ::std::str::Utf8Error) -> StringMarshalError {
        StringMarshalError::ValidationError(error)
    }
}
/// A validated view over a flattened-devicetree blob: the decoded header
/// plus the raw backing bytes it indexes into.
pub struct Fdt<'a> {
    header: Header,
    mem: &'a [u8],
}
impl<'a> Fdt<'a> {
    /// Construct an `Fdt` view over `mem`, validating that the backing
    /// slice is large enough for the blob described by `header`.
    ///
    /// # Errors
    ///
    /// Returns `FdtParseError::TotalsizeLargerThanMem` when
    /// `header.totalsize` exceeds `mem.len()` — the header's offsets
    /// would index past the end of `mem`.
    pub fn new(header: &Header, mem: &'a [u8]) -> Result<Fdt<'a>, FdtParseError> {
        // BUG FIX: the comparison was inverted. The old `< mem.len()`
        // rejected buffers *larger* than totalsize (harmless slack) while
        // accepting buffers smaller than totalsize, which lets the
        // iterators slice out of bounds and panic.
        if (header.totalsize as usize) > mem.len() {
            return Err(FdtParseError::TotalsizeLargerThanMem);
        }
        Ok(Fdt {
            // `Header` is `Copy`; a dereference copies it (no `clone()` needed).
            header: *header,
            mem,
        })
    }
    /// Iterator over the memory-reservation block entries.
    pub fn mem_rsvmap_iter(&self) -> ReserveEntryIterator {
        ReserveEntryIterator::new(self)
    }
    /// Iterator over the structure-block tokens.
    pub fn token_iter(&self) -> TokenIterator {
        TokenIterator::new(self)
    }
    /// Slice of `mem` holding the strings block, per the header's
    /// offset/size fields.
    fn str_region_slice(&self) -> &'a [u8] {
        let base = self.header.off_dt_strings as usize;
        let end = base + self.header.size_dt_strings as usize;
        &self.mem[base..end]
    }
    /// Resolve the NUL-terminated string at offset `nameoff` inside the
    /// strings block.
    ///
    /// # Errors
    ///
    /// * `OutOfRange` — `nameoff` is past the end of the strings block,
    ///   or no terminating NUL exists before the block ends.
    /// * `ValidationError` — the bytes are not valid UTF-8.
    pub fn str_from_off(&self, nameoff: u32) -> Result<&'a str, StringMarshalError> {
        let region = self.str_region_slice();
        // Checked slicing: an offset at/past the end yields OutOfRange
        // instead of panicking.
        let tail = region
            .get(nameoff as usize..)
            .ok_or(StringMarshalError::OutOfRange)?;
        let len = tail
            .iter()
            .position(|&b| b == 0)
            .ok_or(StringMarshalError::OutOfRange)?;
        // `From<Utf8Error>` maps a UTF-8 failure into `ValidationError`.
        Ok(::std::str::from_utf8(&tail[..len])?)
    }
}
#[cfg(test)]
mod tests {
    // NOTE(review): these assertions use the nom 3.x result API
    // (`IResult::Done` / `IResult::Error`) while the iterator code above
    // pattern-matches the nom 4+ shape (`Ok((rest, value))`). Both cannot
    // compile against the same nom version — confirm which version the
    // crate pins and migrate the other side accordingly.
    use super::parse_header;
    use super::parse_token;
    use super::Header;
    use super::Token;
    use nom::ErrorKind;
    use nom::IResult;
    // Expected zero-length remainder after a fully-consumed input.
    const EMPTY: &'static [u8] = b"";
    #[test]
    // A well-formed 40-byte header decodes into the expected field values.
    fn parse_header_good() {
        let test_header_bytes: [u8;40] = [
            0xd0,0x0d,0xfe,0xed, 0x00,0x01,0x00,0x00,
            0x00,0x00,0x00,0x40, 0x00,0x00,0x1b,0xdc,
            0x00,0x00,0x00,0x30, 0x00,0x00,0x00,0x11,
            0x00,0x00,0x00,0x10, 0x00,0x00,0x00,0x00,
            0x00,0x00,0x01,0xa1, 0x00,0x00,0x1b,0x9c,
        ];
        assert_eq!(parse_header(&test_header_bytes),
            IResult::Done(EMPTY, Header {
                magic: super::HEADER_MAGIC,
                totalsize: 0x0001_0000,
                off_dt_struct: 0x0000_0040,
                off_dt_strings: 0x0000_1bdc,
                off_mem_rsvmap: 0x0000_0030,
                version: super::VERSION,
                last_comp_version: super::LAST_COMP_VERSION,
                boot_cpuid_phys: 0x0000_0000,
                size_dt_strings: 0x0000_01a1,
                size_dt_struct: 0x0000_1b9c,
            }));
    }
    #[test]
    // A wrong magic number is rejected with a Tag error.
    fn parse_header_bad_magic() {
        let test_header_bytes: [u8;4] = [
            0xFF, 0xFF, 0xFF, 0xFF,
        ];
        assert_eq!(parse_header(&test_header_bytes),
            IResult::Error(ErrorKind::Tag));
    }
    #[test]
    // BEGIN_NODE whose name plus NUL exactly fills the aligned payload.
    fn parse_token_begin_node_full_str() {
        let test_token: [u8;12] = [
            0x00, 0x00, 0x00, 0x01,
            //t e s t i n g \0
            0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x00,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::BeginNode{name: b"testing"})
        );
    }
    #[test]
    // BEGIN_NODE with alignment padding after the NUL terminator.
    fn parse_token_begin_node_partially_str() {
        let test_token: [u8;12] = [
            0x00, 0x00, 0x00, 0x01,
            //h e l l o \0
            0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x00, 0x00, 0x00,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::BeginNode{name: b"hello"})
        );
    }
    #[test]
    fn parse_token_end_node_good() {
        let test_token: [u8; 4] = [
            0x00, 0x00, 0x00, 0x02,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::EndNode)
        );
    }
    #[test]
    // PROP whose 4-byte value needs no alignment padding.
    fn parse_token_prop_full_value() {
        let test_token: [u8; 16] = [
            0x00, 0x00, 0x00, 0x03,
            0x00, 0x00, 0x00, 0x04,
            0x12, 0x34, 0x56, 0x78,
            0xaa, 0xbb, 0xcc, 0xdd,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::Prop{
                len: 4,
                nameoff: 0x12345678,
                value: b"\xaa\xbb\xcc\xdd",
            })
        );
    }
    #[test]
    // PROP with a 2-byte value followed by 2 bytes of alignment padding.
    fn parse_token_prop_partially_value() {
        let test_token: [u8; 16] = [
            0x00, 0x00, 0x00, 0x03,
            0x00, 0x00, 0x00, 0x02,
            0xaa, 0xbb, 0xcc, 0xdd,
            0x32, 0x18, 0x00, 0x00,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::Prop{
                len: 2,
                nameoff: 0xaabbccdd,
                value: b"\x32\x18",
            })
        );
    }
    #[test]
    fn parse_token_nop_good() {
        let test_token: [u8; 4] = [
            0x00, 0x00, 0x00, 0x04,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::Nop)
        );
    }
    #[test]
    fn parse_token_end_good() {
        let test_token: [u8; 4] = [
            0x00, 0x00, 0x00, 0x09,
        ];
        assert_eq!(parse_token(&test_token),
            IResult::Done(EMPTY, Token::End)
        );
    }
}
|
//! Compilers should really have intrinsics for making system calls. They're
//! much like regular calls, with custom calling conventions, and calling
//! conventions are otherwise the compiler's job. But for now, use inline asm.
//!
//! # Safety
//!
//! This contains the `asm` statements performing the syscall instructions.
#![allow(unsafe_code)]
#![allow(dead_code)]
#![allow(unused_imports)]
// When inline asm is available, use it.
#[cfg(all(linux_raw_inline_asm, target_arch = "aarch64"))]
pub(crate) mod aarch64;
#[cfg(all(linux_raw_inline_asm, target_arch = "x86"))]
pub(crate) mod x86;
#[cfg(all(linux_raw_inline_asm, target_arch = "x86_64"))]
pub(crate) mod x86_64;
#[cfg(all(linux_raw_inline_asm, target_arch = "aarch64"))]
pub(crate) use self::aarch64 as asm;
#[cfg(all(linux_raw_inline_asm, target_arch = "x86"))]
pub(crate) use self::x86 as asm;
#[cfg(all(linux_raw_inline_asm, target_arch = "x86_64"))]
pub(crate) use self::x86_64 as asm;
// When inline asm isn't available, use out-of-line asm.
#[cfg(not(linux_raw_inline_asm))]
pub(crate) mod outline;
#[cfg(not(linux_raw_inline_asm))]
pub(crate) use self::outline as asm;
// On aarch64 and x86_64, the architecture syscall instruction is fast, so
// use it directly.
#[cfg(target_arch = "aarch64")]
pub(crate) use self::asm as choose;
#[cfg(target_arch = "x86_64")]
pub(crate) use self::asm as choose;
// On x86, use vDSO wrappers. We could use the architecture syscall
// instruction, but the vDSO kernel_vsyscall mechanism is much faster.
#[cfg(target_arch = "x86")]
pub(crate) use super::vdso_wrappers::x86_via_vdso as choose;
//#[cfg(target_arch = "x86")]
//pub(crate) use self::asm as choose;
|
use crate::registry::Registry;
/// An opaque 128-bit identifier referencing content in the registry.
// NOTE(review): nothing in view reads `hash`; presumably a content hash —
// confirm against the registry implementation.
#[derive(Copy, Clone, Debug)]
pub struct Link {
    hash: u128,
}
/// A depot borrowing the registry it operates against for its lifetime.
pub struct Depot<'registry> {
    registry: &'registry Registry,
}
impl<'registry> Depot<'registry> {
pub fn new(registry: &'registry Registry) -> Self {
Self { registry }
}
}
|
// svd2rust-generated reader/writer type aliases for the UCPD IMR
// (interrupt mask) register: one `_R` bit-reader and one `_W` bit-writer
// per interrupt-enable field.
#[doc = "Register `IMR` reader"]
pub type R = crate::R<IMR_SPEC>;
#[doc = "Register `IMR` writer"]
pub type W = crate::W<IMR_SPEC>;
#[doc = "Field `TXISIE` reader - TXIS interrupt enable"]
pub type TXISIE_R = crate::BitReader;
#[doc = "Field `TXISIE` writer - TXIS interrupt enable"]
pub type TXISIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXMSGDISCIE` reader - TXMSGDISC interrupt enable"]
pub type TXMSGDISCIE_R = crate::BitReader;
#[doc = "Field `TXMSGDISCIE` writer - TXMSGDISC interrupt enable"]
pub type TXMSGDISCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXMSGSENTIE` reader - TXMSGSENT interrupt enable"]
pub type TXMSGSENTIE_R = crate::BitReader;
#[doc = "Field `TXMSGSENTIE` writer - TXMSGSENT interrupt enable"]
pub type TXMSGSENTIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXMSGABTIE` reader - TXMSGABT interrupt enable"]
pub type TXMSGABTIE_R = crate::BitReader;
#[doc = "Field `TXMSGABTIE` writer - TXMSGABT interrupt enable"]
pub type TXMSGABTIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HRSTDISCIE` reader - HRSTDISC interrupt enable"]
pub type HRSTDISCIE_R = crate::BitReader;
#[doc = "Field `HRSTDISCIE` writer - HRSTDISC interrupt enable"]
pub type HRSTDISCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HRSTSENTIE` reader - HRSTSENT interrupt enable"]
pub type HRSTSENTIE_R = crate::BitReader;
#[doc = "Field `HRSTSENTIE` writer - HRSTSENT interrupt enable"]
pub type HRSTSENTIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXUNDIE` reader - TXUND interrupt enable"]
pub type TXUNDIE_R = crate::BitReader;
#[doc = "Field `TXUNDIE` writer - TXUND interrupt enable"]
pub type TXUNDIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXNEIE` reader - RXNE interrupt enable"]
pub type RXNEIE_R = crate::BitReader;
#[doc = "Field `RXNEIE` writer - RXNE interrupt enable"]
pub type RXNEIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXORDDETIE` reader - RXORDDET interrupt enable"]
pub type RXORDDETIE_R = crate::BitReader;
#[doc = "Field `RXORDDETIE` writer - RXORDDET interrupt enable"]
pub type RXORDDETIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXHRSTDETIE` reader - RXHRSTDET interrupt enable"]
pub type RXHRSTDETIE_R = crate::BitReader;
#[doc = "Field `RXHRSTDETIE` writer - RXHRSTDET interrupt enable"]
pub type RXHRSTDETIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXOVRIE` reader - RXOVR interrupt enable"]
pub type RXOVRIE_R = crate::BitReader;
#[doc = "Field `RXOVRIE` writer - RXOVR interrupt enable"]
pub type RXOVRIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXMSGENDIE` reader - RXMSGEND interrupt enable"]
pub type RXMSGENDIE_R = crate::BitReader;
#[doc = "Field `RXMSGENDIE` writer - RXMSGEND interrupt enable"]
pub type RXMSGENDIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TYPECEVT1IE` reader - TYPECEVT1 interrupt enable"]
pub type TYPECEVT1IE_R = crate::BitReader;
#[doc = "Field `TYPECEVT1IE` writer - TYPECEVT1 interrupt enable"]
pub type TYPECEVT1IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TYPECEVT2IE` reader - TYPECEVT2 interrupt enable"]
pub type TYPECEVT2IE_R = crate::BitReader;
#[doc = "Field `TYPECEVT2IE` writer - TYPECEVT2 interrupt enable"]
pub type TYPECEVT2IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FRSEVTIE` reader - FRSEVT interrupt enable"]
pub type FRSEVTIE_R = crate::BitReader;
// NOTE(review): no FRSEVTIE writer alias (nor a W::frsevtie method below)
// is generated — presumably the SVD marks FRSEVT read-only; confirm.
// Bit accessors for the IMR reader; each extracts one interrupt-enable
// bit at its documented position. Generated by svd2rust.
impl R {
    #[doc = "Bit 0 - TXIS interrupt enable"]
    #[inline(always)]
    pub fn txisie(&self) -> TXISIE_R {
        TXISIE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - TXMSGDISC interrupt enable"]
    #[inline(always)]
    pub fn txmsgdiscie(&self) -> TXMSGDISCIE_R {
        TXMSGDISCIE_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - TXMSGSENT interrupt enable"]
    #[inline(always)]
    pub fn txmsgsentie(&self) -> TXMSGSENTIE_R {
        TXMSGSENTIE_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - TXMSGABT interrupt enable"]
    #[inline(always)]
    pub fn txmsgabtie(&self) -> TXMSGABTIE_R {
        TXMSGABTIE_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - HRSTDISC interrupt enable"]
    #[inline(always)]
    pub fn hrstdiscie(&self) -> HRSTDISCIE_R {
        HRSTDISCIE_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - HRSTSENT interrupt enable"]
    #[inline(always)]
    pub fn hrstsentie(&self) -> HRSTSENTIE_R {
        HRSTSENTIE_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - TXUND interrupt enable"]
    #[inline(always)]
    pub fn txundie(&self) -> TXUNDIE_R {
        TXUNDIE_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 8 - RXNE interrupt enable"]
    #[inline(always)]
    pub fn rxneie(&self) -> RXNEIE_R {
        RXNEIE_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - RXORDDET interrupt enable"]
    #[inline(always)]
    pub fn rxorddetie(&self) -> RXORDDETIE_R {
        RXORDDETIE_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - RXHRSTDET interrupt enable"]
    #[inline(always)]
    pub fn rxhrstdetie(&self) -> RXHRSTDETIE_R {
        RXHRSTDETIE_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - RXOVR interrupt enable"]
    #[inline(always)]
    pub fn rxovrie(&self) -> RXOVRIE_R {
        RXOVRIE_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - RXMSGEND interrupt enable"]
    #[inline(always)]
    pub fn rxmsgendie(&self) -> RXMSGENDIE_R {
        RXMSGENDIE_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 14 - TYPECEVT1 interrupt enable"]
    #[inline(always)]
    pub fn typecevt1ie(&self) -> TYPECEVT1IE_R {
        TYPECEVT1IE_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - TYPECEVT2 interrupt enable"]
    #[inline(always)]
    pub fn typecevt2ie(&self) -> TYPECEVT2IE_R {
        TYPECEVT2IE_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 20 - FRSEVT interrupt enable"]
    #[inline(always)]
    pub fn frsevtie(&self) -> FRSEVTIE_R {
        FRSEVTIE_R::new(((self.bits >> 20) & 1) != 0)
    }
}
// Bit-writer proxies for the IMR writer; each returns a typed writer for
// a single interrupt-enable bit. Generated by svd2rust.
impl W {
    #[doc = "Bit 0 - TXIS interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn txisie(&mut self) -> TXISIE_W<IMR_SPEC, 0> {
        TXISIE_W::new(self)
    }
    #[doc = "Bit 1 - TXMSGDISC interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn txmsgdiscie(&mut self) -> TXMSGDISCIE_W<IMR_SPEC, 1> {
        TXMSGDISCIE_W::new(self)
    }
    #[doc = "Bit 2 - TXMSGSENT interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn txmsgsentie(&mut self) -> TXMSGSENTIE_W<IMR_SPEC, 2> {
        TXMSGSENTIE_W::new(self)
    }
    #[doc = "Bit 3 - TXMSGABT interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn txmsgabtie(&mut self) -> TXMSGABTIE_W<IMR_SPEC, 3> {
        TXMSGABTIE_W::new(self)
    }
    #[doc = "Bit 4 - HRSTDISC interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn hrstdiscie(&mut self) -> HRSTDISCIE_W<IMR_SPEC, 4> {
        HRSTDISCIE_W::new(self)
    }
    #[doc = "Bit 5 - HRSTSENT interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn hrstsentie(&mut self) -> HRSTSENTIE_W<IMR_SPEC, 5> {
        HRSTSENTIE_W::new(self)
    }
    #[doc = "Bit 6 - TXUND interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn txundie(&mut self) -> TXUNDIE_W<IMR_SPEC, 6> {
        TXUNDIE_W::new(self)
    }
    #[doc = "Bit 8 - RXNE interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn rxneie(&mut self) -> RXNEIE_W<IMR_SPEC, 8> {
        RXNEIE_W::new(self)
    }
    #[doc = "Bit 9 - RXORDDET interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn rxorddetie(&mut self) -> RXORDDETIE_W<IMR_SPEC, 9> {
        RXORDDETIE_W::new(self)
    }
    #[doc = "Bit 10 - RXHRSTDET interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn rxhrstdetie(&mut self) -> RXHRSTDETIE_W<IMR_SPEC, 10> {
        RXHRSTDETIE_W::new(self)
    }
    #[doc = "Bit 11 - RXOVR interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn rxovrie(&mut self) -> RXOVRIE_W<IMR_SPEC, 11> {
        RXOVRIE_W::new(self)
    }
    #[doc = "Bit 12 - RXMSGEND interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn rxmsgendie(&mut self) -> RXMSGENDIE_W<IMR_SPEC, 12> {
        RXMSGENDIE_W::new(self)
    }
    #[doc = "Bit 14 - TYPECEVT1 interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn typecevt1ie(&mut self) -> TYPECEVT1IE_W<IMR_SPEC, 14> {
        TYPECEVT1IE_W::new(self)
    }
    #[doc = "Bit 15 - TYPECEVT2 interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn typecevt2ie(&mut self) -> TYPECEVT2IE_W<IMR_SPEC, 15> {
        TYPECEVT2IE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe contract: the caller must ensure the raw value is valid for
    // this register (svd2rust convention for whole-register writes).
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "UCPD interrupt mask register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`imr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`imr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct IMR_SPEC;
impl crate::RegisterSpec for IMR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`imr::R`](R) reader structure"]
impl crate::Readable for IMR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`imr::W`](W) writer structure"]
impl crate::Writable for IMR_SPEC {
    // No write-one/write-zero-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets IMR to value 0"]
impl crate::Resettable for IMR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! Types for handling errors in reading.
use lex;
use parse;
use parse::expr::Expr;
use std::error;
use std::fmt;
use std::result;
/// The result of reading a string.
pub type Result = result::Result<Expr, Error>;
/// Indicates an error in lexing or parsing.
///
/// Wraps the stage-specific error so callers can handle both through a
/// single type; `From` impls below enable `?` from either stage.
#[derive(Debug, PartialEq)]
pub enum Error {
    /// Indicates an error in lexing.
    LexError(lex::Error),
    /// Indicates an error in parsing.
    ParseError(parse::Error),
}
impl error::Error for Error {
/// Get a simple text description of what each error means.
fn description(&self) -> &str {
match *self {
Error::LexError(..) => "an error occured during lexing",
Error::ParseError(..) => "an error occured during parsing",
}
}
/// The underlying cause of the error.
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::LexError(ref error) => Some(error),
Error::ParseError(ref error) => Some(error),
}
}
}
impl fmt::Display for Error {
/// Print detailed error information.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::LexError(ref error) => write!(f, "{}", error),
Error::ParseError(ref error) => write!(f, "{}", error),
}
}
}
impl From<lex::Error> for Error {
    /// Convert from a lex::Error into a read::Error (enables `?`).
    fn from(err: lex::Error) -> Error {
        Error::LexError(err)
    }
}
impl From<parse::Error> for Error {
    /// Convert from a parse::Error into a read::Error (enables `?`).
    fn from(err: parse::Error) -> Error {
        Error::ParseError(err)
    }
}
|
use std::io::stdin;
/// Prompt for a line on stdin and print its last character.
fn main() {
    use std::io::Write;
    print!("Type any string: ");
    // BUG FIX: `print!` leaves the prompt in the stdout buffer; without a
    // flush the user may block on read_line before ever seeing it.
    std::io::stdout().flush().unwrap();
    let mut line = String::new();
    stdin().read_line(&mut line).unwrap();
    // `&line[..]` was a redundant reslice; trim directly.
    let input = line.trim();
    // Robustness: empty input used to panic on `next_back().unwrap()`.
    match input.chars().next_back() {
        Some(last) => println!("{}", last),
        None => println!("(empty input)"),
    }
}
|
use std::collections::BinaryHeap;
use crate::{
components::{EntityComponent, TerrainComponent},
indices::RoomPosition,
prelude::{Axial, Hexagon, View},
profile,
tables::hex_grid::HexGrid,
};
use tracing::{debug, trace};
use super::{is_walkable, Node, PathFindingError};
// Bit flags recording which search frontier has already enqueued a tile:
// the one expanding from the start ("from") or from the goal ("to").
const VISITED_FROM: u8 = 1 << 0;
const VISITED_TO: u8 = 1 << 1;
/// `[begin, end]` endpoints of a path query.
type Bounds = [Axial; 2];
/// The goal of the pathfinder to approach `end` at a distance of `distance`.
///
/// So we'll initialize a ring of nodes with the center `end` and radius `distance`.
///
/// Every walkable, unoccupied tile on that ring is seeded into `open_set`
/// (and mirrored into `visited` / `closed_set`) so the backward search can
/// start from all acceptable goal tiles at once.
fn init_end(
    [begin, end]: Bounds,
    distance: u32,
    entities: View<Axial, EntityComponent>,
    terrain: View<Axial, TerrainComponent>,
    open_set: &mut BinaryHeap<Node>,
    visited: &mut HexGrid<u8>,
    closed_set: &mut HexGrid<Node>,
) {
    if distance == 0 {
        // `iter_edge` returns empty if radius is 0 so push the pos here
        let pos = end;
        if let Some(v) = visited.at_mut(pos) {
            *v |= VISITED_TO;
            // Heuristic is the distance back to `begin`; g_cost starts at 0.
            let n = Node::new(pos, pos, pos.hex_distance(begin) as i32, 0);
            open_set.push(n.clone());
            closed_set[pos] = n;
        }
    } else {
        let bounds = Hexagon::new(end, distance as i32);
        // Seed only ring tiles that are walkable terrain and not occupied
        // by an entity.
        for pos in bounds.iter_edge().filter(|pos| {
            terrain
                .at(*pos)
                .map(|TerrainComponent(t)| t.is_walkable())
                .unwrap_or(false)
                && !entities.contains_key(*pos)
        }) {
            debug_assert_eq!(pos.hex_distance(end), distance);
            if let Some(v) = visited.at_mut(pos) {
                *v |= VISITED_TO;
                let n = Node::new(pos, pos, pos.hex_distance(begin) as i32, 0);
                open_set.push(n.clone());
                closed_set[pos] = n;
            }
        }
    }
}
/// Stitches the two half-paths together into `path` once the searches meet
/// at `current`.
///
/// `closed_set_t` parents lead towards `end`; `closed_set_f` parents lead
/// towards `start`.
fn reconstruct_path(
    current: Axial,
    start: Axial,
    end: Axial,
    distance: u32,
    path: &mut Vec<RoomPosition>,
    closed_set_f: &HexGrid<Node>,
    closed_set_t: &HexGrid<Node>,
) {
    // reconstruct 'to'
    //
    // parents move towards `end`
    {
        let i = path.len();
        // copy current
        let mut current = current;
        // 'current' will be pushed by the second loop
        while current.hex_distance(end) > distance {
            current = closed_set_t[current].parent;
            path.push(RoomPosition(current));
        }
        // This segment was emitted goal-first; flip just the new tail so the
        // final `path` runs consistently from goal back towards `start`.
        path[i..].reverse();
    }
    // reconstruct 'from'
    //
    // parents move towards `start`
    let mut current = current;
    while current != start {
        path.push(RoomPosition(current));
        current = closed_set_f[current].parent;
    }
}
/// Returns the remaining steps.
///
/// The algorithm is a two-way A*, where we start A* from both the `from` and the `to` points and
/// exit when they meet.
/// This should reduce the size of the graph we need to traverse in the general case.
///
/// # Errors
///
/// - `Unreachable` when both frontiers are exhausted with steps to spare.
/// - `Timeout` when `max_steps` expansions were consumed first.
pub fn find_path_in_room(
    from: Axial,
    to: Axial,
    distance: u32,
    (positions, terrain): (View<Axial, EntityComponent>, View<Axial, TerrainComponent>),
    max_steps: u32,
    path: &mut Vec<RoomPosition>,
) -> Result<u32, PathFindingError> {
    profile!("find_path_in_room");
    trace!("find_path_in_room from {:?} to {:?}", from, to);
    // Already within the requested distance: nothing to do.
    if from.hex_distance(to) <= distance {
        return Ok(max_steps);
    }
    let end = to;
    let mut remaining_steps = max_steps;
    let room_radius = terrain.bounds().radius;
    debug_assert!(room_radius >= 0);
    let mut closed_set_f = HexGrid::<Node>::new(room_radius as usize);
    let mut open_set_f = BinaryHeap::with_capacity(remaining_steps as usize);
    let mut closed_set_t = HexGrid::<Node>::new(room_radius as usize);
    let mut open_set_t = BinaryHeap::with_capacity(remaining_steps as usize);
    let mut open_set_visited = HexGrid::<u8>::new(room_radius as usize);
    // Seed the backward ("to") search with the ring of acceptable goal tiles.
    init_end(
        [from, end],
        distance,
        positions,
        terrain,
        &mut open_set_t,
        &mut open_set_visited,
        &mut closed_set_t,
    );
    let mut current_f = Node::new(from, from, from.hex_distance(end) as i32, 0);
    closed_set_f
        .insert(current_f.pos, current_f.clone())
        .unwrap();
    open_set_f.push(current_f.clone());
    while !open_set_f.is_empty() && !open_set_t.is_empty() && remaining_steps > 0 {
        // if we find this position in the other set
        // NOTE(review): `g_cost != 0` doubles as a "node present" sentinel —
        // assumes expanded nodes always carry a positive g_cost; confirm
        // against `Node`'s default value.
        if closed_set_t[current_f.pos].g_cost != 0 {
            reconstruct_path(
                current_f.pos,
                from,
                to,
                distance,
                path,
                &closed_set_f,
                &closed_set_t,
            );
            debug!(
                "find_path_in_room succeeded, steps taken: {} remaining_steps: {}",
                max_steps - remaining_steps,
                remaining_steps,
            );
            return Ok(remaining_steps);
        }
        // step `from`
        {
            current_f = open_set_f.pop().unwrap();
            closed_set_f
                .insert(current_f.pos, current_f.clone())
                .unwrap();
            // FIX: was `¤t_f.pos...` — mojibake for `&current_f.pos...`
            // (`&curr` mangled through an HTML entity); did not compile.
            for point in &current_f.pos.hex_neighbours() {
                let point = *point;
                // Out-of-bounds tiles default to "already visited" so they are skipped.
                if open_set_visited.at(point).copied().unwrap_or(VISITED_FROM) & VISITED_FROM != 0
                    || positions.contains_key(point)
                    || !is_walkable(point, terrain)
                    || closed_set_f
                        .at(point)
                        .map(|node| node.g_cost != 0)
                        .unwrap_or(false)
                {
                    continue;
                }
                open_set_visited[point] |= VISITED_FROM;
                let node = Node::new(
                    point,
                    current_f.pos,
                    point.hex_distance(end) as i32,
                    current_f.g_cost + 1,
                );
                open_set_f.push(node);
            }
        }
        // step `to`
        {
            let current_t = open_set_t.pop().unwrap();
            closed_set_t
                .insert(current_t.pos, current_t.clone())
                .unwrap();
            // if we find this position in the other set
            if closed_set_f[current_t.pos].g_cost != 0 {
                reconstruct_path(
                    current_t.pos,
                    from,
                    to,
                    distance,
                    path,
                    &closed_set_f,
                    &closed_set_t,
                );
                debug!(
                    "find_path_in_room succeeded, steps taken: {} remaining_steps: {}",
                    max_steps - remaining_steps,
                    remaining_steps,
                );
                return Ok(remaining_steps);
            }
            // FIX: was `¤t_t.pos...` — same mojibake for `&current_t.pos...`.
            for point in &current_t.pos.hex_neighbours() {
                let point = *point;
                if point.hex_distance(end) <= distance
                    || open_set_visited.at(point).copied().unwrap_or(VISITED_TO) & VISITED_TO != 0
                    || !is_walkable(point, terrain)
                    || positions.contains_key(point)
                    || closed_set_t
                        .at(point)
                        .map(|node| node.g_cost != 0)
                        .unwrap_or(false)
                {
                    continue;
                }
                open_set_visited[point] |= VISITED_TO;
                let node = Node::new(
                    point,
                    current_t.pos,
                    point.hex_distance(from) as i32,
                    current_t.g_cost + 1,
                );
                open_set_t.push(node);
            }
        }
        remaining_steps -= 1;
    }
    // failed
    debug!(
        "find_path_in_room failed, steps taken: {} remaining_steps: {}",
        max_steps - remaining_steps,
        remaining_steps
    );
    if remaining_steps > 0 {
        // we ran out of possible paths
        return Err(PathFindingError::Unreachable);
    }
    Err(PathFindingError::Timeout)
}
|
mod blockchain;
mod block;
mod map;
pub use block::{Block, create_block};
pub use blockchain::{Blockchain, create_blockchain};
use std::time::Instant;

/// Mines blocks forever, printing the block height and the milliseconds
/// each block took to mine.
fn main() {
    let mut chain = create_blockchain();
    chain.add_first_block();
    let mut height: u64 = 1;
    loop {
        let started = Instant::now();
        println!("Mining block {}", height);
        let payload = format!("Block {} data", height);
        chain.add_block(create_block(height, &payload));
        height += 1;
        println!("{}", started.elapsed().as_millis());
    }
}
|
/// Thin binary entry point: all real logic lives in the `sudo_rs` library crate.
fn main() {
    sudo_rs::sudo_main();
}
|
use std::fs::{File, OpenOptions};
use std::io;
use std::io::BufWriter;
use std::io::Write as IoWrite;
use std::path::PathBuf;
#[derive(Debug)]
pub struct FileWriter {
    /// The path to the logging file.
    path: PathBuf,
    /// Buffered writer over the log file. Note that `write` bypasses this
    /// buffer (see below), so records hit the file immediately.
    writer: BufWriter<File>,
}

impl FileWriter {
    /// Opens `path` for appending, creating the file if needed.
    ///
    /// # Panics
    /// Panics if the file cannot be opened.
    pub fn new(path: PathBuf) -> FileWriter {
        let file =
            OpenOptions::new().write(true).append(true).create(true).open(path.as_path()).unwrap();
        FileWriter { path, writer: BufWriter::new(file) }
    }

    /// Appends `record` plus a trailing newline to the log file.
    ///
    /// Writes through `get_mut`, i.e. directly to the underlying `File`,
    /// which is what keeps the no-op `flush` below sound.
    pub fn write(&mut self, record: String) -> io::Result<()> {
        // Return the writeln! result directly instead of the former
        // `if result.is_err() { return result; } Ok(())` round-trip.
        writeln!(self.writer.get_mut(), "{}", record)
    }

    /// No-op: `write` bypasses the buffer, so there is never buffered data
    /// to flush. Kept so callers can treat this like any flushable sink.
    pub fn flush(&self) -> io::Result<()> {
        Ok(())
    }
}
|
use crate::persistence::{Persistence, PersistenceError, PersistenceResult, SqliteFactory};
use crate::tokens::DatabaseAddress;
use actix::prelude::*;
use crossbeam_channel::Sender;
use futures::FutureExt;
use log::debug;
use rusqlite::InterruptHandle;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::hash_map::Entry;
use std::{
collections::{BTreeMap, HashMap},
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// `RoutingActor` supervises all the active databases.
///
/// It lazily spawns one `CoreActor` per `DatabaseAddress` and caches actor
/// addresses for reuse.
pub struct RoutingActor {
    // Factory used to open a database the first time it is addressed.
    persistence: SqliteFactory,
    // Cache of running per-database actors.
    actors: HashMap<DatabaseAddress, Addr<CoreActor>>,
}
impl RoutingActor {
    /// Creates a router with no live database actors yet.
    pub fn new(persistence: SqliteFactory) -> RoutingActor {
        RoutingActor {
            persistence,
            actors: HashMap::new(),
        }
    }
}
impl Actor for RoutingActor {
    type Context = Context<Self>;
}
// Sending a `DatabaseAddress` to the router resolves it to a `CoreActor` address.
impl Message for DatabaseAddress {
    type Result = PersistenceResult<Addr<CoreActor>>;
}
impl Handler<DatabaseAddress> for RoutingActor {
    type Result = PersistenceResult<Addr<CoreActor>>;
    /// Resolves `db_addr` to a running `CoreActor`, opening the database and
    /// starting a fresh actor on first use (entry API: single map lookup).
    fn handle(
        &mut self,
        db_addr: DatabaseAddress,
        _ctx: &mut Context<Self>,
    ) -> PersistenceResult<Addr<CoreActor>> {
        let addr = match self.actors.entry(db_addr) {
            Entry::Occupied(occ) => occ.get().clone(),
            Entry::Vacant(vac) => {
                // `?` on a failed open leaves no vacant entry filled, so a
                // later request will retry the open.
                let db = crate::persistence::Timed::new(self.persistence.open(vac.key())?);
                vac.insert(CoreActor::new(db).start()).clone()
            }
        };
        Ok(addr)
    }
}
/// A unit of work for the database worker thread: the request, a oneshot
/// channel to reply on, and the interrupt generation observed at queue time.
struct Job<I, O> {
    input: I,
    output: futures::channel::oneshot::Sender<O>,
    // Compared against the shared counter; stale jobs are skipped.
    generation: usize,
}
/// `CoreActor` manages connections to a given database.
pub struct CoreActor {
    // Bounded queue feeding the dedicated worker thread.
    queue: Sender<Job<DataMessage, PersistenceResult<String>>>,
    interrupt_handle: InterruptHandle,
    // Bumped on interrupt; jobs queued under an older generation are voided.
    generation: Arc<AtomicUsize>,
}
/// Maximum queued jobs before senders get `PersistenceError::Busy`.
const MAILBOX_SIZE: usize = 16;
impl CoreActor {
    /// Spawns the dedicated worker thread for `persistence` and wires up the
    /// bounded job queue plus the shared interrupt-generation counter.
    pub fn new<P: Persistence + 'static>(mut persistence: P) -> CoreActor {
        let interrupt_handle = (&persistence).get_interrupt_handle();
        let (tx, rx) =
            crossbeam_channel::bounded::<Job<DataMessage, PersistenceResult<String>>>(MAILBOX_SIZE);
        let generation = Arc::new(AtomicUsize::new(0));
        let worker_generation = generation.clone();
        std::thread::spawn(move || {
            // Fix: removed the redundant `let signal = signal.clone();` that
            // shadowed the Arc already moved into this closure (clippy
            // `redundant_clone`).
            // The loop ends once the actor (the only sender) is dropped.
            while let Ok(job) = rx.recv() {
                // Jobs queued before the most recent interrupt are dropped
                // unexecuted.
                let r = if worker_generation.load(Ordering::Relaxed) > job.generation {
                    Err(PersistenceError::Interrupted)
                } else {
                    handle_data_request(&mut persistence, job.input)
                };
                // The requester may have gone away; a failed send is fine.
                let _ = job.output.send(r);
            }
        });
        CoreActor {
            queue: tx,
            interrupt_handle,
            generation,
        }
    }
    /// Voids all queued jobs (generation bump) and interrupts the statement
    /// currently executing inside sqlite.
    pub fn interrupt(&self) {
        self.generation.fetch_add(1, Ordering::Relaxed);
        self.interrupt_handle.interrupt();
    }
}
impl Actor for CoreActor {
    type Context = Context<Self>;
    /// Interrupt any in-flight query when the actor stops so the worker
    /// thread is not left grinding on an orphaned request.
    fn stopped(&mut self, _ctx: &mut Self::Context) {
        self.interrupt();
    }
}
/// Top-level message accepted by `CoreActor`: either a data request or a
/// control (logistics) request.
#[derive(Debug)]
pub enum EzdbMessage {
    Data(DataMessage),
    Logistics(LogisticsMessage),
}
/// Message to interact with the data in the database.
#[derive(Debug)]
pub enum DataMessage {
    /// Run the named query with named parameters.
    QueryNamed(String, BTreeMap<String, Value>),
    /// Run the named mutation with named parameters.
    MutateNamed(String, BTreeMap<String, Value>),
    /// Run a raw SQL query string.
    QueryRaw(String),
    /// Run a raw SQL statement.
    MutateRaw(String),
    /// Read the current policy.
    FetchPolicy,
    /// Replace the current policy.
    SetPolicy(Policy),
}
/// Message to control the logistics of the database.
#[derive(Debug)]
pub enum LogisticsMessage {
    /// Cancel the in-flight request and void queued ones.
    Interrupt,
}
/// Named queries and mutations permitted on a database.
#[derive(Debug, Deserialize, Serialize)]
pub struct Policy {
    pub queries: Vec<QueryPolicy>,
    pub mutations: Vec<MutationPolicy>,
}
/// A named, pre-declared query.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct QueryPolicy {
    pub name: String,
    pub raw_sql: String,
}
/// A named, pre-declared mutation.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MutationPolicy {
    pub name: String,
    pub raw_sql: String,
}
impl Message for EzdbMessage {
    type Result = PersistenceResult<String>;
}
impl Handler<EzdbMessage> for CoreActor {
    type Result = ResponseFuture<PersistenceResult<String>>;
    /// Interrupts are handled inline; data requests are queued to the worker
    /// thread and resolved through a oneshot future.
    fn handle(&mut self, msg: EzdbMessage, _: &mut Context<Self>) -> Self::Result {
        match msg {
            EzdbMessage::Logistics(LogisticsMessage::Interrupt) => {
                self.interrupt();
                Box::pin(std::future::ready(Ok("ok".to_owned())))
            }
            EzdbMessage::Data(input) => {
                let (tx, rx) = futures::channel::oneshot::channel();
                let job = Job {
                    input,
                    output: tx,
                    // Snapshot the generation so a later interrupt voids this job.
                    generation: self.generation.load(Ordering::Relaxed),
                };
                match self.queue.try_send(job) {
                    // NOTE(review): the unwrap assumes the worker never drops
                    // a received job without replying — confirm worker loop.
                    Ok(_) => Box::pin(rx.map(|r| r.unwrap())),
                    // Full mailbox: fail fast instead of blocking the actor.
                    Err(_) => Box::pin(std::future::ready(Err(PersistenceError::Busy))),
                }
            }
        }
    }
}
/// Executes one `DataMessage` against `persistence` and JSON-encodes the
/// result.
///
/// Serialization failures are treated as bugs (`expect`); database errors
/// propagate via `?`.
fn handle_data_request<P: Persistence>(
    persistence: &mut P,
    msg: DataMessage,
) -> PersistenceResult<String> {
    debug!("handling {:?}", msg);
    match msg {
        DataMessage::QueryNamed(name, params) => {
            let data = persistence.query_named(name, params)?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
        DataMessage::QueryRaw(query) => {
            let data = persistence.query_raw(query)?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
        DataMessage::MutateNamed(name, params) => {
            let data = persistence.mutate_named(name, params)?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
        DataMessage::MutateRaw(stmt) => {
            let data = persistence.mutate_raw(stmt)?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
        DataMessage::FetchPolicy => {
            let data = persistence.fetch_policy()?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
        DataMessage::SetPolicy(policy) => {
            let data = persistence.set_policy(policy)?;
            Ok(serde_json::to_string(&data).expect("serialize"))
        }
    }
}
#[cfg(test)]
mod test {
    use super::{CoreActor, DataMessage, EzdbMessage, LogisticsMessage};
    use crate::persistence::{PersistenceError, SqlitePersistence};
    use actix::{Actor, Addr};
    use std::time::Duration;
    /// An expensive cross join must come back as `Interrupted` once an
    /// Interrupt message is processed while it runs.
    #[actix_rt::test]
    async fn expensive_queries_can_be_interrupted() {
        let actor = CoreActor::new(SqlitePersistence::in_memory().unwrap()).start();
        mutate_raw(&actor, "CREATE TABLE foo (x INTEGER)").await;
        mutate_raw(&actor, "INSERT INTO foo (x) VALUES (0)").await;
        // Double the row count ten times: 1 row -> 1024 rows.
        for _ in 0..10 {
            mutate_raw(&actor, "INSERT INTO foo (x) SELECT x FROM foo").await;
        }
        // `foo` now has 1024 entries. `foo JOIN foo JOIN foo` has 2^30 entries, which is extremely expensive.
        let m0 = actor.send(EzdbMessage::Data(DataMessage::QueryRaw(
            "SELECT COUNT(1) FROM foo JOIN foo JOIN foo".to_owned(),
        )));
        // Give the query a moment to start before interrupting it.
        actix_rt::time::delay_for(Duration::from_millis(10)).await;
        let m1 = actor.send(EzdbMessage::Logistics(LogisticsMessage::Interrupt));
        assert_eq!(m1.await.unwrap().unwrap(), "ok");
        assert_eq!(m0.await.unwrap(), Err(PersistenceError::Interrupted));
    }
    /// Helper: send a raw mutation and unwrap both mailbox and DB results.
    async fn mutate_raw(actor: &Addr<CoreActor>, raw: &str) {
        let req = DataMessage::MutateRaw(raw.to_owned());
        actor.send(EzdbMessage::Data(req)).await.unwrap().unwrap();
    }
}
|
use std::rc::Rc;
pub use crate::globalstate::GlobalState;
pub use crate::instructions::Instruction;
pub use crate::configuration::Configuration;
/// An instruction table: all potential instructions for a given function
/// together with their rate of occurrence.
/// (Fixed comment typos: "protential" -> "potential", "occurance" -> "occurrence".)
pub struct InstructionObject {
    /// `(threshold, instruction)` pairs, checked in order by `call_fn`.
    pub probable_ops: Vec<(i64, Rc<Instruction>)>,
    pub configuration: Configuration,
}

/// No-op fallback instruction body. The parameter is intentionally unused
/// (underscore-prefixed to silence the unused-variable warning).
fn nop(_state: &mut GlobalState) {}

impl InstructionObject {
    /// Builds a table holding `operation`, its `inverse`, and a final
    /// zero-threshold no-op acting as a catch-all.
    pub fn new(
        config: Configuration,
        operation: (i64, Rc<Instruction>),
        inverse: (i64, Rc<Instruction>),
    ) -> InstructionObject {
        InstructionObject {
            probable_ops: vec![
                operation,
                inverse,
                (0, Rc::new(Instruction::new(Box::new(nop)))),
            ],
            configuration: config,
        }
    }

    // TODO: Handle the multiplier
    /// Executes the first instruction whose threshold is at or below
    /// `probability + probability_modifier` and returns the execution
    /// probability (`chance / max_prob`), or `1.0` if nothing ran.
    ///
    /// `_probability_multiplier` is accepted but unused pending the TODO.
    pub fn call_fn(
        &self,
        state: &mut GlobalState,
        probability: i64,
        probability_modifier: i64,
        _probability_multiplier: f64,
    ) -> f64 {
        let final_probability = probability + probability_modifier;
        for (chance, op) in &self.probable_ops {
            if final_probability >= *chance {
                op.as_ref().call_fn(state);
                return (*chance as f64) / (self.configuration.max_prob as f64);
            }
        }
        1.
    }
}
|
// #![allow(unused)]
use std::io::BufReader;
use std::io::prelude::*;
use std::fs::File;
// Exo1
// fn main() {
// let values = values();
// for (i, value) in values[25..].iter().enumerate() {
// let window = &values[i..i + 25];
// if sum_in(window, value) == false {
// println!("result : {}", value);
// break;
// }
// }
// }
// Exo2
/// Day 9 part 2: find a contiguous window summing to `TARGET` and print the
/// window's min/max and their sum.
const TARGET: u64 = 756008079;

fn main() {
    let values = values();
    for i in 0..values.len() {
        // Keep a running sum instead of re-summing the slice for every
        // window end: the old version was O(n^3); this is O(n^2).
        let mut sum: u64 = 0;
        for j in i..values.len() {
            sum += values[j];
            if sum > TARGET {
                // Values are positive; a longer window can only grow the sum.
                break;
            }
            if sum == TARGET {
                // Bug fix: the old slice arithmetic (`values[i..(i + j)]` with
                // `j` capped at `len - i - 1`) never considered windows that
                // reach the end of the input; `i..=j` covers them.
                let window = &values[i..=j];
                let min = window.iter().min().unwrap();
                let max = window.iter().max().unwrap();
                println!("min: {} max:{}", min, max);
                println!("sum: {}", min + max);
                return;
            }
        }
    }
}
/// Reads the puzzle input: one unsigned integer per line from the file
/// named `input` in the working directory.
fn values() -> Vec<u64> {
    let file = File::open("input").unwrap();
    BufReader::new(file)
        .lines()
        .map(|line| line.unwrap().parse().unwrap())
        .collect()
}
/// Reports whether any pair of elements in `arr` sums to `sum`.
///
/// Note: an element may be paired with itself, matching the original
/// nested-loop behaviour.
fn sum_in(arr: &[u64], sum: &u64) -> bool {
    arr.iter().any(|a| arr.iter().any(|b| a + b == *sum))
}
use actix::prelude::*;
use chrono;
use diesel;
use diesel::prelude::*;
use uuid;
use crate::db::schema::report;
/// Row of the `report` table, used for both inserts and queries.
#[derive(Debug, Insertable, Queryable, Clone)]
#[table_name = "report"]
pub struct ReportDb {
    pub id: String,
    name: String,
    folder: String,
    created_on: chrono::NaiveDateTime,
    last_update: chrono::NaiveDateTime,
}
use crate::db::schema::test_result_in_report;
/// Row of the `test_result_in_report` table linking one test result
/// (by test/trace id, category and environment) to a report.
#[derive(Debug, Insertable, Queryable, Clone)]
#[table_name = "test_result_in_report"]
struct TestResultInReportDb {
    report_id: String,
    test_id: String,
    trace_id: String,
    category: String,
    /// `None` is stored as SQL NULL and matched with `is_null` in queries.
    environment: Option<String>,
    status: i32,
}
impl super::DbExecutor {
    /// Looks up a report by (folder, name); returns None when absent or on
    /// any DB error (`.ok()` swallows both cases).
    fn find_report(&mut self, report_db: &ReportDb) -> Option<ReportDb> {
        use super::super::schema::report::dsl::*;
        report
            .filter(folder.eq(&report_db.folder))
            .filter(name.eq(&report_db.name))
            .first::<ReportDb>(self.0.as_ref().expect("fail to get DB"))
            .ok()
    }
    /// Returns the id of the report matching `report_db`, refreshing its
    /// `last_update`; creates the report with a fresh UUID when missing.
    fn update_report_or_create(&mut self, report_db: &ReportDb) -> String {
        use super::super::schema::report::dsl::*;
        match self.find_report(report_db) {
            Some(existing) => {
                // Only bump the timestamp; update errors are deliberately ignored.
                diesel::update(report.filter(id.eq(&existing.id)))
                    .set(last_update.eq(report_db.last_update))
                    .execute(self.0.as_ref().expect("fail to get DB"))
                    .ok();
                existing.id
            }
            None => {
                let new_id = uuid::Uuid::new_v4().to_hyphenated().to_string();
                let could_insert = diesel::insert_into(report)
                    .values(&ReportDb {
                        id: new_id.clone(),
                        ..(*report_db).clone()
                    })
                    .execute(self.0.as_ref().expect("fail to get DB"));
                if could_insert.is_err() {
                    // Insert failed — presumably a concurrent writer created
                    // the row first (unique constraint); fall back to
                    // updating the now-existing report. TODO confirm schema.
                    self.find_report(report_db)
                        .map(|existing| {
                            diesel::update(report.filter(id.eq(&existing.id)))
                                .set(last_update.eq(report_db.last_update))
                                .execute(self.0.as_ref().expect("fail to get DB"))
                                .ok();
                            existing.id
                        })
                        .expect("fail to find report")
                } else {
                    new_id
                }
            }
        }
    }
}
impl Handler<crate::engine::report::ResultForReport> for super::DbExecutor {
    type Result = ();
    /// Upserts a test result into a report: ensures the report row exists
    /// (creating it if needed), then updates the matching
    /// `test_result_in_report` row or inserts a new one.
    ///
    /// A missing `category` falls back to the report name; a missing
    /// `environment` is matched/stored as SQL NULL. All DB errors are
    /// swallowed with `.ok()`.
    fn handle(
        &mut self,
        msg: crate::engine::report::ResultForReport,
        _: &mut Self::Context,
    ) -> Self::Result {
        // Prototype row; the real id is resolved or generated below.
        let report = ReportDb {
            id: "n/a".to_string(),
            name: msg.report_name.clone(),
            folder: msg.report_group.clone(),
            created_on: chrono::Utc::now().naive_utc(),
            last_update: chrono::Utc::now().naive_utc(),
        };
        let found_report_id = self.update_report_or_create(&report);
        use super::super::schema::test_result_in_report::dsl::*;
        // Build the existence probe with the same fallbacks used for the update.
        let mut find_tr = test_result_in_report
            .filter(report_id.eq(&found_report_id))
            .filter(test_id.eq(&msg.result.test_id))
            .into_boxed();
        if let Some(category_from_input) = msg.category.clone() {
            find_tr = find_tr.filter(category.eq(category_from_input));
        } else {
            find_tr = find_tr.filter(category.eq(&msg.report_name));
        }
        if let Some(environment_from_input) = msg.result.environment.clone() {
            find_tr = find_tr.filter(environment.eq(environment_from_input));
        } else {
            find_tr = find_tr.filter(environment.is_null());
        }
        if find_tr
            .first::<TestResultInReportDb>(self.0.as_ref().expect("fail to get DB"))
            .ok()
            .is_some()
        {
            // Four arms cover every Some/None combination because diesel
            // needs distinct filter types for `eq` vs `is_null`.
            match (msg.category, msg.result.environment) {
                (Some(category_from_input), Some(environment_from_input)) => {
                    diesel::update(
                        test_result_in_report
                            .filter(report_id.eq(&found_report_id))
                            .filter(test_id.eq(&msg.result.test_id))
                            .filter(category.eq(category_from_input))
                            .filter(environment.eq(environment_from_input)),
                    )
                    .set((
                        trace_id.eq(msg.result.trace_id),
                        status.eq(msg.result.status.as_i32()),
                    ))
                    .execute(self.0.as_ref().expect("fail to get DB"))
                    .ok();
                }
                (Some(category_from_input), None) => {
                    diesel::update(
                        test_result_in_report
                            .filter(report_id.eq(&found_report_id))
                            .filter(test_id.eq(&msg.result.test_id))
                            .filter(category.eq(category_from_input))
                            .filter(environment.is_null()),
                    )
                    .set((
                        trace_id.eq(msg.result.trace_id),
                        status.eq(msg.result.status.as_i32()),
                    ))
                    .execute(self.0.as_ref().expect("fail to get DB"))
                    .ok();
                }
                (None, Some(environment_from_input)) => {
                    diesel::update(
                        test_result_in_report
                            .filter(report_id.eq(&found_report_id))
                            .filter(test_id.eq(&msg.result.test_id))
                            .filter(category.eq(&msg.report_name))
                            .filter(environment.eq(environment_from_input)),
                    )
                    .set((
                        trace_id.eq(msg.result.trace_id),
                        status.eq(msg.result.status.as_i32()),
                    ))
                    .execute(self.0.as_ref().expect("fail to get DB"))
                    .ok();
                }
                (None, None) => {
                    diesel::update(
                        test_result_in_report
                            .filter(report_id.eq(&found_report_id))
                            .filter(test_id.eq(&msg.result.test_id))
                            .filter(category.eq(&msg.report_name))
                            .filter(environment.is_null()),
                    )
                    .set((
                        trace_id.eq(msg.result.trace_id),
                        status.eq(msg.result.status.as_i32()),
                    ))
                    .execute(self.0.as_ref().expect("fail to get DB"))
                    .ok();
                }
            };
        } else {
            // No existing row: insert a fresh result row.
            diesel::insert_into(test_result_in_report)
                .values(&TestResultInReportDb {
                    test_id: msg.result.test_id.clone(),
                    trace_id: msg.result.trace_id.clone(),
                    report_id: found_report_id.clone(),
                    category: msg
                        .category
                        .clone()
                        .unwrap_or_else(|| msg.report_name.clone()),
                    environment: msg.result.environment,
                    // NOTE(review): uses `.into()` here but `.as_i32()` in the
                    // update arms — presumably equivalent; confirm.
                    status: msg.result.status.into(),
                })
                .execute(self.0.as_ref().expect("fail to get DB"))
                .ok();
        }
    }
}
|
use std::sync::Arc;
use std::cell::RefCell;
/// Demonstrates that `foo` receives a copy of `x`.
fn main() {
    // Fix: `x` is never mutated here (foo takes it by value), so the
    // `mut` qualifier only produced an unused_mut warning.
    let x = 1;
    foo(x);
}
/// Pass-by-value demo: `x` is a local copy, so reassigning it never affects
/// the caller's variable. This function always prints 1.
fn foo(mut x : i32)
{
    x= 1;
    println!("{}", x);
}
/// Shared-ownership demo: cloning an `Arc` bumps the refcount; both handles
/// point at the same heap-allocated 5.
/// (Name kept non-snake-case to preserve the existing interface.)
fn Interior() {
    let original = Arc::new(5);
    let shared = Arc::clone(&original);
}
/// Interior-mutability demo with `RefCell`.
/// (Name kept non-snake-case to preserve the existing interface.)
fn Exterior() {
    let x = RefCell::new(42);
    let y = x.borrow_mut();
    // Fix: taking a second `borrow_mut` while `y` was still alive panicked
    // at runtime with a BorrowMutError (RefCell allows only one mutable
    // borrow at a time). Release the first borrow before taking another.
    drop(y);
    let z = x.borrow_mut();
}
use std::fmt;
/// An axis-aligned rectangle described by its width and height.
struct Rectangle {
    width: u32,
    height: u32,
}

impl Rectangle {
    /// Area covered by the rectangle.
    fn area(&self) -> u32 {
        self.width * self.height
    }

    /// True when `other` fits strictly inside `self` in both dimensions.
    fn can_hold(&self, other: &Rectangle) -> bool {
        self.width > other.width && self.height > other.height
    }
}

impl fmt::Display for Rectangle {
    /// Renders as `width: W, height: H`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "width: {}, height: {}", self.width, self.height)
    }
}
fn main() {
let r1: Rectangle = Rectangle {
width: 30,
height: 50,
};
let r2: Rectangle = Rectangle {
width: 10,
height: 40,
};
let r3: Rectangle = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2: {}", r1.can_hold(&r2));
println!("Can rect2 hold rect3: {}", r2.can_hold(&r3));
}
|
pub mod ex01_1;
pub mod ex01_2;
pub mod ex01_3;
|
use anyhow::Result;
use clap::ArgMatches;
use colored::*;
use std::path::PathBuf;
use crate::cfg::Cfg;
use crate::cli::cfg::get_cfg;
use crate::cli::commands::sync::{sync_workflow, SyncSettings};
use crate::cli::error::CliError;
use crate::cli::settings::get_settings;
use crate::cli::terminal::message::success;
use crate::env_file::{path_from_env_name, Env};
use super::r#use::use_workflow;
/// CLI `env new`: creates a new env file for the current setup, syncs it
/// with the most recent env (rolling the file back on sync failure), then
/// switches the settings to use it.
pub fn env_new(app: &ArgMatches) -> Result<()> {
    // Pull local config into the global one before reading settings.
    let mut cfg = get_cfg()?;
    cfg.sync_local_to_global()?;
    let cfg = cfg;
    let mut settings = get_settings(app, &cfg);
    let sync_settings = SyncSettings::new(&app);
    let setup_name = settings.setup()?;
    let env_name: String = app.value_of("name").unwrap().into();
    let private = app.is_present("private");
    let setup = cfg.current_setup(setup_name)?;
    let mut envs = setup.envs().into_iter().filter_map(|r| r.ok()).collect();
    let recent_env = Env::recent(&envs);
    // `false` = do not pre-populate the example variables.
    let new_env = env_new_workflow(&cfg, &setup_name, &env_name, &private, &false)?;
    envs.push(new_env.clone());
    // Propagate variables from the most recent env into the new one, if any.
    if let Ok(recent_env) = recent_env {
        if let Err(e) = sync_workflow(recent_env.clone(), envs, sync_settings) {
            // Remove env file when sync is stopped/fail.
            new_env.remove()?;
            return Err(e);
        }
    }
    settings.set_env(new_env.name()?);
    use_workflow(&cfg, &settings)?;
    cfg.save()?;
    success(format!("env `{}` created : `{:?}`", env_name.bold(), new_env.file()).as_str());
    Ok(())
}
/// Creates (and saves) a new env file named `env_name` in either the public
/// or the private env directory of `setup_name`.
///
/// Errors if a file with that name already exists in either directory.
pub fn env_new_workflow(
    cfg: &Cfg,
    setup_name: &String,
    env_name: &String,
    private: &bool,
    example: &bool,
) -> Result<Env> {
    let setup = cfg.current_setup(setup_name)?;
    // Builds the prospective env for `dir`, failing if its file exists.
    let retrieve_env_is_not_exists = |dir: PathBuf| -> Result<Env> {
        let env = path_from_env_name(dir, env_name);
        let mut env: Env = env.into();
        if *example {
            // Pre-populate the example skeleton.
            env.add("VAR1", "VALUE1");
            env.add("VAR2", "VALUE2");
        }
        if env.file().exists() {
            return Err(CliError::EnvFileAlreadyExists(env.file().clone(), env.clone()).into());
        } else {
            Ok(env)
        }
    };
    // Outer Result: directory lookup; inner Result: the existence check.
    // Existence errors in EITHER directory abort, regardless of `private`.
    let public_env = setup.envs_public_dir().map(retrieve_env_is_not_exists);
    if let Ok(Err(err)) = public_env {
        return Err(err);
    };
    let private_env = setup.envs_private_dir().map(retrieve_env_is_not_exists);
    if let Ok(Err(err)) = private_env {
        return Err(err);
    };
    // `??` unwraps both layers; the early returns above already surfaced
    // already-exists errors, so this mainly fails on a missing directory.
    let env = if *private {
        private_env??
    } else {
        public_env??
    };
    env.save()?;
    Ok(env)
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
// Generated by AutoRust: aggregate error type with one transparent variant
// per API operation in this client module. Do not hand-edit (the
// `ListMetricDefintions` misspelling mirrors the generated operation name);
// regenerate instead.
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)]
    AppServiceEnvironments_List(#[from] app_service_environments::list::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListByResourceGroup(#[from] app_service_environments::list_by_resource_group::Error),
    #[error(transparent)]
    AppServiceEnvironments_Get(#[from] app_service_environments::get::Error),
    #[error(transparent)]
    AppServiceEnvironments_CreateOrUpdate(#[from] app_service_environments::create_or_update::Error),
    #[error(transparent)]
    AppServiceEnvironments_Update(#[from] app_service_environments::update::Error),
    #[error(transparent)]
    AppServiceEnvironments_Delete(#[from] app_service_environments::delete::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListCapacities(#[from] app_service_environments::list_capacities::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListVips(#[from] app_service_environments::list_vips::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListDiagnostics(#[from] app_service_environments::list_diagnostics::Error),
    #[error(transparent)]
    AppServiceEnvironments_GetDiagnosticsItem(#[from] app_service_environments::get_diagnostics_item::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMetricDefinitions(#[from] app_service_environments::list_metric_definitions::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMetrics(#[from] app_service_environments::list_metrics::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRolePools(#[from] app_service_environments::list_multi_role_pools::Error),
    #[error(transparent)]
    AppServiceEnvironments_GetMultiRolePool(#[from] app_service_environments::get_multi_role_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_CreateOrUpdateMultiRolePool(#[from] app_service_environments::create_or_update_multi_role_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_UpdateMultiRolePool(#[from] app_service_environments::update_multi_role_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRolePoolInstanceMetricDefinitions(
        #[from] app_service_environments::list_multi_role_pool_instance_metric_definitions::Error,
    ),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRolePoolInstanceMetrics(#[from] app_service_environments::list_multi_role_pool_instance_metrics::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRoleMetricDefinitions(#[from] app_service_environments::list_multi_role_metric_definitions::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRoleMetrics(#[from] app_service_environments::list_multi_role_metrics::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRolePoolSkus(#[from] app_service_environments::list_multi_role_pool_skus::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListMultiRoleUsages(#[from] app_service_environments::list_multi_role_usages::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListOperations(#[from] app_service_environments::list_operations::Error),
    #[error(transparent)]
    AppServiceEnvironments_Reboot(#[from] app_service_environments::reboot::Error),
    #[error(transparent)]
    AppServiceEnvironments_Resume(#[from] app_service_environments::resume::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListAppServicePlans(#[from] app_service_environments::list_app_service_plans::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWebApps(#[from] app_service_environments::list_web_apps::Error),
    #[error(transparent)]
    AppServiceEnvironments_Suspend(#[from] app_service_environments::suspend::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListUsages(#[from] app_service_environments::list_usages::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWorkerPools(#[from] app_service_environments::list_worker_pools::Error),
    #[error(transparent)]
    AppServiceEnvironments_GetWorkerPool(#[from] app_service_environments::get_worker_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_CreateOrUpdateWorkerPool(#[from] app_service_environments::create_or_update_worker_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_UpdateWorkerPool(#[from] app_service_environments::update_worker_pool::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWorkerPoolInstanceMetricDefinitions(
        #[from] app_service_environments::list_worker_pool_instance_metric_definitions::Error,
    ),
    #[error(transparent)]
    AppServiceEnvironments_ListWorkerPoolInstanceMetrics(#[from] app_service_environments::list_worker_pool_instance_metrics::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWebWorkerMetricDefinitions(#[from] app_service_environments::list_web_worker_metric_definitions::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWebWorkerMetrics(#[from] app_service_environments::list_web_worker_metrics::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWorkerPoolSkus(#[from] app_service_environments::list_worker_pool_skus::Error),
    #[error(transparent)]
    AppServiceEnvironments_ListWebWorkerUsages(#[from] app_service_environments::list_web_worker_usages::Error),
    #[error(transparent)]
    AppServicePlans_List(#[from] app_service_plans::list::Error),
    #[error(transparent)]
    AppServicePlans_ListByResourceGroup(#[from] app_service_plans::list_by_resource_group::Error),
    #[error(transparent)]
    AppServicePlans_Get(#[from] app_service_plans::get::Error),
    #[error(transparent)]
    AppServicePlans_CreateOrUpdate(#[from] app_service_plans::create_or_update::Error),
    #[error(transparent)]
    AppServicePlans_Update(#[from] app_service_plans::update::Error),
    #[error(transparent)]
    AppServicePlans_Delete(#[from] app_service_plans::delete::Error),
    #[error(transparent)]
    AppServicePlans_ListCapabilities(#[from] app_service_plans::list_capabilities::Error),
    #[error(transparent)]
    AppServicePlans_GetHybridConnection(#[from] app_service_plans::get_hybrid_connection::Error),
    #[error(transparent)]
    AppServicePlans_DeleteHybridConnection(#[from] app_service_plans::delete_hybrid_connection::Error),
    #[error(transparent)]
    AppServicePlans_ListHybridConnectionKeys(#[from] app_service_plans::list_hybrid_connection_keys::Error),
    #[error(transparent)]
    AppServicePlans_ListWebAppsByHybridConnection(#[from] app_service_plans::list_web_apps_by_hybrid_connection::Error),
    #[error(transparent)]
    AppServicePlans_GetHybridConnectionPlanLimit(#[from] app_service_plans::get_hybrid_connection_plan_limit::Error),
    #[error(transparent)]
    AppServicePlans_ListHybridConnections(#[from] app_service_plans::list_hybrid_connections::Error),
    #[error(transparent)]
    AppServicePlans_ListMetricDefintions(#[from] app_service_plans::list_metric_defintions::Error),
    #[error(transparent)]
    AppServicePlans_ListMetrics(#[from] app_service_plans::list_metrics::Error),
    #[error(transparent)]
    AppServicePlans_RestartWebApps(#[from] app_service_plans::restart_web_apps::Error),
    #[error(transparent)]
    AppServicePlans_ListWebApps(#[from] app_service_plans::list_web_apps::Error),
    #[error(transparent)]
    AppServicePlans_GetServerFarmSkus(#[from] app_service_plans::get_server_farm_skus::Error),
    #[error(transparent)]
    AppServicePlans_ListUsages(#[from] app_service_plans::list_usages::Error),
    #[error(transparent)]
    AppServicePlans_ListVnets(#[from] app_service_plans::list_vnets::Error),
    #[error(transparent)]
    AppServicePlans_GetVnetFromServerFarm(#[from] app_service_plans::get_vnet_from_server_farm::Error),
    #[error(transparent)]
    AppServicePlans_GetVnetGateway(#[from] app_service_plans::get_vnet_gateway::Error),
    #[error(transparent)]
    AppServicePlans_UpdateVnetGateway(#[from] app_service_plans::update_vnet_gateway::Error),
    #[error(transparent)]
    AppServicePlans_ListRoutesForVnet(#[from] app_service_plans::list_routes_for_vnet::Error),
    #[error(transparent)]
    AppServicePlans_GetRouteForVnet(#[from] app_service_plans::get_route_for_vnet::Error),
    #[error(transparent)]
    AppServicePlans_CreateOrUpdateVnetRoute(#[from] app_service_plans::create_or_update_vnet_route::Error),
    #[error(transparent)]
    AppServicePlans_UpdateVnetRoute(#[from] app_service_plans::update_vnet_route::Error),
    #[error(transparent)]
    AppServicePlans_DeleteVnetRoute(#[from] app_service_plans::delete_vnet_route::Error),
    #[error(transparent)]
    AppServicePlans_RebootWorker(#[from] app_service_plans::reboot_worker::Error),
}
pub mod app_service_environments {
use super::{models, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<models::AppServiceEnvironmentCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Web/hostingEnvironments",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list {
        use super::{models, API_VERSION};
        /// Errors returned by the `list` operation: one variant per stage of the
        /// request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<models::AppServiceEnvironmentCollection, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_resource_group::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_by_resource_group {
        use super::{models, API_VERSION};
        /// Errors returned by the `list_by_resource_group` operation: one variant per
        /// stage of the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::AppServiceEnvironmentResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod get {
        use super::{models, API_VERSION};
        /// Errors returned by the `get` operation: one variant per stage of the
        /// request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
hosting_environment_envelope: &models::AppServiceEnvironmentResource,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(hosting_environment_envelope).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(create_or_update::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(create_or_update::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(create_or_update::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::AppServiceEnvironmentResource),
Accepted202(models::AppServiceEnvironmentResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
hosting_environment_envelope: &models::AppServiceEnvironmentPatchResource,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(hosting_environment_envelope).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::AppServiceEnvironmentResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(update::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(update::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(update::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::AppServiceEnvironmentResource),
Accepted202(models::AppServiceEnvironmentResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
force_delete: Option<bool>,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(force_delete) = force_delete {
url.query_pairs_mut().append_pair("forceDelete", force_delete.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::BAD_REQUEST => Err(delete::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(delete::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(delete::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_capacities(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::StampCapacityCollection, list_capacities::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/capacities/compute",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_capacities::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_capacities::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_capacities::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_capacities::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::StampCapacityCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_capacities::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_capacities::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_capacities {
        use super::{models, API_VERSION};
        /// Errors returned by the `list_capacities` operation: one variant per stage
        /// of the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_vips(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::AddressResponse, list_vips::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/capacities/virtualip",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_vips::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_vips::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_vips::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_vips::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AddressResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_vips::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_vips::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_vips {
        use super::{models, API_VERSION};
        /// Errors returned by the `list_vips` operation: one variant per stage of
        /// the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_diagnostics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::HostingEnvironmentDiagnostics>, list_diagnostics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/diagnostics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_diagnostics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_diagnostics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_diagnostics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_diagnostics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::HostingEnvironmentDiagnostics> = serde_json::from_slice(rsp_body)
.map_err(|source| list_diagnostics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_diagnostics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_diagnostics {
        use super::{models, API_VERSION};
        /// Errors returned by the `list_diagnostics` operation: one variant per stage
        /// of the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn get_diagnostics_item(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
diagnostics_name: &str,
subscription_id: &str,
) -> std::result::Result<models::HostingEnvironmentDiagnostics, get_diagnostics_item::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/diagnostics/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
diagnostics_name
);
let mut url = url::Url::parse(url_str).map_err(get_diagnostics_item::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_diagnostics_item::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_diagnostics_item::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_diagnostics_item::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::HostingEnvironmentDiagnostics = serde_json::from_slice(rsp_body)
.map_err(|source| get_diagnostics_item::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_diagnostics_item::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod get_diagnostics_item {
        use super::{models, API_VERSION};
        /// Errors returned by the `get_diagnostics_item` operation: one variant per
        /// stage of the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_metric_definitions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::MetricDefinition, list_metric_definitions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/metricdefinitions",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_metric_definitions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_metric_definitions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_metric_definitions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_metric_definitions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::MetricDefinition = serde_json::from_slice(rsp_body)
.map_err(|source| list_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_metric_definitions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_metric_definitions {
        use super::{models, API_VERSION};
        /// Errors returned by the `list_metric_definitions` operation: one variant per
        /// stage of the request pipeline, plus a catch-all for unexpected HTTP status codes.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
details: Option<bool>,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/metrics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_metrics {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_pools(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::WorkerPoolCollection, list_multi_role_pools::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_multi_role_pools::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_pools::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_pools::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_pools::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_pools::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_pools::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_pools {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_multi_role_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::WorkerPoolResource, get_multi_role_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(get_multi_role_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_multi_role_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_multi_role_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_multi_role_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| get_multi_role_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_multi_role_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_multi_role_pool {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update_multi_role_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
multi_role_pool_envelope: &models::WorkerPoolResource,
subscription_id: &str,
) -> std::result::Result<create_or_update_multi_role_pool::Response, create_or_update_multi_role_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_multi_role_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_multi_role_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(multi_role_pool_envelope).map_err(create_or_update_multi_role_pool::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_multi_role_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_multi_role_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_multi_role_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_multi_role_pool::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_multi_role_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_multi_role_pool::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(create_or_update_multi_role_pool::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(create_or_update_multi_role_pool::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(create_or_update_multi_role_pool::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update_multi_role_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update_multi_role_pool {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WorkerPoolResource),
Accepted202(models::WorkerPoolResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update_multi_role_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
multi_role_pool_envelope: &models::WorkerPoolResource,
subscription_id: &str,
) -> std::result::Result<update_multi_role_pool::Response, update_multi_role_pool::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(update_multi_role_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update_multi_role_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(multi_role_pool_envelope).map_err(update_multi_role_pool::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(update_multi_role_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(update_multi_role_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| update_multi_role_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update_multi_role_pool::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| update_multi_role_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update_multi_role_pool::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(update_multi_role_pool::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(update_multi_role_pool::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(update_multi_role_pool::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(update_multi_role_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update_multi_role_pool {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WorkerPoolResource),
Accepted202(models::WorkerPoolResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_pool_instance_metric_definitions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
instance: &str,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricDefinitionCollection, list_multi_role_pool_instance_metric_definitions::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/instances/{}/metricdefinitions" , operation_config . base_path () , subscription_id , resource_group_name , name , instance) ;
let mut url = url::Url::parse(url_str).map_err(list_multi_role_pool_instance_metric_definitions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_pool_instance_metric_definitions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_pool_instance_metric_definitions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_pool_instance_metric_definitions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricDefinitionCollection = serde_json::from_slice(rsp_body).map_err(|source| {
list_multi_role_pool_instance_metric_definitions::Error::DeserializeError(source, rsp_body.clone())
})?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_pool_instance_metric_definitions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_pool_instance_metric_definitions {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_pool_instance_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
instance: &str,
details: Option<bool>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_multi_role_pool_instance_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/instances/{}/metrics" , operation_config . base_path () , subscription_id , resource_group_name , name , instance) ;
let mut url = url::Url::parse(url_str).map_err(list_multi_role_pool_instance_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_pool_instance_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_pool_instance_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_pool_instance_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_pool_instance_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_pool_instance_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_pool_instance_metrics {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_metric_definitions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricDefinitionCollection, list_multi_role_metric_definitions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/metricdefinitions",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_multi_role_metric_definitions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_metric_definitions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_metric_definitions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_metric_definitions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricDefinitionCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_metric_definitions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_metric_definitions {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
start_time: Option<&str>,
end_time: Option<&str>,
time_grain: Option<&str>,
details: Option<bool>,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_multi_role_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/metrics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_multi_role_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(start_time) = start_time {
url.query_pairs_mut().append_pair("startTime", start_time);
}
if let Some(end_time) = end_time {
url.query_pairs_mut().append_pair("endTime", end_time);
}
if let Some(time_grain) = time_grain {
url.query_pairs_mut().append_pair("timeGrain", time_grain);
}
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_metrics {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_pool_skus(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::SkuInfoCollection, list_multi_role_pool_skus::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/skus",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_multi_role_pool_skus::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_pool_skus::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_pool_skus::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_pool_skus::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SkuInfoCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_pool_skus::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_pool_skus::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_pool_skus {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_multi_role_usages(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::UsageCollection, list_multi_role_usages::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/multiRolePools/default/usages",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_multi_role_usages::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_multi_role_usages::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_multi_role_usages::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_multi_role_usages::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::UsageCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_multi_role_usages::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_multi_role_usages::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_multi_role_usages {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all currently running operations on the hosting environment
/// (`GET .../Microsoft.Web/hostingEnvironments/{name}/operations`).
///
/// Returns the deserialized `Vec<models::Operation>` on HTTP 200; any other
/// status is surfaced as `list_operations::Error::UnexpectedResponse`
/// carrying the raw response body for diagnostics.
pub async fn list_operations(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::Operation>, list_operations::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/operations",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_operations::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_operations::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_operations::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_operations::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::Operation> = serde_json::from_slice(rsp_body)
.map_err(|source| list_operations::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
// Anything other than 200 is unexpected; keep the body for diagnostics.
status_code => {
let rsp_body = rsp.body();
Err(list_operations::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_operations {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Reboots all machines of the hosting environment
/// (`POST .../Microsoft.Web/hostingEnvironments/{name}/reboot`).
///
/// Succeeds with `()` on HTTP 202 (the reboot is asynchronous on the service
/// side). 400/404/409 map to dedicated error variants; any other status is
/// returned as `reboot::Error::UnexpectedResponse` with the raw body.
pub async fn reboot(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<(), reboot::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/reboot",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(reboot::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(reboot::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Bodiless POST: send an explicit Content-Length of 0.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(reboot::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(reboot::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
http::StatusCode::BAD_REQUEST => Err(reboot::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(reboot::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(reboot::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(reboot::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod reboot {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Resumes the hosting environment
/// (`POST .../Microsoft.Web/hostingEnvironments/{name}/resume`).
///
/// Both 200 and 202 carry a `models::WebAppCollection` body, distinguished by
/// the `resume::Response` enum. Any other status becomes
/// `resume::Error::UnexpectedResponse` with the raw body.
pub async fn resume(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<resume::Response, resume::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/resume",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(resume::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(resume::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Bodiless POST: send an explicit Content-Length of 0.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(resume::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(resume::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| resume::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(resume::Response::Ok200(rsp_value))
}
// 202: operation accepted but still in progress on the service side.
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| resume::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(resume::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(resume::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod resume {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WebAppCollection),
Accepted202(models::WebAppCollection),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all App Service plans in the hosting environment
/// (`GET .../Microsoft.Web/hostingEnvironments/{name}/serverfarms`).
///
/// Returns `models::AppServicePlanCollection` on HTTP 200; any other status
/// becomes `list_app_service_plans::Error::UnexpectedResponse`.
pub async fn list_app_service_plans(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::AppServicePlanCollection, list_app_service_plans::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/serverfarms",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_app_service_plans::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_app_service_plans::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_app_service_plans::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_app_service_plans::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlanCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_app_service_plans::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_app_service_plans::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_app_service_plans {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all apps in the hosting environment
/// (`GET .../Microsoft.Web/hostingEnvironments/{name}/sites`).
///
/// `properties_to_include`, when `Some`, is forwarded as the
/// `propertiesToInclude` query parameter. Returns `models::WebAppCollection`
/// on HTTP 200; any other status becomes `Error::UnexpectedResponse`.
pub async fn list_web_apps(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
properties_to_include: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::WebAppCollection, list_web_apps::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/sites",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_web_apps::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_apps::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional query parameter: only appended when the caller supplied it.
if let Some(properties_to_include) = properties_to_include {
url.query_pairs_mut().append_pair("propertiesToInclude", properties_to_include);
}
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_web_apps::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_apps::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_web_apps::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_apps::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_web_apps {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Suspends the hosting environment
/// (`POST .../Microsoft.Web/hostingEnvironments/{name}/suspend`).
///
/// Both 200 and 202 carry a `models::WebAppCollection` body, distinguished by
/// the `suspend::Response` enum. Any other status becomes
/// `suspend::Error::UnexpectedResponse` with the raw body.
pub async fn suspend(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<suspend::Response, suspend::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/suspend",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(suspend::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(suspend::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Bodiless POST: send an explicit Content-Length of 0.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(suspend::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(suspend::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| suspend::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(suspend::Response::Ok200(rsp_value))
}
// 202: operation accepted but still in progress on the service side.
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| suspend::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(suspend::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(suspend::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod suspend {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WebAppCollection),
Accepted202(models::WebAppCollection),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets global usage metrics of the hosting environment
/// (`GET .../Microsoft.Web/hostingEnvironments/{name}/usages`).
///
/// `filter`, when `Some`, is forwarded as the OData `$filter` query parameter.
/// Returns `models::CsmUsageQuotaCollection` on HTTP 200; any other status
/// becomes `list_usages::Error::UnexpectedResponse`.
pub async fn list_usages(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::CsmUsageQuotaCollection, list_usages::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/usages",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_usages::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// Optional OData filter: only appended when the caller supplied it.
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_usages::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CsmUsageQuotaCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_usages::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_usages {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all worker pools of the hosting environment
/// (`GET .../Microsoft.Web/hostingEnvironments/{name}/workerPools`).
///
/// Returns `models::WorkerPoolCollection` on HTTP 200; any other status
/// becomes `list_worker_pools::Error::UnexpectedResponse`.
pub async fn list_worker_pools(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::WorkerPoolCollection, list_worker_pools::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_worker_pools::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_worker_pools::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_worker_pools::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_worker_pools::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_worker_pools::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_worker_pools::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_worker_pools {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets the properties of a single worker pool
/// (`GET .../hostingEnvironments/{name}/workerPools/{worker_pool_name}`).
///
/// Returns `models::WorkerPoolResource` on HTTP 200; any other status
/// becomes `get_worker_pool::Error::UnexpectedResponse`.
pub async fn get_worker_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
subscription_id: &str,
) -> std::result::Result<models::WorkerPoolResource, get_worker_pool::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(get_worker_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_worker_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// GET request: empty body.
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_worker_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_worker_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| get_worker_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_worker_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_worker_pool {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Creates or updates a worker pool
/// (`PUT .../hostingEnvironments/{name}/workerPools/{worker_pool_name}`),
/// sending `worker_pool_envelope` as the JSON request body.
///
/// 200 and 202 both carry a `models::WorkerPoolResource`, distinguished by the
/// `Response` enum; 400/404/409 map to dedicated error variants; any other
/// status becomes `Error::UnexpectedResponse`.
pub async fn create_or_update_worker_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
worker_pool_envelope: &models::WorkerPoolResource,
subscription_id: &str,
) -> std::result::Result<create_or_update_worker_pool::Response, create_or_update_worker_pool::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_worker_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_worker_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// JSON body: serialize the caller's envelope.
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(worker_pool_envelope).map_err(create_or_update_worker_pool::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_worker_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_worker_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_worker_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_worker_pool::Response::Ok200(rsp_value))
}
// 202: operation accepted but still in progress on the service side.
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_worker_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update_worker_pool::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(create_or_update_worker_pool::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(create_or_update_worker_pool::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(create_or_update_worker_pool::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update_worker_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update_worker_pool {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WorkerPoolResource),
Accepted202(models::WorkerPoolResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Partially updates a worker pool
/// (`PATCH .../hostingEnvironments/{name}/workerPools/{worker_pool_name}`),
/// sending `worker_pool_envelope` as the JSON request body.
///
/// 200 and 202 both carry a `models::WorkerPoolResource`, distinguished by the
/// `Response` enum; 400/404/409 map to dedicated error variants; any other
/// status becomes `Error::UnexpectedResponse`.
pub async fn update_worker_pool(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
worker_pool_envelope: &models::WorkerPoolResource,
subscription_id: &str,
) -> std::result::Result<update_worker_pool::Response, update_worker_pool::Error> {
let http_client = operation_config.http_client();
// Build the resource URL from the configured base path and path parameters.
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(update_worker_pool::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
// Attach a bearer token only when a credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update_worker_pool::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
// JSON body: serialize the caller's envelope.
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(worker_pool_envelope).map_err(update_worker_pool::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update_worker_pool::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(update_worker_pool::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| update_worker_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update_worker_pool::Response::Ok200(rsp_value))
}
// 202: operation accepted but still in progress on the service side.
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::WorkerPoolResource = serde_json::from_slice(rsp_body)
.map_err(|source| update_worker_pool::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update_worker_pool::Response::Accepted202(rsp_value))
}
http::StatusCode::BAD_REQUEST => Err(update_worker_pool::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(update_worker_pool::Error::NotFound404 {}),
http::StatusCode::CONFLICT => Err(update_worker_pool::Error::Conflict409 {}),
status_code => {
let rsp_body = rsp.body();
Err(update_worker_pool::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update_worker_pool {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::WorkerPoolResource),
Accepted202(models::WorkerPoolResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Error response #response_type")]
Conflict409 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets metric definitions for a specific instance of a worker pool
/// (`GET .../workerPools/{worker_pool_name}/instances/{instance}/metricdefinitions`).
///
/// Returns `models::ResourceMetricDefinitionCollection` on HTTP 200; any other
/// status becomes `Error::UnexpectedResponse` with the raw body.
pub async fn list_worker_pool_instance_metric_definitions(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    name: &str,
    worker_pool_name: &str,
    instance: &str,
    subscription_id: &str,
) -> std::result::Result<models::ResourceMetricDefinitionCollection, list_worker_pool_instance_metric_definitions::Error> {
    let http_client = operation_config.http_client();
    // FIX: reformatted from a generator-mangled single-line `format!` token
    // soup (`& format ! (...)`) into the multi-line layout used by every
    // sibling operation in this module. Same format string, same argument
    // order — behavior is identical.
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/instances/{}/metricdefinitions",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        name,
        worker_pool_name,
        instance
    );
    let mut url = url::Url::parse(url_str).map_err(list_worker_pool_instance_metric_definitions::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // Attach a bearer token only when a credential is configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_worker_pool_instance_metric_definitions::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    // GET request: empty body.
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_worker_pool_instance_metric_definitions::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_worker_pool_instance_metric_definitions::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::ResourceMetricDefinitionCollection = serde_json::from_slice(rsp_body)
                .map_err(|source| list_worker_pool_instance_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            Err(list_worker_pool_instance_metric_definitions::Error::UnexpectedResponse {
                status_code,
                body: rsp_body.clone(),
            })
        }
    }
}
    pub mod list_worker_pool_instance_metric_definitions {
        use super::{models, API_VERSION};
        /// Error type for the `list_worker_pool_instance_metric_definitions` operation.
        ///
        /// `UnexpectedResponse` preserves the raw status code and body for statuses
        /// the operation does not model; the remaining variants wrap failures from
        /// each stage of the request pipeline (URL parsing, request building,
        /// transport, (de)serialization, token acquisition).
        /// NOTE(review): `SerializeError` appears unused by this GET operation —
        /// presumably part of the shared generated template.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_worker_pool_instance_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
instance: &str,
details: Option<bool>,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_worker_pool_instance_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/instances/{}/metrics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name,
instance
);
let mut url = url::Url::parse(url_str).map_err(list_worker_pool_instance_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_worker_pool_instance_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_worker_pool_instance_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_worker_pool_instance_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_worker_pool_instance_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_worker_pool_instance_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_worker_pool_instance_metrics {
        use super::{models, API_VERSION};
        /// Error type for the `list_worker_pool_instance_metrics` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_web_worker_metric_definitions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricDefinitionCollection, list_web_worker_metric_definitions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/metricdefinitions",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_web_worker_metric_definitions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_worker_metric_definitions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_web_worker_metric_definitions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_worker_metric_definitions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricDefinitionCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_web_worker_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_worker_metric_definitions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_web_worker_metric_definitions {
        use super::{models, API_VERSION};
        /// Error type for the `list_web_worker_metric_definitions` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_web_worker_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
details: Option<bool>,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_web_worker_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/metrics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_web_worker_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_worker_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_web_worker_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_worker_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_web_worker_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_worker_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_web_worker_metrics {
        use super::{models, API_VERSION};
        /// Error type for the `list_web_worker_metrics` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_worker_pool_skus(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
subscription_id: &str,
) -> std::result::Result<models::SkuInfoCollection, list_worker_pool_skus::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/skus",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_worker_pool_skus::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_worker_pool_skus::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_worker_pool_skus::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_worker_pool_skus::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::SkuInfoCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_worker_pool_skus::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_worker_pool_skus::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_worker_pool_skus {
        use super::{models, API_VERSION};
        /// Error type for the `list_worker_pool_skus` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_web_worker_usages(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_pool_name: &str,
subscription_id: &str,
) -> std::result::Result<models::UsageCollection, list_web_worker_usages::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/hostingEnvironments/{}/workerPools/{}/usages",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_pool_name
);
let mut url = url::Url::parse(url_str).map_err(list_web_worker_usages::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_worker_usages::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_web_worker_usages::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_worker_usages::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::UsageCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_web_worker_usages::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_worker_usages::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_web_worker_usages {
        use super::{models, API_VERSION};
        /// Error type for the `list_web_worker_usages` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod app_service_plans {
use super::{models, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
detailed: Option<bool>,
subscription_id: &str,
) -> std::result::Result<models::AppServicePlanCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Web/serverfarms",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(detailed) = detailed {
url.query_pairs_mut().append_pair("detailed", detailed.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlanCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list {
        use super::{models, API_VERSION};
        /// Error type for the `list` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<models::AppServicePlanCollection, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlanCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_by_resource_group::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list_by_resource_group {
        use super::{models, API_VERSION};
        /// Error type for the `list_by_resource_group` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::AppServicePlan, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(get::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
app_service_plan: &models::AppServicePlan,
subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(app_service_plan).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod create_or_update {
        use super::{models, API_VERSION};
        /// Success responses for `create_or_update`: 200, 201, and 202 all carry
        /// the App Service plan resource; 202 indicates the operation is still in
        /// progress server-side.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::AppServicePlan),
            Created201(models::AppServicePlan),
            Accepted202(models::AppServicePlan),
        }
        /// Error type for the `create_or_update` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
app_service_plan: &models::AppServicePlanPatchResource,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(app_service_plan).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: models::AppServicePlan =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(update::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod update {
        use super::{models, API_VERSION};
        /// Success responses for `update`: 200 and 202 both carry the plan
        /// resource; 202 indicates the operation is still in progress server-side.
        #[derive(Debug)]
        pub enum Response {
            Ok200(models::AppServicePlan),
            Accepted202(models::AppServicePlan),
        }
        /// Error type for the `update` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod delete {
        use super::{models, API_VERSION};
        /// Success responses for `delete`: the service may answer either
        /// 200 OK or 204 No Content; neither carries a modeled body.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        /// Error type for the `delete` operation.
        ///
        /// `UnexpectedResponse` carries the raw status and body of any unmodeled
        /// HTTP status; the other variants wrap pipeline-stage failures.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_capabilities(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::Capability>, list_capabilities::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/capabilities",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_capabilities::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_capabilities::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_capabilities::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_capabilities::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::Capability> = serde_json::from_slice(rsp_body)
.map_err(|source| list_capabilities::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_capabilities::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_capabilities {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_hybrid_connection(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
namespace_name: &str,
relay_name: &str,
subscription_id: &str,
) -> std::result::Result<models::HybridConnection, get_hybrid_connection::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionNamespaces/{}/relays/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
namespace_name,
relay_name
);
let mut url = url::Url::parse(url_str).map_err(get_hybrid_connection::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_hybrid_connection::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_hybrid_connection::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_hybrid_connection::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::HybridConnection = serde_json::from_slice(rsp_body)
.map_err(|source| get_hybrid_connection::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_hybrid_connection::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_hybrid_connection {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete_hybrid_connection(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
namespace_name: &str,
relay_name: &str,
subscription_id: &str,
) -> std::result::Result<delete_hybrid_connection::Response, delete_hybrid_connection::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionNamespaces/{}/relays/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
namespace_name,
relay_name
);
let mut url = url::Url::parse(url_str).map_err(delete_hybrid_connection::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_hybrid_connection::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(delete_hybrid_connection::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_hybrid_connection::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete_hybrid_connection::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete_hybrid_connection::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete_hybrid_connection::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete_hybrid_connection {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_hybrid_connection_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
namespace_name: &str,
relay_name: &str,
subscription_id: &str,
) -> std::result::Result<models::HybridConnectionKey, list_hybrid_connection_keys::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionNamespaces/{}/relays/{}/listKeys",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
namespace_name,
relay_name
);
let mut url = url::Url::parse(url_str).map_err(list_hybrid_connection_keys::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_hybrid_connection_keys::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_hybrid_connection_keys::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_hybrid_connection_keys::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::HybridConnectionKey = serde_json::from_slice(rsp_body)
.map_err(|source| list_hybrid_connection_keys::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_hybrid_connection_keys::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_hybrid_connection_keys {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_web_apps_by_hybrid_connection(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
namespace_name: &str,
relay_name: &str,
subscription_id: &str,
) -> std::result::Result<models::ResourceCollection, list_web_apps_by_hybrid_connection::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionNamespaces/{}/relays/{}/sites",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
namespace_name,
relay_name
);
let mut url = url::Url::parse(url_str).map_err(list_web_apps_by_hybrid_connection::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_apps_by_hybrid_connection::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_web_apps_by_hybrid_connection::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_apps_by_hybrid_connection::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_web_apps_by_hybrid_connection::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_apps_by_hybrid_connection::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_web_apps_by_hybrid_connection {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_hybrid_connection_plan_limit(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::HybridConnectionLimits, get_hybrid_connection_plan_limit::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionPlanLimits/limit",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(get_hybrid_connection_plan_limit::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_hybrid_connection_plan_limit::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_hybrid_connection_plan_limit::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_hybrid_connection_plan_limit::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::HybridConnectionLimits = serde_json::from_slice(rsp_body)
.map_err(|source| get_hybrid_connection_plan_limit::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_hybrid_connection_plan_limit::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_hybrid_connection_plan_limit {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_hybrid_connections(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::HybridConnectionCollection, list_hybrid_connections::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/hybridConnectionRelays",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_hybrid_connections::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_hybrid_connections::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_hybrid_connections::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_hybrid_connections::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::HybridConnectionCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_hybrid_connections::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_hybrid_connections::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_hybrid_connections {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_metric_defintions(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricDefinitionCollection, list_metric_defintions::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/metricdefinitions",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_metric_defintions::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_metric_defintions::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_metric_defintions::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_metric_defintions::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricDefinitionCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_metric_defintions::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_metric_defintions::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_metric_defintions {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_metrics(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
details: Option<bool>,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::ResourceMetricCollection, list_metrics::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/metrics",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_metrics::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(details) = details {
url.query_pairs_mut().append_pair("details", details.to_string().as_str());
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_metrics::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceMetricCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_metrics::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_metrics {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn restart_web_apps(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
soft_restart: Option<bool>,
subscription_id: &str,
) -> std::result::Result<(), restart_web_apps::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/restartSites",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(restart_web_apps::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(restart_web_apps::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(soft_restart) = soft_restart {
url.query_pairs_mut().append_pair("softRestart", soft_restart.to_string().as_str());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(restart_web_apps::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(restart_web_apps::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(restart_web_apps::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod restart_web_apps {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_web_apps(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
skip_token: Option<&str>,
filter: Option<&str>,
top: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::WebAppCollection, list_web_apps::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/sites",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_web_apps::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_web_apps::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(top) = top {
url.query_pairs_mut().append_pair("$top", top);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_web_apps::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_web_apps::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::WebAppCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_web_apps::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_web_apps::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list_web_apps {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_server_farm_skus(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<serde_json::Value, get_server_farm_skus::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/skus",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(get_server_farm_skus::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_server_farm_skus::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_server_farm_skus::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_server_farm_skus::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: serde_json::Value = serde_json::from_slice(rsp_body)
.map_err(|source| get_server_farm_skus::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_server_farm_skus::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_server_farm_skus {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets usage information for an App Service plan (server farm).
///
/// Sends `GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Web/serverfarms/{name}/usages`.
/// When `filter` is `Some`, it is forwarded verbatim as the `$filter` query parameter.
///
/// # Errors
/// Fails if the URL cannot be parsed, the access token cannot be acquired, the
/// request cannot be built or executed, the 200 body cannot be deserialized, or
/// the service replies with any status other than 200 OK.
pub async fn list_usages(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<models::CsmUsageQuotaCollection, list_usages::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/usages",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_usages::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_usages::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::CsmUsageQuotaCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_usages::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`list_usages`].
pub mod list_usages {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all Virtual Network Connections associated with an App Service plan.
///
/// Sends `GET .../Microsoft.Web/serverfarms/{name}/virtualNetworkConnections`
/// and deserializes the 200 body into `Vec<models::VnetInfo>`. Any other
/// status is surfaced as `Error::UnexpectedResponse` with the raw body.
pub async fn list_vnets(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::VnetInfo>, list_vnets::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections",
operation_config.base_path(),
subscription_id,
resource_group_name,
name
);
let mut url = url::Url::parse(url_str).map_err(list_vnets::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_vnets::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_vnets::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_vnets::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::VnetInfo> =
serde_json::from_slice(rsp_body).map_err(|source| list_vnets::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_vnets::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`list_vnets`].
pub mod list_vnets {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_vnet_from_server_farm(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
subscription_id: &str,
) -> std::result::Result<models::VnetInfo, get_vnet_from_server_farm::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name
);
let mut url = url::Url::parse(url_str).map_err(get_vnet_from_server_farm::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_vnet_from_server_farm::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(get_vnet_from_server_farm::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_vnet_from_server_farm::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::VnetInfo = serde_json::from_slice(rsp_body)
.map_err(|source| get_vnet_from_server_farm::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(get_vnet_from_server_farm::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(get_vnet_from_server_farm::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_vnet_from_server_farm {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets a Virtual Network gateway of an App Service plan.
///
/// Sends `GET .../serverfarms/{name}/virtualNetworkConnections/{vnet_name}/gateways/{gateway_name}`
/// and deserializes the 200 body into `models::VnetGateway`. Any other status
/// is surfaced as `Error::UnexpectedResponse` with the raw body.
pub async fn get_vnet_gateway(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
gateway_name: &str,
subscription_id: &str,
) -> std::result::Result<models::VnetGateway, get_vnet_gateway::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/gateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
gateway_name
);
let mut url = url::Url::parse(url_str).map_err(get_vnet_gateway::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_vnet_gateway::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_vnet_gateway::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_vnet_gateway::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::VnetGateway = serde_json::from_slice(rsp_body)
.map_err(|source| get_vnet_gateway::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get_vnet_gateway::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`get_vnet_gateway`].
pub mod get_vnet_gateway {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Updates a Virtual Network gateway of an App Service plan.
///
/// Sends `PUT .../serverfarms/{name}/virtualNetworkConnections/{vnet_name}/gateways/{gateway_name}`
/// with `connection_envelope` serialized as the JSON request body. The 200
/// response is deserialized into `models::VnetGateway`; any other status is
/// surfaced as `Error::UnexpectedResponse` with the raw body.
pub async fn update_vnet_gateway(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
gateway_name: &str,
connection_envelope: &models::VnetGateway,
subscription_id: &str,
) -> std::result::Result<models::VnetGateway, update_vnet_gateway::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/gateways/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
gateway_name
);
let mut url = url::Url::parse(url_str).map_err(update_vnet_gateway::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update_vnet_gateway::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(connection_envelope).map_err(update_vnet_gateway::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update_vnet_gateway::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(update_vnet_gateway::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::VnetGateway = serde_json::from_slice(rsp_body)
.map_err(|source| update_vnet_gateway::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update_vnet_gateway::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`update_vnet_gateway`].
pub mod update_vnet_gateway {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Gets all routes that are associated with a Virtual Network Connection of an
/// App Service plan.
///
/// Sends `GET .../serverfarms/{name}/virtualNetworkConnections/{vnet_name}/routes`
/// and deserializes the 200 body into `Vec<models::VnetRoute>`. Any other
/// status is surfaced as `Error::UnexpectedResponse` with the raw body.
pub async fn list_routes_for_vnet(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::VnetRoute>, list_routes_for_vnet::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/routes",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name
);
let mut url = url::Url::parse(url_str).map_err(list_routes_for_vnet::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_routes_for_vnet::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_routes_for_vnet::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_routes_for_vnet::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::VnetRoute> = serde_json::from_slice(rsp_body)
.map_err(|source| list_routes_for_vnet::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list_routes_for_vnet::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`list_routes_for_vnet`].
pub mod list_routes_for_vnet {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get_route_for_vnet(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
route_name: &str,
subscription_id: &str,
) -> std::result::Result<Vec<models::VnetRoute>, get_route_for_vnet::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/routes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
route_name
);
let mut url = url::Url::parse(url_str).map_err(get_route_for_vnet::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_route_for_vnet::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_route_for_vnet::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_route_for_vnet::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: Vec<models::VnetRoute> = serde_json::from_slice(rsp_body)
.map_err(|source| get_route_for_vnet::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(get_route_for_vnet::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(get_route_for_vnet::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get_route_for_vnet {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update_vnet_route(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
route_name: &str,
route: &models::VnetRoute,
subscription_id: &str,
) -> std::result::Result<models::VnetRoute, create_or_update_vnet_route::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/routes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
route_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update_vnet_route::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update_vnet_route::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(route).map_err(create_or_update_vnet_route::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(create_or_update_vnet_route::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update_vnet_route::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::VnetRoute = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update_vnet_route::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::BAD_REQUEST => Err(create_or_update_vnet_route::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(create_or_update_vnet_route::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update_vnet_route::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update_vnet_route {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update_vnet_route(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
route_name: &str,
route: &models::VnetRoute,
subscription_id: &str,
) -> std::result::Result<models::VnetRoute, update_vnet_route::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/routes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
route_name
);
let mut url = url::Url::parse(url_str).map_err(update_vnet_route::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update_vnet_route::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(route).map_err(update_vnet_route::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update_vnet_route::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(update_vnet_route::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::VnetRoute = serde_json::from_slice(rsp_body)
.map_err(|source| update_vnet_route::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::BAD_REQUEST => Err(update_vnet_route::Error::BadRequest400 {}),
http::StatusCode::NOT_FOUND => Err(update_vnet_route::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(update_vnet_route::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update_vnet_route {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
BadRequest400 {},
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete_vnet_route(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
vnet_name: &str,
route_name: &str,
subscription_id: &str,
) -> std::result::Result<(), delete_vnet_route::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/virtualNetworkConnections/{}/routes/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
vnet_name,
route_name
);
let mut url = url::Url::parse(url_str).map_err(delete_vnet_route::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete_vnet_route::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete_vnet_route::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(delete_vnet_route::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
http::StatusCode::NOT_FOUND => Err(delete_vnet_route::Error::NotFound404 {}),
status_code => {
let rsp_body = rsp.body();
Err(delete_vnet_route::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete_vnet_route {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
/// Reboots a worker machine of an App Service plan.
///
/// Sends `POST .../serverfarms/{name}/workers/{worker_name}/reboot` with an
/// empty body (and an explicit `Content-Length: 0` header). Success is a
/// 204 No Content; any other status is surfaced as
/// `Error::UnexpectedResponse` with the raw body.
pub async fn reboot_worker(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
name: &str,
worker_name: &str,
subscription_id: &str,
) -> std::result::Result<(), reboot_worker::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}/workers/{}/reboot",
operation_config.base_path(),
subscription_id,
resource_group_name,
name,
worker_name
);
let mut url = url::Url::parse(url_str).map_err(reboot_worker::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
// Bearer auth is attached only when a token credential is configured.
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(reboot_worker::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(reboot_worker::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(reboot_worker::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(reboot_worker::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
/// Error type returned by [`reboot_worker`].
pub mod reboot_worker {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
|
use fnd::*;
// On MSVC targets, bind the C runtime's `exit` from libcmt so the panic
// handler below can terminate the process. Declared `-> !` because `exit`
// never returns. NOTE(review): on non-MSVC targets this cfg'd declaration
// is absent — presumably `exit` comes from another binding there; confirm.
#[cfg(target_env = "msvc")]
#[link(name = "libcmt")]
extern "C" {
fn exit(status: i32) -> !;
}
// Process-wide panic handler (required in builds without std's default one;
// compiled out under `cargo test`). Prints the panic message/location via
// `println!` — presumably provided by the `fnd` prelude imported at the top
// of this file — then terminates the process with exit status 1.
#[cfg(not(test))]
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> !
{
println!("{}", info);
// SAFETY: `exit` is an FFI call into the C runtime; it never returns,
// satisfying this function's `!` return type.
unsafe {
exit(1);
}
}
// The `eh_personality` language item the compiler requires for unwinding
// support. A no-op body: panics here never unwind — the handler above exits
// the process directly — so no cleanup logic is needed.
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn rust_eh_personality() {}
|
#[doc = "Register `AWD3TR` reader"]
pub type R = crate::R<AWD3TR_SPEC>;
#[doc = "Register `AWD3TR` writer"]
pub type W = crate::W<AWD3TR_SPEC>;
#[doc = "Field `LT3` reader - Analog watchdog 3 lower threshold These bits are written by software to define the lower threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
pub type LT3_R = crate::FieldReader<u16>;
#[doc = "Field `LT3` writer - Analog watchdog 3 lower threshold These bits are written by software to define the lower threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
pub type LT3_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 12, O, u16>;
#[doc = "Field `HT3` reader - Analog watchdog 3 higher threshold These bits are written by software to define the higher threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
pub type HT3_R = crate::FieldReader<u16>;
#[doc = "Field `HT3` writer - Analog watchdog 3 higher threshold These bits are written by software to define the higher threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
pub type HT3_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 12, O, u16>;
impl R {
#[doc = "Bits 0:11 - Analog watchdog 3 lower threshold These bits are written by software to define the lower threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
#[inline(always)]
pub fn lt3(&self) -> LT3_R {
// LT3 occupies the low 12 bits of the register.
LT3_R::new((self.bits & 0x0fff) as u16)
}
#[doc = "Bits 16:27 - Analog watchdog 3 higher threshold These bits are written by software to define the higher threshold for the analog watchdog. Refer to ADC_AWDxTR on page 407."]
#[inline(always)]
pub fn ht3(&self) -> HT3_R {
// HT3 occupies bits 16..=27; shift down before masking.
HT3_R::new(((self.bits >> 16) & 0x0fff) as u16)
}
}
// Write accessors for AWD3TR (svd2rust-generated).
impl W {
    #[doc = "Bits 0:11 - Analog watchdog 3lower threshold These bits are written by software to define the lower threshold for the analog watchdog. Refer to ADC_AWDxTR) on page 407."]
    #[inline(always)]
    #[must_use]
    pub fn lt3(&mut self) -> LT3_W<AWD3TR_SPEC, 0> {
        // Field writer anchored at bit offset 0.
        LT3_W::new(self)
    }
    #[doc = "Bits 16:27 - Analog watchdog 3 higher threshold These bits are written by software to define the higher threshold for the analog watchdog. Refer to ADC_AWDxTR) on page 407."]
    #[inline(always)]
    #[must_use]
    pub fn ht3(&mut self) -> HT3_W<AWD3TR_SPEC, 16> {
        // Field writer anchored at bit offset 16.
        HT3_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // unsafe per svd2rust convention: the caller must ensure the raw pattern
    // is valid for every field (including reserved bits).
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "ADC watchdog threshold register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`awd3tr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`awd3tr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AWD3TR_SPEC;
// 32-bit register backing type.
impl crate::RegisterSpec for AWD3TR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`awd3tr::R`](R) reader structure"]
impl crate::Readable for AWD3TR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`awd3tr::W`](W) writer structure"]
impl crate::Writable for AWD3TR_SPEC {
    // No write-0-to-clear / write-1-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AWD3TR to value 0x0fff_0000"]
impl crate::Resettable for AWD3TR_SPEC {
    // Reset: HT3 = 0xFFF (max upper threshold), LT3 = 0.
    const RESET_VALUE: Self::Ux = 0x0fff_0000;
}
|
use std::collections::{HashMap, HashSet};
/*
I tried to implement StructOpt here but decided against it
// use structopt::StructOpt;
// Providing a command line argument to switch between the exact
// and approximated calculation of the Busy time period
#[derive(Debug, StructOpt)]
struct ExCalc {
// This option can be specified by -e
#[structopt(short)]
exact_calculation: bool,
}
*/
// common data structures
// A schedulable task: identity, static priority, timing parameters and the
// execution trace recorded for it.
#[derive(Debug)]
pub struct Task {
    pub id: String,
    // Static (base) priority; higher value = higher priority per update_prio.
    pub prio: u8,
    // Relative deadline, in time units.
    pub deadline: u32,
    // Minimum inter-arrival time between task instances.
    pub inter_arrival: u32,
    pub trace: Trace,
}
//#[derive(Debug, Clone)]
// One timed execution interval. `inner` holds nested intervals (e.g. critical
// sections on resources), so a Trace forms a tree.
#[derive(Debug)]
pub struct Trace {
    // Identifier of the task or resource this interval belongs to.
    pub id: String,
    pub start: u32,
    pub end: u32,
    pub inner: Vec<Trace>,
}
// Used for the final display
// One row of the analysis result per task: response time (rt), WCET (ct),
// blocking time (bt) and interference (it) — matching the maps below.
#[derive(Debug)]
pub struct TaskAnalysis {
    pub task: String,
    pub rt: u32,
    pub ct: u32,
    pub bt: u32,
    pub it: u32,
}
// Type to document the resource blocking
#[derive(Debug)]
pub struct BlockingFiller {
    pub resource: String,
    pub time: u32,
    pub prio: u8,
}
// useful types
// Our task set
pub type Tasks = Vec<Task>;
// A map from Task/Resource identifiers to priority
pub type IdPrio = HashMap<String, u8>;
// A map from Task identifiers to a set of Resource identifiers
pub type TaskResources = HashMap<String, HashSet<String>>;
// A map from Task with intertimings
pub type InterTimings = HashMap<String, u32>;
// A map from Traces with WCET timings
pub type Ct = HashMap<String, u32>;
// A blocking vector to list which Task is blocking which Resource for how long
pub type BlockingVector = Vec<BlockingFiller>;
// A map of the busy times of each task
pub type Bpt = HashMap<String, u32>;
// A map of the response times of each task
pub type ResponseTime = HashMap<String, u32>;
// A map of the interference to each task
pub type Interference = HashMap<String, u32>;
// A map from Traces with blocking timings
pub type BlockingTime = HashMap<String, u32>;
// A special data type for the final display form of all results
pub type FinalDisplay = Vec<TaskAnalysis>;
// Walks the task set once and derives two lookup tables:
// - the highest priority associated with each trace/resource id, and
// - the set of resources each task touches (directly or nested).
pub fn pre_analysis(tasks: &Tasks) -> (IdPrio, TaskResources) {
    let mut prio_map: IdPrio = HashMap::new();
    let mut resource_map: TaskResources = HashMap::new();
    for task in tasks.iter() {
        update_prio(task.prio, &task.trace, &mut prio_map);
        // Only the trace's children are resource claims; the root interval
        // is the task itself.
        task.trace
            .inner
            .iter()
            .for_each(|claim| update_tr(task.id.clone(), claim, &mut resource_map));
    }
    (prio_map, resource_map)
}
// helper functions
// Records the highest priority observed for each trace id (ceiling-style),
// recursing through nested critical sections. Uses the entry API so each id
// costs a single hash lookup instead of a get-then-insert pair.
fn update_prio(prio: u8, trace: &Trace, hm: &mut IdPrio) {
    let ceiling = hm.entry(trace.id.clone()).or_insert(prio);
    if prio > *ceiling {
        *ceiling = prio;
    }
    for cs in &trace.inner {
        update_prio(prio, cs, hm);
    }
}
fn update_tr(s: String, trace: &Trace, trmap: &mut TaskResources) {
if let Some(seen) = trmap.get_mut(&s) {
seen.insert(trace.id.clone());
} else {
let mut hs = HashSet::new();
hs.insert(trace.id.clone());
trmap.insert(s.clone(), hs);
}
for trace in &trace.inner {
update_tr(s.clone(), trace, trmap);
}
}
/// Looks up the timing value recorded for `task` in `hin`.
/// Tasks absent from the map default to 0, matching the original
/// zero-initialised accumulator behaviour.
pub fn readin_u32(task: &Task, hin: &HashMap<String, u32>) -> u32 {
    hin.get(&task.id).copied().unwrap_or(0)
}
|
#![allow(non_upper_case_globals)]
use regex::bytes::Regex;
use crate::compiler::error::{Error, Result};
lazy_static! {
    // Whole-token match for integers: optionally signed decimal, or
    // lower-case hex with optional leading '-'.
    static ref re_integer: Regex = Regex::new(r#"^[+-]?[0-9]+$|^-?0x[0-9a-f]+$"#).unwrap();
    // Whole-token match for the body of a hex float literal: hex digits with
    // an optional fraction, plus an optional binary exponent `p±N`.
    static ref re_hex_float: Regex = Regex::new(r#"^([0-9a-f]+(\.[0-9a-f]*)?|([0-9a-f]*\.[0-9a-f]+))(p[+\-]?[0-9]+)?$"#).unwrap();
}
pub fn parse_float(num: String) -> Result<f64> {
// todo: supports total syntax
num.parse::<f64>().or(Err(Error::IllegalToken {
line: 0,
}))
}
pub fn parse_integer(num: String) -> Result<i64> {
num.parse::<i64>().or(Err(Error::IllegalToken {
line: 0,
}))
}
/// Encodes `x` in a compact "float byte" form (`eeeeexxx`): values below 8
/// are stored verbatim; larger values are rounded *up* to a 3-bit mantissa
/// with a 5-bit exponent (same scheme as Lua's `luaO_int2fb`).
pub fn int_to_float_byte(mut x: isize) -> isize {
    // Small values fit directly — exponent field stays zero.
    if x < 8 {
        return x;
    }
    let mut exponent = 0;
    // Coarse pass: drop four bits at a time (rounding up) while x is large.
    while x >= (8 << 4) {
        x = (x + 0xf) >> 4;
        exponent += 4;
    }
    // Fine pass: halve (rounding up) until the mantissa fits in [8, 16).
    while x >= (8 << 1) {
        x = (x + 1) >> 1;
        exponent += 1;
    }
    // Bias the exponent by one and pack the 3-bit mantissa remainder.
    ((exponent + 1) << 3) | (x - 8)
}
use super::razer_report::{Color, RazerMouseMatrixEffectId, RazerReport, RazerVarstore};
use super::{Device, DeviceFactory};
use errors::Result;
use hidapi::HidDevice;
// Factory describing a family of matrix-capable Razer mice: a marketing name
// plus the LED ids the device exposes.
#[derive(Clone, Debug)]
pub struct MatrixMiceFactory {
    name: &'static str,
    led_ids: &'static [u8],
}
impl MatrixMiceFactory {
    /// Boxes up a factory for a matrix mouse model with the given LED set.
    pub fn new(name: &'static str, led_ids: &'static [u8]) -> Box<MatrixMiceFactory> {
        let factory = MatrixMiceFactory { name, led_ids };
        Box::new(factory)
    }
}
impl DeviceFactory for MatrixMiceFactory {
    fn name(&self) -> &'static str {
        self.name
    }
    // Binds an opened HID handle to a MatrixMice device instance, copying the
    // static model description from the factory.
    fn open(&self, hid_device: HidDevice) -> Box<Device> {
        Box::new(MatrixMice {
            name: self.name,
            led_ids: self.led_ids,
            hid_device,
        })
    }
}
// An opened matrix-capable mouse: model info plus the live HID handle.
pub struct MatrixMice {
    name: &'static str,
    led_ids: &'static [u8],
    hid_device: HidDevice,
}
impl Device for MatrixMice {
    fn name(&self) -> &'static str {
        self.name
    }
    fn hid_device<'a>(&'a self) -> &'a HidDevice {
        &self.hid_device
    }
    // Requests brightness of the first LED only.
    // NOTE(review): the device's reply is never parsed — this always returns
    // Ok(0). Looks like a stub; confirm whether send_report can expose the
    // response payload.
    fn get_brightness(&self) -> Result<u8> {
        self.send_report(RazerReport::extended_matrix_get_brightness(
            RazerVarstore::Store,
            self.led_ids[0],
        ))?;
        Ok(0)
    }
    // Applies the same brightness to every LED the model exposes; stops at
    // the first transport error.
    fn set_brightness(&self, brightness: u8) -> Result<()> {
        for led_id in self.led_ids {
            self.send_report(RazerReport::extended_matrix_set_brightness(
                RazerVarstore::Store,
                *led_id,
                brightness,
            ))?;
        }
        Ok(())
    }
    // Sets a static colour on every LED via the "static effect" report.
    fn set_color(&self, color: Color) -> Result<()> {
        for led_id in self.led_ids {
            let mut report = RazerReport::extended_mouse_matrix_effect(
                RazerVarstore::Store,
                *led_id,
                RazerMouseMatrixEffectId::Static,
            );
            // Argument layout per the extended-matrix protocol:
            // [5] = colour count (1), [6..=8] = RGB.
            // NOTE(review): offsets taken on faith from this code; confirm
            // against the razer_report definitions.
            report.arguments[5] = 1;
            report.arguments[6] = color.red;
            report.arguments[7] = color.green;
            report.arguments[8] = color.blue;
            self.send_report(report)?;
        }
        Ok(())
    }
}
|
mod bft_orswot;
mod bft_orswot_net;
|
#[doc = r"Register block"]
// svd2rust-generated SYSCFG register block: CFGR1 at offset 0x00, a reserved
// gap, then CFGR2 at 0x18.
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - SYSCFG configuration register 1"]
    pub cfgr1: CFGR1,
    // Pads offsets 0x04..0x18 (reserved area between the two registers).
    _reserved1: [u8; 0x14],
    #[doc = "0x18 - SYSCFG configuration register 1"]
    // NOTE(review): the doc string says "register 1" for CFGR2 — looks like
    // an SVD description quirk carried through generation.
    pub cfgr2: CFGR2,
}
#[doc = "CFGR1 (rw) register accessor: SYSCFG configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`cfgr1`]
module"]
pub type CFGR1 = crate::Reg<cfgr1::CFGR1_SPEC>;
#[doc = "SYSCFG configuration register 1"]
pub mod cfgr1;
#[doc = "CFGR2 (rw) register accessor: SYSCFG configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`cfgr2`]
module"]
pub type CFGR2 = crate::Reg<cfgr2::CFGR2_SPEC>;
#[doc = "SYSCFG configuration register 1"]
pub mod cfgr2;
|
use std::collections::HashMap;
use serde::{Serialize,Deserialize};
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleFirestoreAdminv1IndexField {
#[serde(rename="fieldPath")]
pub field_path: Option<String>,
pub mode: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListenResponse {
pub filter: Option<ExistenceFilter>,
#[serde(rename="targetChange")]
pub target_change: Option<TargetChange>,
#[serde(rename="documentDelete")]
pub document_delete: Option<DocumentDelete>,
#[serde(rename="documentChange")]
pub document_change: Option<DocumentChange>,
#[serde(rename="documentRemove")]
pub document_remove: Option<DocumentRemove>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BeginTransactionResponse {
pub transaction: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Write {
pub delete: Option<String>,
#[serde(rename="currentDocument")]
pub current_document: Option<Precondition>,
pub update: Option<Document>,
pub transform: Option<DocumentTransform>,
#[serde(rename="updateMask")]
pub update_mask: Option<DocumentMask>,
}
// Comparison operator of a FieldFilter. Variant names are serialized as-is,
// matching the Firestore REST API's enum strings (hence the allow).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[allow(non_camel_case_types)]
pub enum FieldOperator {
    OPERATOR_UNSPECIFIED, // Unspecified. This value must not be used.
    LESS_THAN, // Less than. Requires that the field come first in orderBy.
    LESS_THAN_OR_EQUAL, // Less than or equal. Requires that the field come first in orderBy.
    GREATER_THAN, // Greater than. Requires that the field come first in orderBy.
    GREATER_THAN_OR_EQUAL, // Greater than or equal. Requires that the field come first in orderBy.
    EQUAL, // Equal.
    ARRAY_CONTAINS, // Contains. Requires that the field is an array.
}
// The API's "unset" sentinel doubles as the Rust default.
impl Default for FieldOperator {
    fn default() -> Self {
        FieldOperator::OPERATOR_UNSPECIFIED
    }
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct FieldFilter {
pub field: FieldReference,
pub value: Value,
pub op: FieldOperator,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleFirestoreAdminv1ImportDocumentsRequest {
#[serde(rename="inputUriPrefix")]
pub input_uri_prefix: Option<String>,
#[serde(rename="collectionIds")]
pub collection_ids: Option<Vec<String>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Document {
pub fields: Option<HashMap<String, Value>>,
#[serde(rename="updateTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub update_time: Option<String>,
#[serde(rename="createTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub create_time: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleFirestoreAdminv1ListIndexesResponse {
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
pub indexes: Option<Vec<GoogleFirestoreAdminv1Index>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BatchGetDocumentsResponse {
pub found: Option<Document>,
pub transaction: Option<String>,
#[serde(rename="readTime")]
pub read_time: Option<String>,
pub missing: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Status {
pub message: Option<String>,
pub code: Option<i32>,
pub details: Option<Vec<HashMap<String, String>>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListenRequest {
pub labels: Option<HashMap<String, String>>,
#[serde(rename="addTarget")]
pub add_target: Option<Target>,
#[serde(rename="removeTarget")]
pub remove_target: Option<i32>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RunQueryRequest {
#[serde(rename="newTransaction")]
#[serde(skip_serializing_if = "Option::is_none")]
pub new_transaction: Option<TransactionOptions>,
pub transaction: Option<String>,
#[serde(rename="structuredQuery")]
#[serde(skip_serializing_if = "Option::is_none")]
pub structured_query: Option<StructuredQuery>,
#[serde(rename="readTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct FieldReference {
#[serde(rename="fieldPath")]
pub field_path: String,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct UnaryFilter {
pub field: FieldReference,
pub op: String,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ArrayValue {
pub values: Vec<Value>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentMask {
#[serde(rename="fieldPaths")]
pub field_paths: Vec<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CompositeFilter {
pub filters: Vec<Filter>,
pub op: String,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Empty { _never_set: Option<bool> }
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Filter {
#[serde(rename="unaryFilter")]
#[serde(skip_serializing_if = "Option::is_none")]
pub unary_filter: Option<UnaryFilter>,
#[serde(rename="fieldFilter")]
#[serde(skip_serializing_if = "Option::is_none")]
pub field_filter: Option<FieldFilter>,
#[serde(rename="compositeFilter")]
#[serde(skip_serializing_if = "Option::is_none")]
pub composite_filter: Option<CompositeFilter>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct WriteResponse {
#[serde(rename="writeResults")]
pub write_results: Option<Vec<WriteResult>>,
#[serde(rename="streamToken")]
pub stream_token: Option<String>,
#[serde(rename="commitTime")]
pub commit_time: Option<String>,
#[serde(rename="streamId")]
pub stream_id: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListCollectionIdsRequest {
#[serde(rename="pageToken")]
pub page_token: Option<String>,
#[serde(rename="pageSize")]
pub page_size: Option<i32>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BatchGetDocumentsRequest {
#[serde(rename="newTransaction")]
#[serde(skip_serializing_if = "Option::is_none")]
pub new_transaction: Option<TransactionOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
pub transaction: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mask: Option<DocumentMask>,
#[serde(skip_serializing_if = "Option::is_none")]
pub documents: Option<Vec<String>>,
#[serde(rename="readTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub read_time: Option<String>,
}
// A Firestore map value: wraps the nested field-name -> Value mapping.
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct MapValue {
    pub fields: HashMap<String, Value>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TransactionOptions {
#[serde(rename="readWrite")]
pub read_write: Option<ReadWrite>,
#[serde(rename="readOnly")]
pub read_only: Option<ReadOnly>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CommitResponse {
#[serde(rename="writeResults")]
pub write_results: Option<Vec<WriteResult>>,
#[serde(rename="commitTime")]
pub commit_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Target {
pub documents: Option<DocumentsTarget>,
pub once: Option<bool>,
pub query: Option<QueryTarget>,
#[serde(rename="resumeToken")]
pub resume_token: Option<String>,
#[serde(rename="targetId")]
pub target_id: Option<i32>,
#[serde(rename="readTime")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ExistenceFilter {
pub count: Option<i32>,
#[serde(rename="targetId")]
pub target_id: Option<i32>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentsTarget {
pub documents: Option<Vec<String>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Precondition {
#[serde(rename="updateTime")]
pub update_time: Option<String>,
pub exists: Option<bool>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Value {
#[serde(rename="bytesValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_value: Option<String>,
#[serde(rename="timestampValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub timestamp_value: Option<String>,
#[serde(rename="geoPointValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub geo_point_value: Option<LatLng>,
#[serde(rename="referenceValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub reference_value: Option<String>,
#[serde(rename="doubleValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub double_value: Option<f64>,
#[serde(rename="mapValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub map_value: Option<MapValue>,
#[serde(rename="stringValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub string_value: Option<String>,
#[serde(rename="booleanValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub boolean_value: Option<bool>,
#[serde(rename="arrayValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub array_value: Option<ArrayValue>,
#[serde(rename="integerValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub integer_value: Option<String>,
#[serde(rename="nullValue")]
#[serde(skip_serializing_if = "Option::is_none")]
pub null_value: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Cursor {
pub values: Option<Vec<Value>>,
pub before: Option<bool>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CollectionSelector {
#[serde(rename="allDescendants")]
pub all_descendants: Option<bool>,
#[serde(rename="collectionId")]
pub collection_id: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleFirestoreAdminv1Index {
pub fields: Option<Vec<GoogleFirestoreAdminv1IndexField>>,
pub state: Option<String>,
pub name: Option<String>,
#[serde(rename="collectionId")]
pub collection_id: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct StructuredQuery {
#[serde(rename="orderBy")]
#[serde(skip_serializing_if = "Option::is_none")]
pub order_by: Option<Vec<Order>>,
#[serde(rename="startAt")]
#[serde(skip_serializing_if = "Option::is_none")]
pub start_at: Option<Cursor>,
#[serde(rename="endAt")]
#[serde(skip_serializing_if = "Option::is_none")]
pub end_at: Option<Cursor>,
pub limit: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub offset: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub from: Option<Vec<CollectionSelector>>,
#[serde(rename="where")]
#[serde(skip_serializing_if = "Option::is_none")]
pub where_: Option<Filter>,
#[serde(skip_serializing_if = "Option::is_none")]
pub select: Option<Projection>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct FieldTransform {
#[serde(rename="fieldPath")]
pub field_path: Option<String>,
#[serde(rename="appendMissingElements")]
pub append_missing_elements: Option<ArrayValue>,
#[serde(rename="setToServerValue")]
pub set_to_server_value: Option<String>,
#[serde(rename="removeAllFromArray")]
pub remove_all_from_array: Option<ArrayValue>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentDelete {
#[serde(rename="removedTargetIds")]
#[serde(skip_serializing_if = "Option::is_none")]
pub removed_target_ids: Option<Vec<i32>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub document: Option<String>,
#[serde(rename="readTime")]
#[serde(skip_serializing_if = "Option::is_none")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleFirestoreAdminv1ExportDocumentsRequest {
#[serde(rename="outputUriPrefix")]
pub output_uri_prefix: Option<String>,
#[serde(rename="collectionIds")]
pub collection_ids: Option<Vec<String>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Order {
#[serde(skip_serializing_if = "Option::is_none")]
pub field: Option<FieldReference>,
#[serde(skip_serializing_if = "Option::is_none")]
pub direction: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TargetChange {
#[serde(rename="resumeToken")]
pub resume_token: Option<String>,
#[serde(rename="targetChangeType")]
pub target_change_type: Option<String>,
pub cause: Option<Status>,
#[serde(rename="targetIds")]
pub target_ids: Option<Vec<i32>>,
#[serde(rename="readTime")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RunQueryResponse {
#[serde(rename="skippedResults")]
pub skipped_results: Option<i32>,
pub transaction: Option<String>,
pub document: Option<Document>,
#[serde(rename="readTime")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListCollectionIdsResponse {
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
#[serde(rename="collectionIds")]
pub collection_ids: Option<Vec<String>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CommitRequest {
pub writes: Option<Vec<Write>>,
pub transaction: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Projection {
pub fields: Option<Vec<FieldReference>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListDocumentsResponse {
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
pub documents: Option<Vec<Document>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReadWrite {
#[serde(rename="retryTransaction")]
pub retry_transaction: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleLongrunningOperation {
pub error: Option<Status>,
pub done: Option<bool>,
pub response: Option<HashMap<String, String>>,
pub name: Option<String>,
pub metadata: Option<HashMap<String, String>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LatLng {
pub latitude: Option<f64>,
pub longitude: Option<f64>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentChange {
#[serde(rename="removedTargetIds")]
pub removed_target_ids: Option<Vec<i32>>,
pub document: Option<Document>,
#[serde(rename="targetIds")]
pub target_ids: Option<Vec<i32>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentRemove {
#[serde(rename="removedTargetIds")]
pub removed_target_ids: Option<Vec<i32>>,
pub document: Option<String>,
#[serde(rename="readTime")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RollbackRequest {
pub transaction: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReadOnly {
#[serde(rename="readTime")]
pub read_time: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BeginTransactionRequest {
pub options: Option<TransactionOptions>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DocumentTransform {
pub document: Option<String>,
#[serde(rename="fieldTransforms")]
pub field_transforms: Option<Vec<FieldTransform>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct WriteResult {
#[serde(rename="updateTime")]
pub update_time: Option<String>,
#[serde(rename="transformResults")]
pub transform_results: Option<Vec<Value>>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct QueryTarget {
#[serde(rename="structuredQuery")]
pub structured_query: Option<StructuredQuery>,
pub parent: Option<String>,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct WriteRequest {
pub writes: Option<Vec<Write>>,
pub labels: Option<HashMap<String, String>>,
#[serde(rename="streamToken")]
pub stream_token: Option<String>,
#[serde(rename="streamId")]
pub stream_id: Option<String>,
}
|
extern crate cascading_ui;
extern crate wasm_bindgen_test;
use self::{
cascading_ui::{test_header, test_setup},
wasm_bindgen_test::wasm_bindgen_test,
};
test_header!();
// Exercises dynamic (event-driven) updates: the top-level `?click` rule swaps
// the root's content for element `.a`, whose own `?click` swaps again and
// inserts a nested `b` element.
// NOTE(review): `root`, `HtmlElement` and `dyn_into` are brought into scope
// by `test_header!`/`test_setup!` — confirm against the macro definitions in
// `cascading_ui`.
#[wasm_bindgen_test]
fn dynamic() {
    test_setup! {
        text: "click me";
        .a {
            ?click {
                text: "I've been clicked!";
                b {
                    text: "hello world";
                }
            }
            color: "blue";
            text: "click me too";
        }
        ?click {
            a {}
            .b {
                color: "green";
                text: "nope";
            }
        }
    }
    // Before any interaction: plain text from the top-level rule.
    assert_eq!(
        root.first_child()
            .expect("the root should contain a node")
            .text_content()
            .expect("the node should contain text"),
        "click me"
    );
    // First click: root's ?click replaces the text with an `a` element.
    root.click();
    assert_eq!(
        root.first_element_child()
            .expect("the root should now contain an element")
            .inner_html(),
        "click me too"
    );
    // Second click on the inner element triggers `.a`'s ?click handler.
    root.first_element_child()
        .expect("the root should now contain an element")
        .dyn_into::<HtmlElement>()
        .expect("this cast should work")
        .click();
    // The nested `b` element should now be present with its text.
    assert_eq!(
        root.first_element_child()
            .expect("the root should still contain an element")
            .first_element_child()
            .expect("that element should now contain an element")
            .inner_html(),
        "hello world"
    );
}
|
use thiserror::Error;
#[derive(Error, Debug)]
pub enum LibVoponoError {
#[error("failed to add NetFilter rule")]
NetFilterError(String),
}
|
use super::{
account::{AccountRole, AccountType},
check, get_user_by_name, rand_str, Account, UserContext,
};
use crate::{config, Server};
use anyhow::anyhow;
use anyhow::bail;
use chrono::Local;
use ldap3::{LdapConnAsync, Scope, SearchEntry};
use log::{info, warn};
use spa_server::re_export::{
error::{ErrorBadRequest, ErrorInternalServerError},
get,
web::{self, Query},
HttpRequest, HttpResponse, Identity, Responder, Result,
};
use std::cell::RefCell;
use std::collections::HashMap;
use tokio::sync::Mutex;
// LDAP-backed authentication state shared by the login endpoint.
pub(super) struct Ldap {
    // Live connection handle; None until `connect` succeeds.
    // NOTE(review): RefCell borrows are runtime-checked, and borrow_mut() is
    // held across `.await` in search/login — confirm single-task access.
    inner: Option<RefCell<ldap3::Ldap>>,
    config: config::Ldap,
    // username -> directory entry cache, populated lazily by `search_user`.
    cache: Mutex<HashMap<String, LdapAccount>>,
}
// Minimal projection of a directory entry: sAMAccountName, cn and mail.
struct LdapAccount {
    username: String,
    display_name: String,
    email: String,
}
impl Ldap {
    /// Creates an unconnected instance; call `connect` before use.
    pub fn new() -> Self {
        Ldap {
            inner: None,
            config: config::Ldap::default(),
            cache: Mutex::new(HashMap::new()),
        }
    }
    /// Opens a plain (non-TLS) `ldap://` connection to `cfg.hostname` and
    /// stores both the handle and the configuration.
    pub async fn connect(&mut self, cfg: config::Ldap) -> anyhow::Result<()> {
        let (conn, inner) = LdapConnAsync::new(&format!("ldap://{}", &cfg.hostname)).await?;
        // Spawn the connection driver so the handle can issue operations.
        ldap3::drive!(conn);
        self.inner = Some(RefCell::new(inner));
        self.config = cfg;
        Ok(())
    }
    /// Looks `username` up in the directory via the local cache.
    /// On a cache miss the whole `(objectclass=person)` subtree is fetched
    /// and the cache rebuilt. Returns `Ok(None)` if the user doesn't exist.
    pub async fn search_user(&self, username: impl AsRef<str>) -> anyhow::Result<Option<Account>> {
        let username = username.as_ref();
        let mut cache = self.cache.lock().await;
        let ldap_account = match cache.get(username) {
            Some(a) => a,
            None => {
                // Bind with the service account before searching.
                self.login(&self.config.username, &self.config.password)
                    .await?;
                let (result, _) = self
                    .inner
                    .as_ref()
                    .ok_or(anyhow!("ldap server not connected"))?
                    .borrow_mut()
                    .search(
                        &self.config.base_dn,
                        Scope::Subtree,
                        "(objectclass=person)",
                        vec!["sAMAccountName", "cn", "mail"],
                    )
                    .await?
                    .success()?;
                // Fallback attribute value for entries missing cn/sAM/mail.
                let default = vec!["unknown".to_string()];
                for r in result.into_iter() {
                    let attrs = SearchEntry::construct(r).attrs;
                    let cn = &attrs.get("cn").unwrap_or(&default)[0];
                    let sam = &attrs.get("sAMAccountName").unwrap_or(&default)[0];
                    let mail = &attrs.get("mail").unwrap_or(&default)[0];
                    let id = sam.clone();
                    cache.insert(
                        id,
                        LdapAccount {
                            username: sam.clone(),
                            display_name: cn.clone(),
                            email: mail.clone(),
                        },
                    );
                }
                // Re-check after the refresh; absent means no such user.
                match cache.get(username) {
                    Some(a) => a,
                    None => return Ok(None),
                }
            }
        };
        // Materialize a local Account (plain user role) from the cache entry.
        let mut account = Account::new(
            &ldap_account.username,
            AccountType::Ldap.as_ref(),
            AccountRole::User.as_ref(),
        );
        account
            .display_name(&ldap_account.display_name)
            .email(&ldap_account.email);
        Ok(Some(account))
    }
    /// Performs a simple bind as `username@domain`.
    /// NOTE(review): `.success()?` already errors on a non-zero result code,
    /// so the `r.rc != 0` branch below looks unreachable — confirm against
    /// the ldap3 crate's semantics.
    async fn login(
        &self,
        username: impl AsRef<str>,
        password: impl AsRef<str>,
    ) -> anyhow::Result<()> {
        let r = self
            .inner
            .as_ref()
            .ok_or(anyhow!("ldap server not connected"))?
            .borrow_mut()
            .simple_bind(
                &format!("{}@{}", username.as_ref(), &self.config.domain),
                password.as_ref(),
            )
            .await?
            .success()?;
        if r.rc != 0 {
            bail!("error from ldap server: {}", r.text);
        }
        Ok(())
    }
}
/// Builds a 401 response that challenges the client for Basic credentials.
fn unauthorized(msg: impl Into<String>) -> Result<HttpResponse> {
    let response = HttpResponse::Unauthorized()
        .append_header(("WWW-Authenticate", "Basic"))
        .body(msg.into());
    Ok(response)
}
const BASIC_MASK: &str = "Basic";
/// Splits an HTTP `Authorization` header into `(user-id, password)`.
///
/// # Errors
/// Fails when the scheme is not `Basic`, the payload is not valid base64, or
/// the decoded credentials contain no `:` separator.
fn parse_auth(auth: impl AsRef<str>) -> anyhow::Result<(String, String)> {
    let auth = auth.as_ref();
    // strip_prefix instead of split_at: the old code panicked on untrusted
    // headers shorter than "Basic" (out-of-bounds split_at).
    let content = match auth.strip_prefix(BASIC_MASK) {
        Some(rest) => rest.trim(),
        None => bail!("only support basic authorization"),
    };
    let bytes = base64::decode(content.as_bytes())?;
    let auth_str = String::from_utf8_lossy(&bytes);
    // splitn(2, ':'): per RFC 7617 the password may itself contain colons,
    // so only the first colon separates user-id from password.
    let mut parts = auth_str.splitn(2, ':');
    match (parts.next(), parts.next()) {
        (Some(user), Some(pass)) => Ok((user.to_string(), pass.to_string())),
        _ => bail!("invalid authorization"),
    }
}
// GET /ldap_login — authenticates via HTTP Basic against LDAP, creating a
// local account on first login, then establishes a session and API token.
#[get("/ldap_login")]
pub async fn login(
    req: HttpRequest,
    data: web::Data<Server>,
    id: Identity,
) -> Result<impl Responder> {
    let db = data.database.lock().await;
    // Already authenticated with a token: short-circuit and echo the context.
    if let Ok(user) = check(&id, &db) {
        if let Some(token) = user.token {
            return Ok(HttpResponse::Ok().json(UserContext {
                username: user.username,
                role: user.role,
                token,
                r#type: user.type_,
            }));
        }
    }
    // LDAP must be configured; (re)connect with the current settings.
    let ldap = match &data.config.read().await.registry.ldap {
        Some(ldap_cfg) => {
            let mut ldap = data.auth_context.ldap.lock().await;
            ldap.connect(ldap_cfg.clone())
                .await
                .map_err(|e| ErrorInternalServerError(e))?;
            ldap
        }
        None => return Err(ErrorBadRequest("ldap not enabled")),
    };
    if let Some(auth) = req.headers().get("Authorization") {
        let (username, password) = match parse_auth(auth.to_str().map_err(|e| ErrorBadRequest(e))?)
        {
            Ok(r) => r,
            Err(e) => return unauthorized(format!("{:?}", e)),
        };
        // Known users must already be LDAP-typed; unknown users are created
        // from the directory entry before the bind is attempted.
        let mut user =
            match get_user_by_name(&db, &username).map_err(|e| ErrorInternalServerError(e))? {
                Some(u) => {
                    if u.type_ != AccountType::Ldap.as_ref() {
                        return unauthorized("invalid login type");
                    }
                    u
                }
                None => {
                    if let Some(u) = ldap
                        .search_user(&username)
                        .await
                        .map_err(|e| ErrorInternalServerError(e))?
                    {
                        u.insert(&db).map_err(|e| ErrorInternalServerError(e))?;
                        u
                    } else {
                        return unauthorized("invalid username or password");
                    }
                }
            };
        // The actual credential check: simple bind as the end user.
        if let Err(e) = ldap.login(&username, password).await {
            warn!("{:?}", e);
            return unauthorized("invalid username or password");
        }
        info!(
            "remote: {} user: {} login ok via LDAP",
            req.connection_info().remote_addr().unwrap_or("<unknown>"),
            &username
        );
        // Refresh the last-login timestamp and rotate the API token.
        user.last_login(Local::now().to_string())
            .token(rand_str(64))
            .update(&db)
            .map_err(|e| ErrorInternalServerError(e))?;
        id.remember(username.clone());
        // Optional ?redirect=<url> support after a successful login.
        let query_string = req.query_string();
        if !query_string.is_empty() {
            let query = Query::<HashMap<String, String>>::from_query(query_string)?;
            if let Some(redirect_url) = query.get("redirect") {
                return Ok(HttpResponse::TemporaryRedirect()
                    .append_header(("Location", &**redirect_url))
                    .finish());
            }
        }
        // NOTE(review): this branch reports `display_name` as `username`,
        // while the token-reuse branch above reports `user.username` —
        // confirm which one the frontend expects.
        return Ok(HttpResponse::Ok().json(UserContext {
            username: user.display_name,
            token: user.token.unwrap(),
            role: user.role,
            r#type: user.type_,
        }));
    }
    // No Authorization header at all: challenge the client.
    unauthorized("cancelled")
}
|
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;
use url::Url;
use super::ConfigurationSection;
/// Configuration of a single OAuth 2.0 client allowed to talk to this server.
#[skip_serializing_none]
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct OAuth2ClientConfig {
    /// Public identifier of the client.
    pub client_id: String,

    /// Redirect URIs registered for this client. `None` when the field is
    /// omitted from the config file (serde default); skipped on serialization
    /// when `None` (via `skip_serializing_none`).
    #[serde(default)]
    pub redirect_uris: Option<Vec<Url>>,
}
fn default_oauth2_issuer() -> Url {
"http://[::]:8080".parse().unwrap()
}
/// Top-level OAuth 2.0 provider configuration (the `oauth2` section).
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct OAuth2Config {
    /// Issuer URL advertised by this provider.
    /// Defaults to `http://[::]:8080` when absent from the file.
    #[serde(default = "default_oauth2_issuer")]
    pub issuer: Url,

    /// Registered OAuth 2.0 clients. Defaults to an empty list.
    #[serde(default)]
    pub clients: Vec<OAuth2ClientConfig>,
}
impl Default for OAuth2Config {
fn default() -> Self {
Self {
issuer: default_oauth2_issuer(),
clients: Vec::new(),
}
}
}
/// Hooks this type into the generic configuration loader: it lives under the
/// `oauth2` key, and sample generation simply emits the defaults.
impl ConfigurationSection<'_> for OAuth2Config {
    // Key under which this section appears in the configuration file.
    fn path() -> &'static str {
        "oauth2"
    }

    // A generated (sample) configuration is just the default values.
    fn generate() -> Self {
        Self::default()
    }
}
#[cfg(test)]
mod tests {
    use figment::Jail;

    use super::*;

    // Round-trip check: a sample YAML file exercises both explicit values
    // (issuer, first client's redirect_uris) and serde defaults (second
    // client's missing redirect_uris).
    #[test]
    fn load_config() {
        Jail::expect_with(|jail| {
            jail.create_file(
                "config.yaml",
                r#"
                  oauth2:
                    issuer: https://example.com
                    clients:
                      - client_id: hello
                        redirect_uris:
                          - https://exemple.fr/callback
                      - client_id: world
                "#,
            )?;

            let config = OAuth2Config::load_from_file("config.yaml")?;

            assert_eq!(config.issuer, "https://example.com".parse().unwrap());
            assert_eq!(config.clients.len(), 2);

            assert_eq!(config.clients[0].client_id, "hello");
            assert_eq!(
                config.clients[0].redirect_uris,
                Some(vec!["https://exemple.fr/callback".parse().unwrap()])
            );
            assert_eq!(config.clients[1].client_id, "world");
            // Omitted field deserializes to None via #[serde(default)].
            assert_eq!(config.clients[1].redirect_uris, None);

            Ok(())
        })
    }
}
|
//! This module provides capabilities for managing a cache of rendered glyphs in
//! GPU memory, with the goal of minimising the size and frequency of glyph
//! uploads to GPU memory from the CPU.
//!
//! This module is optional, and not compiled by default. To use it enable the
//! `gpu_cache` feature in your Cargo.toml.
//!
//! Typical applications that render directly with hardware graphics APIs (e.g.
//! games) need text rendering. There is not yet a performant solution for high
//! quality text rendering directly on the GPU that isn't experimental research
//! work. Quality is often critical for legibility, so many applications use
//! text or individual characters that have been rendered on the CPU. This is
//! done either ahead-of-time, giving a fixed set of fonts, characters, and
//! sizes that can be used at runtime, or dynamically as text is required. This
//! latter scenario is more flexible and the focus of this module.
//!
//! To minimise the CPU load and texture upload bandwidth saturation, recently
//! used glyphs should be cached on the GPU for use by future frames. This
//! module provides a mechanism for maintaining such a cache in the form of a
//! single packed 2D GPU texture. When a rendered glyph is requested, it is
//! either retrieved from its location in the texture if it is present or room
//! is made in the cache (if necessary), the CPU renders the glyph then it is
//! uploaded into a gap in the texture to be available for GPU rendering. This
//! cache uses a Least Recently Used (LRU) cache eviction scheme - glyphs in the
//! cache that have not been used recently are as a rule of thumb not likely to
//! be used again soon, so they are the best candidates for eviction to make
//! room for required glyphs.
//!
//! The API for the cache does not assume a particular graphics API. The
//! intended usage is to queue up glyphs that need to be present for the current
//! frame using `Cache::queue_glyph`, update the cache to ensure that the queued
//! glyphs are present using `Cache::cache_queued` (providing a function for
//! uploading pixel data), then when it's time to render call `Cache::rect_for`
//! to get the UV coordinates in the cache texture for each glyph. For a
//! concrete use case see the `gpu_cache` example.
//!
//! Cache dimensions are immutable. If you need to change the dimensions of the
//! cache texture (e.g. due to high cache pressure), rebuild a new `Cache`.
//! Either from scratch or with `CacheBuilder::rebuild`.
//!
//! # Example
//!
//! ```
//! # use rusttype::{Font, gpu_cache::Cache, point, Scale};
//! # use std::error::Error;
//! # fn example() -> Result<(), Box<dyn Error>> {
//! # let font_data: &[u8] = include_bytes!("../dev/fonts/dejavu/DejaVuSansMono.ttf");
//! # let font: Font<'static> = Font::try_from_bytes(font_data).unwrap();
//! # let glyph = font.glyph('a').scaled(Scale::uniform(25.0)).positioned(point(0.0, 0.0));
//! # let glyph2 = glyph.clone();
//! # fn update_gpu_texture(_: rusttype::Rect<u32>, _: &[u8]) {};
//! // Build a default Cache.
//! let mut cache = Cache::builder().build();
//!
//! // Queue some positioned glyphs needed for the next frame.
//! cache.queue_glyph(0, glyph);
//!
//! // Cache all queued glyphs somewhere in the cache texture.
//! // If new glyph data has been drawn the closure is called to upload
//! // the pixel data to GPU memory.
//! cache.cache_queued(|region, data| update_gpu_texture(region, data))?;
//!
//! # let glyph = glyph2;
//! // Lookup a positioned glyph's texture location
//! if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(0, &glyph) {
//! // Generate vertex data, etc
//! }
//! # Ok(())
//! # }
//! ```
use crate::{point, vector, GlyphId, Point, PositionedGlyph, Rect, Vector};
use linked_hash_map::LinkedHashMap;
use rustc_hash::{FxHashMap, FxHasher};
use std::collections::{HashMap, HashSet};
use std::error;
use std::fmt;
use std::hash::BuildHasherDefault;
/// Hasher state for the cache's internal maps/sets; `FxHasher` is a fast
/// non-cryptographic hasher well suited to these small integer-like keys.
type FxBuildHasher = BuildHasherDefault<FxHasher>;

/// Texture coordinates (floating point) of the quad for a glyph in the cache,
/// as well as the pixel-space (integer) coordinates that this region should be
/// drawn at.
pub type TextureCoords = (Rect<f32>, Rect<i32>);

/// Caller-chosen identifier disambiguating fonts; see `Cache::queue_glyph`.
type FontId = usize;

/// Indicates where a glyph texture is stored in the cache
/// (row position, glyph index in row)
type TextureRowGlyphIndex = (u32, u32);
/// Texture lookup key that uses scale & offset as integers attained
/// by dividing by the relevant tolerance.
///
/// Two glyphs that land in the same tolerance "step" compare equal and
/// therefore share a single cached texture.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct LossyGlyphInfo {
    font_id: FontId,
    glyph_id: GlyphId,
    /// x & y scales divided by `scale_tolerance` & rounded
    scale_over_tolerance: (u32, u32),
    /// Normalised subpixel positions divided by `position_tolerance` & rounded
    ///
    /// `u16` is enough as subpixel position `[-0.5, 0.5]` converted to `[0, 1]`
    /// divided by the min `position_tolerance` (`0.001`) is small.
    offset_over_tolerance: (u16, u16),
}
/// Row-major 2D byte array backed by a single contiguous `Vec<u8>`; used as
/// the CPU-side pixel buffer that a glyph is rasterized into before upload.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ByteArray2d {
    /// Flat storage of length `row * col`, row-major.
    inner_array: Vec<u8>,
    /// Number of rows (height in pixels).
    row: usize,
    /// Number of columns (width in pixels).
    col: usize,
}
impl ByteArray2d {
    /// Creates a zero-filled `row` x `col` array.
    #[inline]
    pub fn zeros(row: usize, col: usize) -> Self {
        ByteArray2d {
            inner_array: vec![0; row * col],
            row,
            col,
        }
    }

    /// Returns the underlying bytes in row-major order (suitable for upload).
    #[inline]
    fn as_slice(&self) -> &[u8] {
        self.inner_array.as_slice()
    }

    /// Maps a `(row, col)` coordinate to the index into the flat vector.
    /// Bounds are only checked in debug builds (`debug_assert!`); release
    /// builds rely on the `Vec` indexing bounds check downstream.
    #[inline]
    fn get_vec_index(&self, row: usize, col: usize) -> usize {
        debug_assert!(
            row < self.row,
            "row out of range: row={}, given={}",
            self.row,
            row
        );
        debug_assert!(
            col < self.col,
            "column out of range: col={}, given={}",
            self.col,
            col
        );
        row * self.col + col
    }
}
/// Read access via `pixels[(row, col)]`.
impl std::ops::Index<(usize, usize)> for ByteArray2d {
    type Output = u8;

    #[inline]
    fn index(&self, index: (usize, usize)) -> &u8 {
        let (row, col) = index;
        let flat = self.get_vec_index(row, col);
        &self.inner_array[flat]
    }
}
/// Write access via `pixels[(row, col)] = v`.
impl std::ops::IndexMut<(usize, usize)> for ByteArray2d {
    #[inline]
    fn index_mut(&mut self, index: (usize, usize)) -> &mut u8 {
        let (row, col) = index;
        let flat = self.get_vec_index(row, col);
        &mut self.inner_array[flat]
    }
}
/// Row of pixel data
struct Row {
    /// Row pixel height
    height: u32,
    /// Pixel width current in use by glyphs
    width: u32,
    /// Glyphs packed into this row, left to right in insertion order.
    glyphs: Vec<GlyphTexInfo>,
}
/// Bookkeeping for one glyph texture stored within a `Row`.
struct GlyphTexInfo {
    /// Lossy key this texture was cached under.
    glyph_info: LossyGlyphInfo,
    /// Actual (lossless) normalised subpixel offset of rasterized glyph
    offset: Vector<f32>,
    /// Texel rectangle in the cache texture (still padded if padding is on).
    tex_coords: Rect<u32>,
}
/// Shrinks a rectangle that includes padding back to the glyph's actual area.
trait PaddingAware {
    fn unpadded(self) -> Self;
}

impl PaddingAware for Rect<u32> {
    /// A padded texture has 1 extra pixel on all sides
    fn unpadded(mut self) -> Self {
        self.min.x += 1;
        self.min.y += 1;
        self.max.x -= 1;
        self.max.y -= 1;
        self
    }
}
/// An implementation of a dynamic GPU glyph cache. See the module documentation
/// for more information.
pub struct Cache<'font> {
    /// See `CacheBuilder::scale_tolerance` (clamped to >= 0.001 by `validated`).
    scale_tolerance: f32,
    /// See `CacheBuilder::position_tolerance` (clamped to >= 0.001).
    position_tolerance: f32,
    /// Cache texture width in texels.
    width: u32,
    /// Cache texture height in texels.
    height: u32,
    /// Occupied rows keyed by their top coordinate, in LRU order
    /// (front = least recently used, eligible for eviction first).
    rows: LinkedHashMap<u32, Row, FxBuildHasher>,
    /// Mapping of row gaps bottom -> top
    space_start_for_end: FxHashMap<u32, u32>,
    /// Mapping of row gaps top -> bottom
    space_end_for_start: FxHashMap<u32, u32>,
    /// Glyphs queued via `queue_glyph`, pending the next `cache_queued`.
    queue: Vec<(FontId, PositionedGlyph<'font>)>,
    /// Lookup of cached glyph -> (row top, index within that row's glyphs).
    all_glyphs: FxHashMap<LossyGlyphInfo, TextureRowGlyphIndex>,
    /// See `CacheBuilder::pad_glyphs`.
    pad_glyphs: bool,
    /// See `CacheBuilder::align_4x4`.
    align_4x4: bool,
    /// See `CacheBuilder::multithread`.
    multithread: bool,
}
/// Builder & rebuilder for `Cache`.
///
/// # Example
///
/// ```
/// use rusttype::gpu_cache::Cache;
///
/// // Create a cache with all default values set explicitly
/// // equivalent to `Cache::builder().build()`
/// let default_cache = Cache::builder()
///     .dimensions(256, 256)
///     .scale_tolerance(0.1)
///     .position_tolerance(0.1)
///     .pad_glyphs(true)
///     .align_4x4(false)
///     .multithread(true)
///     .build();
///
/// // Create a cache with all default values, except with a dimension of 1024x1024
/// let bigger_cache = Cache::builder().dimensions(1024, 1024).build();
/// ```
#[derive(Debug, Clone)]
pub struct CacheBuilder {
    /// (width, height) of the cache texture; see `dimensions`.
    dimensions: (u32, u32),
    /// See `scale_tolerance`; minimum enforced later by `validated`.
    scale_tolerance: f32,
    /// See `position_tolerance`; minimum enforced later by `validated`.
    position_tolerance: f32,
    /// See `pad_glyphs`.
    pad_glyphs: bool,
    /// See `align_4x4`.
    align_4x4: bool,
    /// See `multithread`.
    multithread: bool,
}
impl Default for CacheBuilder {
fn default() -> Self {
Self {
dimensions: (256, 256),
scale_tolerance: 0.1,
position_tolerance: 0.1,
pad_glyphs: true,
align_4x4: false,
multithread: true,
}
}
}
impl CacheBuilder {
/// `width` & `height` dimensions of the 2D texture that will hold the
/// cache contents on the GPU.
///
/// This must match the dimensions of the actual texture used, otherwise
/// `cache_queued` will try to cache into coordinates outside the bounds of
/// the texture.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().dimensions(256, 256).build();
/// ```
pub fn dimensions(mut self, width: u32, height: u32) -> Self {
    // Stored as-is; used by `build`/`rebuild` to size the CPU bookkeeping.
    self.dimensions = (width, height);
    self
}
/// Specifies the tolerances (maximum allowed difference) for judging
/// whether an existing glyph in the cache is close enough to the
/// requested glyph in scale to be used in its place. Due to floating
/// point inaccuracies a min value of `0.001` is enforced.
///
/// Both `scale_tolerance` and `position_tolerance` are measured in pixels.
///
/// Tolerances produce even steps for scale and subpixel position. Only a
/// single glyph texture will be used within a single step. For example,
/// `scale_tolerance = 0.1` will have a step `9.95-10.05` so similar glyphs
/// with scale `9.98` & `10.04` will match.
///
/// A typical application will produce results with no perceptible
/// inaccuracies with `scale_tolerance` and `position_tolerance` set to
/// 0.1. Depending on the target DPI higher tolerance may be acceptable.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().scale_tolerance(0.1).build();
/// ```
pub fn scale_tolerance<V: Into<f32>>(mut self, scale_tolerance: V) -> Self {
    // The 0.001 minimum is applied later, in `validated()`.
    self.scale_tolerance = scale_tolerance.into();
    self
}
/// Specifies the tolerances (maximum allowed difference) for judging
/// whether an existing glyph in the cache is close enough to the requested
/// glyph in subpixel offset to be used in its place. Due to floating
/// point inaccuracies a min value of `0.001` is enforced.
///
/// Both `scale_tolerance` and `position_tolerance` are measured in pixels.
///
/// Tolerances produce even steps for scale and subpixel position. Only a
/// single glyph texture will be used within a single step. For example,
/// `scale_tolerance = 0.1` will have a step `9.95-10.05` so similar glyphs
/// with scale `9.98` & `10.04` will match.
///
/// Note that since `position_tolerance` is a tolerance of subpixel
/// offsets, setting it to 1.0 or higher is effectively a "don't care"
/// option.
///
/// A typical application will produce results with no perceptible
/// inaccuracies with `scale_tolerance` and `position_tolerance` set to
/// 0.1. Depending on the target DPI higher tolerance may be acceptable.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().position_tolerance(0.1).build();
/// ```
pub fn position_tolerance<V: Into<f32>>(mut self, position_tolerance: V) -> Self {
    // The 0.001 minimum is applied later, in `validated()`.
    self.position_tolerance = position_tolerance.into();
    self
}
/// Pack glyphs in texture with a padding of a single zero alpha pixel to
/// avoid bleeding from interpolated shader texture lookups near edges.
///
/// If glyphs are never transformed this may be set to `false` to slightly
/// improve the glyph packing.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().pad_glyphs(true).build();
/// ```
pub fn pad_glyphs(mut self, pad_glyphs: bool) -> Self {
    // Padding adds 2 texels to each glyph dimension at cache time.
    self.pad_glyphs = pad_glyphs;
    self
}
/// Align glyphs in texture to 4x4 texel boundaries.
///
/// If your backend requires texture updates to be aligned to 4x4 texel
/// boundaries (e.g. WebGL), this should be set to `true`.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().align_4x4(false).build();
/// ```
pub fn align_4x4(mut self, align_4x4: bool) -> Self {
    // When set, glyph rects are rounded up to multiples of 4 at cache time.
    self.align_4x4 = align_4x4;
    self
}
/// When multiple CPU cores are available spread rasterization work across
/// all cores.
///
/// Significantly reduces worst case latency in multicore environments.
///
/// # Platform-specific behaviour
///
/// This option has no effect on wasm32.
///
/// # Example (set to default value)
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().multithread(true).build();
/// ```
pub fn multithread(mut self, multithread: bool) -> Self {
    // Effective value is resolved in `validated()` (requires > 1 CPU).
    self.multithread = multithread;
    self
}
/// Normalises the builder before use: panics on negative tolerances,
/// clamps both tolerances to the documented 0.001 minimum, and resolves
/// the effective `multithread` flag.
fn validated(self) -> Self {
    // Negative tolerances are a caller bug; see `build`'s `# Panics` docs.
    assert!(self.scale_tolerance >= 0.0);
    assert!(self.position_tolerance >= 0.0);
    let scale_tolerance = self.scale_tolerance.max(0.001);
    let position_tolerance = self.position_tolerance.max(0.001);
    // Multithreading only pays off with more than one CPU. On wasm32 the
    // flag is left as-is (it is ignored at rasterization time there).
    #[cfg(not(target_arch = "wasm32"))]
    let multithread = self.multithread && num_cpus::get() > 1;
    Self {
        scale_tolerance,
        position_tolerance,
        #[cfg(not(target_arch = "wasm32"))]
        multithread,
        ..self
    }
}
/// Constructs a new cache. Note that this is just the CPU side of the
/// cache. The GPU texture is managed by the user.
///
/// # Panics
///
/// `scale_tolerance` or `position_tolerance` are less than or equal to
/// zero.
///
/// # Example
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// let cache = Cache::builder().build();
/// ```
pub fn build<'a>(self) -> Cache<'a> {
    let CacheBuilder {
        dimensions: (width, height),
        scale_tolerance,
        position_tolerance,
        pad_glyphs,
        align_4x4,
        multithread,
    } = self.validated();

    Cache {
        scale_tolerance,
        position_tolerance,
        width,
        height,
        rows: LinkedHashMap::default(),
        // Initially the entire texture height [0, height) is one free gap,
        // recorded in both directions: gap end -> start, and start -> end.
        space_start_for_end: {
            let mut m = HashMap::default();
            m.insert(height, 0);
            m
        },
        space_end_for_start: {
            let mut m = HashMap::default();
            m.insert(0, height);
            m
        },
        queue: Vec::new(),
        all_glyphs: HashMap::default(),
        pad_glyphs,
        align_4x4,
        multithread,
    }
}
/// Rebuilds a `Cache` with new attributes. All cached glyphs are cleared,
/// however the glyph queue is retained unmodified.
///
/// # Panics
///
/// `scale_tolerance` or `position_tolerance` are less than or equal to
/// zero.
///
/// # Example
///
/// ```
/// # use rusttype::gpu_cache::Cache;
/// # let mut cache = Cache::builder().build();
/// // Rebuild the cache with different dimensions
/// cache.to_builder().dimensions(768, 768).rebuild(&mut cache);
/// ```
pub fn rebuild(self, cache: &mut Cache) {
    let CacheBuilder {
        dimensions: (width, height),
        scale_tolerance,
        position_tolerance,
        pad_glyphs,
        align_4x4,
        multithread,
    } = self.validated();

    cache.width = width;
    cache.height = height;
    cache.scale_tolerance = scale_tolerance;
    cache.position_tolerance = position_tolerance;
    cache.pad_glyphs = pad_glyphs;
    cache.align_4x4 = align_4x4;
    cache.multithread = multithread;
    // Dropping cached glyphs also resets the free-space bookkeeping to the
    // new dimensions; the queue is deliberately left untouched.
    cache.clear();
}
}
/// Returned from `Cache::rect_for`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum CacheReadErr {
    /// Indicates that the requested glyph is not present in the cache
    /// (it was never cached, or has since been evicted).
    GlyphNotCached,
}
impl fmt::Display for CacheReadErr {
    /// Human-readable error text; delegates to `str`'s `Display` so
    /// formatter flags (width, alignment, …) are honoured.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let text = match *self {
            CacheReadErr::GlyphNotCached => "Glyph not cached",
        };
        fmt::Display::fmt(text, f)
    }
}

impl error::Error for CacheReadErr {}
/// Returned from `Cache::cache_queued`.
///
/// Either error leaves the glyph queue untouched.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum CacheWriteErr {
    /// At least one of the queued glyphs is too big to fit into the cache, even
    /// if all other glyphs are removed.
    GlyphTooLarge,
    /// Not all of the requested glyphs can fit into the cache, even if the
    /// cache is completely cleared before the attempt.
    NoRoomForWholeQueue,
}
impl fmt::Display for CacheWriteErr {
    /// Human-readable error text; delegates to `str`'s `Display` so
    /// formatter flags (width, alignment, …) are honoured.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let text = match *self {
            CacheWriteErr::GlyphTooLarge => "Glyph too large",
            CacheWriteErr::NoRoomForWholeQueue => "No room for whole queue",
        };
        fmt::Display::fmt(text, f)
    }
}

impl error::Error for CacheWriteErr {}
/// Successful method of caching of the queue, returned in the `Ok` case of
/// `Cache::cache_queued`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum CachedBy {
    /// Added any additional glyphs into the texture without affecting
    /// the position of any already cached glyphs in the latest queue.
    ///
    /// Glyphs not in the latest queue may have been removed.
    Adding,
    /// Fit the glyph queue by re-ordering all glyph texture positions.
    /// Previous texture positions are no longer valid.
    Reordering,
}
/// Extracts the fractional part of `position` wrapped into `[-0.5, 0.5]`,
/// i.e. the subpixel offset relative to the nearest integer pixel position.
fn normalised_offset_from_position(position: Point<f32>) -> Vector<f32> {
    // `fract()` yields a value in (-1, 1); fold it into the [-0.5, 0.5] band.
    fn wrap(frac: f32) -> f32 {
        if frac > 0.5 {
            frac - 1.0
        } else if frac < -0.5 {
            frac + 1.0
        } else {
            frac
        }
    }
    vector(wrap(position.x.fract()), wrap(position.y.fract()))
}
impl<'font> Cache<'font> {
/// Returns a default `CacheBuilder`.
#[inline]
pub fn builder() -> CacheBuilder {
    CacheBuilder::default()
}

/// Returns the current scale tolerance for the cache.
pub fn scale_tolerance(&self) -> f32 {
    self.scale_tolerance
}

/// Returns the current subpixel position tolerance for the cache.
pub fn position_tolerance(&self) -> f32 {
    self.position_tolerance
}

/// Returns the cache texture dimensions `(width, height)` assumed by the
/// cache. For proper operation this should match the dimensions of the
/// used GPU texture.
pub fn dimensions(&self) -> (u32, u32) {
    (self.width, self.height)
}
/// Queue a glyph for caching by the next call to `cache_queued`. `font_id`
/// is used to disambiguate glyphs from different fonts. The user should
/// ensure that `font_id` is unique to the font the glyph is from.
pub fn queue_glyph(&mut self, font_id: usize, glyph: PositionedGlyph<'font>) {
    // Shapeless glyphs (e.g. whitespace) have no bounding box and nothing
    // to rasterize, so they are silently skipped.
    if glyph.pixel_bounding_box().is_some() {
        self.queue.push((font_id, glyph));
    }
}
/// Clears the cache. Does not affect the glyph queue.
pub fn clear(&mut self) {
    self.rows.clear();
    // Reset both free-space maps to a single gap covering the full height.
    self.space_end_for_start.clear();
    self.space_end_for_start.insert(0, self.height);
    self.space_start_for_end.clear();
    self.space_start_for_end.insert(self.height, 0);
    self.all_glyphs.clear();
}

/// Clears the glyph queue.
pub fn clear_queue(&mut self) {
    self.queue.clear();
}
/// Returns a `CacheBuilder` with this cache's attributes, e.g. for use
/// with `CacheBuilder::rebuild`.
pub fn to_builder(&self) -> CacheBuilder {
    CacheBuilder {
        dimensions: (self.width, self.height),
        position_tolerance: self.position_tolerance,
        scale_tolerance: self.scale_tolerance,
        pad_glyphs: self.pad_glyphs,
        align_4x4: self.align_4x4,
        multithread: self.multithread,
    }
}
/// Returns glyph info with accuracy according to the set tolerances.
fn lossy_info_for(&self, font_id: FontId, glyph: &PositionedGlyph<'font>) -> LossyGlyphInfo {
    let scale = glyph.scale();
    let offset = normalised_offset_from_position(glyph.position());

    LossyGlyphInfo {
        font_id,
        glyph_id: glyph.id(),
        // `+ 0.5` rounds to the nearest tolerance step instead of truncating.
        scale_over_tolerance: (
            (scale.x / self.scale_tolerance + 0.5) as u32,
            (scale.y / self.scale_tolerance + 0.5) as u32,
        ),
        // convert [-0.5, 0.5] -> [0, 1] then divide
        offset_over_tolerance: (
            ((offset.x + 0.5) / self.position_tolerance + 0.5) as u16,
            ((offset.y + 0.5) / self.position_tolerance + 0.5) as u16,
        ),
    }
}
/// Caches the queued glyphs. If this is unsuccessful, the queue is
/// untouched. Any glyphs cached by previous calls to this function may be
/// removed from the cache to make room for the newly queued glyphs. Thus if
/// you want to ensure that a glyph is in the cache, the most recently
/// cached queue must have contained that glyph.
///
/// `uploader` is the user-provided function that should perform the texture
/// uploads to the GPU. The information provided is the rectangular region
/// to insert the pixel data into, and the pixel data itself. This data is
/// provided in horizontal scanline format (row major), with stride equal to
/// the rectangle width.
///
/// If successful returns a `CachedBy` that can indicate the validity of
/// previously cached glyph textures.
pub fn cache_queued<F: FnMut(Rect<u32>, &[u8])>(
    &mut self,
    mut uploader: F,
) -> Result<CachedBy, CacheWriteErr> {
    let mut queue_success = true;
    // If the cache was empty when we started, a failed pack cannot be
    // improved by clearing & retrying — it's a hard NoRoomForWholeQueue.
    let from_empty = self.all_glyphs.is_empty();

    {
        let (mut in_use_rows, mut uncached_glyphs) = {
            let mut in_use_rows =
                HashSet::with_capacity_and_hasher(self.rows.len(), FxBuildHasher::default());
            let mut uncached_glyphs = Vec::with_capacity(self.queue.len());

            // divide glyphs into texture rows where a matching glyph texture
            // already exists & glyphs where new textures must be cached
            for (font_id, ref glyph) in &self.queue {
                let glyph_info = self.lossy_info_for(*font_id, glyph);
                if let Some((row, ..)) = self.all_glyphs.get(&glyph_info) {
                    in_use_rows.insert(*row);
                } else {
                    uncached_glyphs.push((glyph, glyph_info));
                }
            }

            (in_use_rows, uncached_glyphs)
        };

        // Refresh rows that hold in-use glyphs so they move to the back of
        // the LRU order and are last in line for eviction below.
        for row in &in_use_rows {
            self.rows.get_refresh(row);
        }

        // tallest first gives better packing
        // can use 'sort_unstable' as order of equal elements is unimportant
        uncached_glyphs
            .sort_unstable_by_key(|(glyph, ..)| -glyph.pixel_bounding_box().unwrap().height());

        self.all_glyphs.reserve(uncached_glyphs.len());
        let mut draw_and_upload = Vec::with_capacity(uncached_glyphs.len());

        'per_glyph: for (glyph, glyph_info) in uncached_glyphs {
            // glyph may match a texture cached by a previous iteration
            if self.all_glyphs.contains_key(&glyph_info) {
                continue;
            }

            // Not cached, so add it:
            let (unaligned_width, unaligned_height) = {
                let bb = glyph.pixel_bounding_box().unwrap();
                if self.pad_glyphs {
                    // +2: one transparent padding texel on every side
                    (bb.width() as u32 + 2, bb.height() as u32 + 2)
                } else {
                    (bb.width() as u32, bb.height() as u32)
                }
            };
            let (aligned_width, aligned_height) = if self.align_4x4 {
                // align to the next 4x4 texel boundary
                ((unaligned_width + 3) & !3, (unaligned_height + 3) & !3)
            } else {
                (unaligned_width, unaligned_height)
            };
            if aligned_width >= self.width || aligned_height >= self.height {
                return Result::Err(CacheWriteErr::GlyphTooLarge);
            }

            // find row to put the glyph in, most used rows first
            let mut row_top = None;
            for (top, row) in self.rows.iter().rev() {
                if row.height >= aligned_height && self.width - row.width >= aligned_width {
                    // found a spot on an existing row
                    row_top = Some(*top);
                    break;
                }
            }

            if row_top.is_none() {
                let mut gap = None;
                // See if there is space for a new row
                for (start, end) in &self.space_end_for_start {
                    if end - start >= aligned_height {
                        gap = Some((*start, *end));
                        break;
                    }
                }
                if gap.is_none() {
                    // Remove old rows until room is available
                    while !self.rows.is_empty() {
                        // check that the oldest row isn't also in use
                        if !in_use_rows.contains(self.rows.front().unwrap().0) {
                            // Remove row
                            let (top, row) = self.rows.pop_front().unwrap();
                            for g in row.glyphs {
                                self.all_glyphs.remove(&g.glyph_info);
                            }
                            let (mut new_start, mut new_end) = (top, top + row.height);
                            // Update the free space maps
                            // Combine with neighbouring free space if possible
                            if let Some(end) = self.space_end_for_start.remove(&new_end) {
                                new_end = end;
                            }
                            if let Some(start) = self.space_start_for_end.remove(&new_start) {
                                new_start = start;
                            }
                            self.space_start_for_end.insert(new_end, new_start);
                            self.space_end_for_start.insert(new_start, new_end);
                            if new_end - new_start >= aligned_height {
                                // The newly formed gap is big enough
                                gap = Some((new_start, new_end));
                                break;
                            }
                        }
                        // all rows left are in use
                        // try a clean insert of all needed glyphs
                        // if that doesn't work, fail
                        else if from_empty {
                            // already trying a clean insert, don't do it again
                            return Err(CacheWriteErr::NoRoomForWholeQueue);
                        } else {
                            // signal that a retry is needed
                            queue_success = false;
                            break 'per_glyph;
                        }
                    }
                }
                let (gap_start, gap_end) = gap.unwrap();
                // fill space for new row
                let new_space_start = gap_start + aligned_height;
                self.space_end_for_start.remove(&gap_start);
                if new_space_start == gap_end {
                    // Row consumed the entire gap.
                    self.space_start_for_end.remove(&gap_end);
                } else {
                    // Shrink the gap: its start moves below the new row.
                    self.space_end_for_start.insert(new_space_start, gap_end);
                    self.space_start_for_end.insert(gap_end, new_space_start);
                }
                // add the row
                self.rows.insert(
                    gap_start,
                    Row {
                        width: 0,
                        height: aligned_height,
                        glyphs: Vec::new(),
                    },
                );
                row_top = Some(gap_start);
            }
            let row_top = row_top.unwrap();

            // calculate the target rect
            let row = self.rows.get_refresh(&row_top).unwrap();
            let aligned_tex_coords = Rect {
                min: point(row.width, row_top),
                max: point(row.width + aligned_width, row_top + aligned_height),
            };
            // Unaligned rect is what lookups will see; the aligned rect is
            // only used for the actual upload region.
            let unaligned_tex_coords = Rect {
                min: point(row.width, row_top),
                max: point(row.width + unaligned_width, row_top + unaligned_height),
            };

            draw_and_upload.push((aligned_tex_coords, glyph));

            // add the glyph to the row
            row.glyphs.push(GlyphTexInfo {
                glyph_info,
                offset: normalised_offset_from_position(glyph.position()),
                tex_coords: unaligned_tex_coords,
            });
            row.width += aligned_width;
            in_use_rows.insert(row_top);

            self.all_glyphs
                .insert(glyph_info, (row_top, row.glyphs.len() as u32 - 1));
        }

        if queue_success {
            #[cfg(not(target_arch = "wasm32"))]
            {
                let glyph_count = draw_and_upload.len();

                if self.multithread && glyph_count > 1 {
                    // multithread rasterization
                    use crossbeam_deque::Steal;
                    use std::{
                        mem,
                        sync::mpsc::{self, TryRecvError},
                    };

                    let rasterize_queue = crossbeam_deque::Injector::new();
                    let (to_main, from_stealers) = mpsc::channel();
                    let pad_glyphs = self.pad_glyphs;

                    for el in draw_and_upload {
                        rasterize_queue.push(el);
                    }

                    crossbeam_utils::thread::scope(|scope| {
                        // Spawn workers for all but one CPU; the current
                        // thread also rasterizes and performs all uploads.
                        for _ in 0..num_cpus::get().min(glyph_count).saturating_sub(1) {
                            let rasterize_queue = &rasterize_queue;
                            let to_main = to_main.clone();
                            scope.spawn(move |_| loop {
                                match rasterize_queue.steal() {
                                    Steal::Success((tex_coords, glyph)) => {
                                        let pixels = draw_glyph(tex_coords, glyph, pad_glyphs);
                                        to_main.send((tex_coords, pixels)).unwrap();
                                    }
                                    Steal::Empty => break,
                                    Steal::Retry => {}
                                }
                            });
                        }
                        // Drop the main thread's sender so the channel
                        // disconnects once every worker is done.
                        mem::drop(to_main);

                        let mut workers_finished = false;
                        loop {
                            // Rasterize one glyph on this thread too.
                            match rasterize_queue.steal() {
                                Steal::Success((tex_coords, glyph)) => {
                                    let pixels = draw_glyph(tex_coords, glyph, pad_glyphs);
                                    uploader(tex_coords, pixels.as_slice());
                                }
                                Steal::Empty if workers_finished => break,
                                Steal::Empty | Steal::Retry => {}
                            }

                            // Drain worker results; uploads must happen on
                            // this (the caller's) thread.
                            while !workers_finished {
                                match from_stealers.try_recv() {
                                    Ok((tex_coords, pixels)) => {
                                        uploader(tex_coords, pixels.as_slice())
                                    }
                                    Err(TryRecvError::Disconnected) => workers_finished = true,
                                    Err(TryRecvError::Empty) => break,
                                }
                            }
                        }
                    })
                    .unwrap();
                } else {
                    // single thread rasterization
                    for (tex_coords, glyph) in draw_and_upload {
                        let pixels = draw_glyph(tex_coords, glyph, self.pad_glyphs);
                        uploader(tex_coords, pixels.as_slice());
                    }
                }
            }
            #[cfg(target_arch = "wasm32")]
            {
                for (tex_coords, glyph) in draw_and_upload {
                    let pixels = draw_glyph(tex_coords, glyph, self.pad_glyphs);
                    uploader(tex_coords, pixels.as_slice());
                }
            }
        }
    }

    if queue_success {
        self.queue.clear();
        Ok(CachedBy::Adding)
    } else {
        // clear the cache then try again with optimal packing
        self.clear();
        self.cache_queued(uploader).map(|_| CachedBy::Reordering)
    }
}
/// Retrieves the (floating point) texture coordinates of the quad for a
/// glyph in the cache, as well as the pixel-space (integer) coordinates
/// that this region should be drawn at. These pixel-space coordinates
/// assume an origin at the top left of the quad. In the majority of cases
/// these pixel-space coordinates should be identical to the bounding box of
/// the input glyph. They only differ if the cache has returned a substitute
/// glyph that is deemed close enough to the requested glyph as specified by
/// the cache tolerance parameters.
///
/// A successful result is `Some` if the glyph is not an empty glyph (no
/// shape, and thus no rect to return).
///
/// Ensure that `font_id` matches the `font_id` that was passed to
/// `queue_glyph` with this `glyph`.
pub fn rect_for(
    &self,
    font_id: usize,
    glyph: &PositionedGlyph,
) -> Result<Option<TextureCoords>, CacheReadErr> {
    // Empty glyphs (e.g. spaces) are never cached and have no rect.
    if glyph.pixel_bounding_box().is_none() {
        return Ok(None);
    }

    let (row, index) = self
        .all_glyphs
        .get(&self.lossy_info_for(font_id, glyph))
        .ok_or(CacheReadErr::GlyphNotCached)?;

    let (tex_width, tex_height) = (self.width as f32, self.height as f32);

    let GlyphTexInfo {
        tex_coords: mut tex_rect,
        offset: tex_offset,
        ..
    } = self.rows[row].glyphs[*index as usize];
    if self.pad_glyphs {
        // Strip the 1-texel transparent border before computing UVs.
        tex_rect = tex_rect.unpadded();
    }
    // Normalise texel coordinates into [0, 1] UV space.
    let uv_rect = Rect {
        min: point(
            tex_rect.min.x as f32 / tex_width,
            tex_rect.min.y as f32 / tex_height,
        ),
        max: point(
            tex_rect.max.x as f32 / tex_width,
            tex_rect.max.y as f32 / tex_height,
        ),
    };

    // Re-derive the bounding box the cached (possibly substitute)
    // rasterization has at its stored subpixel offset, then translate it
    // to this glyph's requested position.
    let local_bb = glyph
        .unpositioned()
        .clone()
        .positioned(point(0.0, 0.0) + tex_offset)
        .pixel_bounding_box()
        .unwrap();
    let min_from_origin =
        point(local_bb.min.x as f32, local_bb.min.y as f32) - (point(0.0, 0.0) + tex_offset);
    let ideal_min = min_from_origin + glyph.position();
    let min = point(ideal_min.x.round() as i32, ideal_min.y.round() as i32);
    let bb_offset = min - local_bb.min;
    let bb = Rect {
        min,
        max: local_bb.max + bb_offset,
    };
    Ok(Some((uv_rect, bb)))
}
}
/// Rasterizes `glyph` into a fresh CPU-side buffer sized to `tex_coords`,
/// shifting coverage writes by one texel on the top/left when padding is
/// enabled so the border stays zero-alpha.
#[inline]
fn draw_glyph(tex_coords: Rect<u32>, glyph: &PositionedGlyph<'_>, pad_glyphs: bool) -> ByteArray2d {
    let mut pixels = ByteArray2d::zeros(tex_coords.height() as usize, tex_coords.width() as usize);
    // 1 when padded (accounts for the top/left padding texel), else 0.
    let pad = usize::from(pad_glyphs);
    glyph.draw(|x, y, v| {
        // Coverage in [0, 1] -> alpha byte.
        let alpha = (v * 255.0).round() as u8;
        pixels[(y as usize + pad, x as usize + pad)] = alpha;
    });
    pixels
}
#[cfg(test)]
mod test {
use super::*;
use crate::{Font, Scale};
use approx::*;
// Smoke test: repeatedly lay out and cache the same strings at several
// scales into a tiny 32x32 texture, forcing texture reuse and row
// eviction; every `cache_queued` call must succeed.
#[test]
fn cache_test() {
    let font_data = include_bytes!("../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf");
    let font = Font::try_from_bytes(font_data as &[u8]).unwrap();
    let mut cache: Cache<'static> = Cache::builder()
        .dimensions(32, 32)
        .scale_tolerance(0.1)
        .position_tolerance(0.1)
        .pad_glyphs(false)
        .build();
    let strings = [
        ("Hello World!", 15.0),
        ("Hello World!", 14.0),
        ("Hello World!", 10.0),
        ("Hello World!", 15.0),
        ("Hello World!", 14.0),
        ("Hello World!", 10.0),
    ];
    for &(string, scale) in &strings {
        println!("Caching {:?}", (string, scale));
        for glyph in font.layout(string, Scale::uniform(scale), point(0.0, 0.0)) {
            cache.queue_glyph(0, glyph);
        }
        // Upload callback is a no-op: only the packing logic is under test.
        cache.cache_queued(|_, _| {}).unwrap();
    }
}
// Regression test: a glyph whose lossy key differs from an already-queued
// near-identical glyph must still be cached and retrievable afterwards.
#[test]
fn need_to_check_whole_cache() {
    let font_data = include_bytes!("../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf");
    let font = Font::try_from_bytes(font_data as &[u8]).unwrap();

    let glyph = font.glyph('l');

    let small = glyph.clone().scaled(Scale::uniform(10.0));
    let large = glyph.clone().scaled(Scale::uniform(10.05));

    let small_left = small.clone().positioned(point(0.0, 0.0));
    let large_left = large.clone().positioned(point(0.0, 0.0));
    let large_right = large.clone().positioned(point(-0.2, 0.0));

    let mut cache = Cache::builder()
        .dimensions(32, 32)
        .scale_tolerance(0.1)
        .position_tolerance(0.1)
        .pad_glyphs(false)
        .build();

    cache.queue_glyph(0, small_left.clone());
    // Next line is noop since it's within the scale tolerance of small_left:
    cache.queue_glyph(0, large_left.clone());
    cache.queue_glyph(0, large_right.clone());

    cache.cache_queued(|_, _| {}).unwrap();

    // All three must resolve to a cached rect (no GlyphNotCached).
    cache.rect_for(0, &small_left).unwrap();
    cache.rect_for(0, &large_left).unwrap();
    cache.rect_for(0, &large_right).unwrap();
}
#[test]
fn lossy_info() {
let font_data = include_bytes!("../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf");
let font = Font::try_from_bytes(font_data as &[u8]).unwrap();
let glyph = font.glyph('l');
let small = glyph.clone().scaled(Scale::uniform(9.91));
let near = glyph.clone().scaled(Scale::uniform(10.09));
let far = glyph.clone().scaled(Scale::uniform(10.11));
let really_far = glyph.clone().scaled(Scale::uniform(12.0));
let small_pos = small.clone().positioned(point(0.0, 0.0));
let match_1 = near.clone().positioned(point(-10.0, -0.1));
let match_2 = near.clone().positioned(point(5.1, 0.24));
let match_3 = small.clone().positioned(point(-100.2, 50.1));
let miss_1 = far.clone().positioned(point(0.0, 0.0));
let miss_2 = really_far.clone().positioned(point(0.0, 0.0));
let miss_3 = small.clone().positioned(point(0.3, 0.0));
let cache = Cache::builder()
.scale_tolerance(0.2)
.position_tolerance(0.5)
.build();
let small_info = cache.lossy_info_for(0, &small_pos);
assert_eq!(small_info, cache.lossy_info_for(0, &match_1));
assert_eq!(small_info, cache.lossy_info_for(0, &match_2));
assert_eq!(small_info, cache.lossy_info_for(0, &match_3));
assert_ne!(small_info, cache.lossy_info_for(0, &miss_1));
assert_ne!(small_info, cache.lossy_info_for(0, &miss_2));
assert_ne!(small_info, cache.lossy_info_for(0, &miss_3));
}
#[test]
fn cache_to_builder() {
let cache = CacheBuilder {
dimensions: (32, 64),
scale_tolerance: 0.2,
position_tolerance: 0.3,
pad_glyphs: false,
align_4x4: false,
multithread: false,
}
.build();
let to_builder: CacheBuilder = cache.to_builder();
assert_eq!(to_builder.dimensions, (32, 64));
assert_relative_eq!(to_builder.scale_tolerance, 0.2);
assert_relative_eq!(to_builder.position_tolerance, 0.3);
assert_eq!(to_builder.pad_glyphs, false);
assert_eq!(to_builder.align_4x4, false);
assert_eq!(to_builder.multithread, false);
}
#[test]
fn builder_rebuild() {
let mut cache = Cache::builder()
.dimensions(32, 64)
.scale_tolerance(0.2)
.position_tolerance(0.3)
.pad_glyphs(false)
.align_4x4(true)
.multithread(true)
.build();
let font = Font::try_from_bytes(include_bytes!(
"../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf"
) as &[u8])
.unwrap();
cache.queue_glyph(
0,
font.glyph('l')
.scaled(Scale::uniform(25.0))
.positioned(point(0.0, 0.0)),
);
cache.cache_queued(|_, _| {}).unwrap();
cache.queue_glyph(
0,
font.glyph('a')
.scaled(Scale::uniform(25.0))
.positioned(point(0.0, 0.0)),
);
Cache::builder()
.dimensions(64, 128)
.scale_tolerance(0.05)
.position_tolerance(0.15)
.pad_glyphs(true)
.align_4x4(false)
.multithread(false)
.rebuild(&mut cache);
assert_eq!(cache.width, 64);
assert_eq!(cache.height, 128);
assert_relative_eq!(cache.scale_tolerance, 0.05);
assert_relative_eq!(cache.position_tolerance, 0.15);
assert_eq!(cache.pad_glyphs, true);
assert_eq!(cache.align_4x4, false);
assert_eq!(cache.multithread, false);
assert!(
cache.all_glyphs.is_empty(),
"cache should have been cleared"
);
assert_eq!(cache.queue.len(), 1, "cache should have an unchanged queue");
}
/// Provide to caller that the cache was re-ordered to fit the latest queue
#[test]
fn return_cache_by_reordering() {
let font_data = include_bytes!("../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf");
let font = Font::try_from_bytes(font_data as &[u8]).unwrap();
let mut cache = Cache::builder()
.dimensions(36, 27)
.scale_tolerance(0.1)
.position_tolerance(0.1)
.build();
for glyph in font.layout("ABCDEFG", Scale::uniform(16.0), point(0.0, 0.0)) {
cache.queue_glyph(0, glyph);
}
assert_eq!(cache.cache_queued(|_, _| {}), Ok(CachedBy::Adding));
for glyph in font.layout("DEFGHIJK", Scale::uniform(16.0), point(0.0, 0.0)) {
cache.queue_glyph(0, glyph);
}
assert_eq!(cache.cache_queued(|_, _| {}), Ok(CachedBy::Reordering));
}
#[test]
fn align_4x4() {
// First, test align_4x4 disabled, to confirm non-4x4 alignment
align_4x4_helper(false, 5, 19);
// Now, test with align_4x4 enabled, to confirm 4x4 alignment
align_4x4_helper(true, 8, 20);
}
fn align_4x4_helper(align_4x4: bool, expected_width: u32, expected_height: u32) {
let mut cache = Cache::builder()
.dimensions(64, 64)
.align_4x4(align_4x4)
.build();
let font = Font::try_from_bytes(include_bytes!(
"../dev/fonts/wqy-microhei/WenQuanYiMicroHei.ttf"
) as &[u8])
.unwrap();
let glyph = font
.glyph('l')
.scaled(Scale::uniform(25.0))
.positioned(point(0.0, 0.0));
cache.queue_glyph(0, glyph.clone());
cache
.cache_queued(|rect, _| {
assert_eq!(rect.width(), expected_width);
assert_eq!(rect.height(), expected_height);
})
.unwrap();
let (uv_rect, _screen_rect) = cache.rect_for(0, &glyph).unwrap().unwrap();
assert_eq!(
uv_rect,
crate::Rect {
min: crate::point(0.015_625, 0.015_625),
max: crate::point(0.0625, 0.28125),
}
);
}
}
|
use dirs::home_dir;
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::Result;
use std::path::PathBuf;
/// System-wide directories searched for `.aiff` sound files, in the order
/// macOS resolves sounds after the per-user `~/Library/Sounds/` directory:
/// local machine, network, then system.
const CHAIN: [&str; 3] = [
    "/Library/Sounds/",
    "/Network/Library/Sounds/",
    "/System/Library/Sounds/",
];
/// A built-in macOS notification sound.
///
/// Each variant's name matches the basename of a `.aiff` file looked up in
/// the standard macOS sound directories (see `CHAIN` and
/// `Sound::is_supported`).
#[derive(Clone, Copy, Debug)]
pub enum Sound {
    // The system default alert sound.
    Default,
    Basso,
    Blow,
    Bottle,
    Frog,
    Funk,
    Glass,
    Hero,
    Morse,
    Ping,
    Pop,
    Purr,
    Sosumi,
    Submarine,
    Tink,
}
impl Sound {
    /// Returns `true` if a `.aiff` file for this sound exists in any of the
    /// standard macOS sound directories.
    ///
    /// The per-user `~/Library/Sounds/` directory is checked first, followed
    /// by the system-wide locations in `CHAIN`.
    pub fn is_supported(self) -> bool {
        home_dir()
            // BUG FIX: the path joined here must be *relative*. Joining the
            // absolute "/Library/Sounds/" replaces the home directory
            // entirely (`Path::join` semantics), so `~/Library/Sounds/` was
            // never actually searched.
            .map(|path| path.join("Library/Sounds/"))
            .into_iter()
            .chain(CHAIN.iter().map(PathBuf::from))
            .map(|dir| dir.join(format!("{}.aiff", self)))
            .any(|file| file.exists())
    }
}
impl Default for Sound {
fn default() -> Self {
Sound::Default
}
}
impl Display for Sound {
    /// Writes the sound's canonical name, which equals its variant name and
    /// is also the basename of the corresponding `.aiff` file.
    fn fmt(&self, f: &mut Formatter) -> Result {
        let name = match self {
            Sound::Default => "Default",
            Sound::Basso => "Basso",
            Sound::Blow => "Blow",
            Sound::Bottle => "Bottle",
            Sound::Frog => "Frog",
            Sound::Funk => "Funk",
            Sound::Glass => "Glass",
            Sound::Hero => "Hero",
            Sound::Morse => "Morse",
            Sound::Ping => "Ping",
            Sound::Pop => "Pop",
            Sound::Purr => "Purr",
            Sound::Sosumi => "Sosumi",
            Sound::Submarine => "Submarine",
            Sound::Tink => "Tink",
        };
        write!(f, "{}", name)
    }
}
|
use std::fs;
use std::io::Read;
use std::path::PathBuf;
use structopt::clap::arg_enum;
use structopt::StructOpt;
use eyre::Result as AResult;
// Serialisation formats accepted on the command line (`--output-format`).
// `arg_enum!` derives the `FromStr`/variants plumbing structopt needs.
arg_enum! {
    #[derive(Debug)]
    enum OutputFormat {
        Json,
        Debug,
    }
}
// Command-line arguments, parsed by structopt.
// NOTE: plain `//` comments are used deliberately — `///` doc comments on
// structopt fields would become CLI help text and change program output.
#[derive(Debug, StructOpt)]
#[structopt(
    name = "vim-swapfile-header",
    about = "Reads vim swapfile headers and dumps them as json"
)]
struct Opts {
    // Path of the vim swapfile to inspect.
    filename: PathBuf,
    // Output serialisation format; defaults to JSON.
    #[structopt(default_value = "json", long, short = "f")]
    output_format: OutputFormat,
}
/// Entry point: reads the first 1 KiB of the given swapfile, parses the
/// header, and prints it in the requested format.
///
/// Returns an error if the file cannot be opened/read, the header fails to
/// parse, or JSON serialisation fails.
fn main() -> AResult<()> {
    let args = Opts::from_args();
    let mut file = fs::OpenOptions::new().read(true).open(&args.filename)?;
    // The parser receives a fixed 1 KiB buffer; if the file is shorter the
    // remainder stays zero-padded, matching the original behaviour.
    let mut buf = [0u8; 1024];
    // BUG FIX: `Read::read` may legally return fewer bytes than requested
    // (clippy `unused_io_amount`); a single call could hand a truncated
    // header to the parser. Keep reading until the buffer is full or EOF.
    let mut filled = 0;
    while filled < buf.len() {
        let n = file.read(&mut buf[filled..])?;
        if n == 0 {
            break; // EOF reached before the buffer filled up.
        }
        filled += n;
    }
    let header = vim_swapfile_header::from_bytes(&buf)?;
    let out_s = match args.output_format {
        OutputFormat::Json => serde_json::ser::to_string(&header)?,
        OutputFormat::Debug => format!("{:#?}", &header),
    };
    println!("{}", out_s);
    Ok(())
}
|
// Crate root: publicly exposes the camera and model submodules.
// NOTE(review): module contents live in camera.rs / model.rs — not visible here.
pub mod camera;
pub mod model;
|
/*!
* Perseus is a blazingly fast frontend web development framework built in Rust with support for major rendering strategies,
* reactivity without a virtual DOM, and extreme customizability. It wraps the lower-level capabilities of [Sycamore](https://github.com/sycamore-rs/sycamore)
* and provides a NextJS-like API!
*
* - ✨ Supports static generation (serving only static resources)
* - ✨ Supports server-side rendering (serving dynamic resources)
* - ✨ Supports revalidation after time and/or with custom logic (updating rendered pages)
* - ✨ Supports incremental regeneration (build on demand)
* - ✨ Open build matrix (use any rendering strategy with anything else, mostly)
* - ✨ CLI harness that lets you build apps with ease and confidence
*
* This is the documentation for the Perseus Actix Web integration, but there's also [a CLI](https://arctic-hen7.github.io/perseus/cli.html),
* [the core package](https://crates.io/crates/perseus), and other [integrations](https://arctic-hen7.github.io/perseus/serving.html)
* to make serving apps on other platforms easier!
*
* # Resources
*
* These docs will help you as a reference, but [the book](https://arctic-hen7.github.io/perseus/integrations/actix-web.html) should
* be your first port of call for learning about how to use Perseus and how it works.
*
* - [The Book](https://arctic-hen7.github.io/perseus)
* - [GitHub repository](https://github.com/arctic-hen7/perseus)
* - [Crate page](https://crates.io/crates/perseus)
* - [Gitter chat](https://gitter.im/perseus-framework/community)
* - [Discord server channel](https://discord.com/channels/820400041332179004/883168134331256892) (for Sycamore-related stuff)
*/
// Lint: every public item in this crate must be documented.
#![deny(missing_docs)]
// Internal plumbing modules; only `errors` and the re-exports below form the
// public API surface of this integration crate.
mod configurer;
mod conv_req;
pub mod errors;
mod initial_load;
mod page_data;
mod translations;
// The configuration entry point and its options are re-exported at the root.
pub use crate::configurer::{configurer, Options};
|
use crate::interpreter::HeaderFlag::PrimSelf;
use crate::objectmemory::{ObjectLayout, ObjectMemory, UWord, Word, CANNOT_RETURN_SEL, CHARACTER_TABLE_PTR, CLASS_ARRAY_PTR, CLASS_BLOCK_CONTEXT_PTR, CLASS_CHARACTER_PTR, CLASS_LARGE_POSITIVEINTEGER_PTR, CLASS_MESSAGE_PTR, CLASS_METHOD_CONTEXT_PTR, CLASS_POINT_PTR, CLASS_STRING_PTR, DOES_NOT_UNDERSTAND_SEL, FALSE_PTR, MUST_BE_BOOLEAN_SEL, NIL_PTR, OOP, SCHEDULER_ASSOCIATION_PTR, SPECIAL_SELECTORS_PTR, TRUE_PTR, CLASS_FLOAT_PTR};
use std::collections::{VecDeque, HashSet, HashMap};
use std::borrow::Cow;
use std::cell::{RefCell, Cell};
use std::rc::Rc;
use crate::interpreter::gc_support::HeldOops;
use std::fs::File;
use std::io::BufRead;
use crate::utils::floor_divmod;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
mod bitblt;
mod display;
mod startup;
// Debug-trace switches: flip to true to log, respectively, decoded
// instructions, message sends/returns, method lookup, and raw stack/temp
// memory traffic.
const DBG_INSN: bool = false;
const DBG_CALL: bool = false;
const DBG_LOOKUP: bool = false;
const DBG_MEM: bool = false;
mod gc_support;
/// The Smalltalk-80 bytecode interpreter: execution state layered on top of
/// the reference-counted object memory.
pub struct Interpreter {
    /// The object memory holding all live objects.
    memory: ObjectMemory,
    /// The context currently being executed (method or block context).
    active_context: OOP,
    /// Home of `active_context`: for a block context, the method context it
    /// closes over; otherwise the active context itself (see `load_ctx`).
    home_context: OOP,
    /// The compiled method of the home context.
    method: OOP,
    /// The receiver stored in the home context.
    receiver: OOP,
    // These should really be unsigned — and are: cached zero-based indices
    // into the method's bytes (`ip`) and the context's fields (`sp`); the
    // image stores the 1-based versions (see load_ctx/save_ctx).
    ip: usize,
    sp: usize,
    /// Number of bytecodes executed since startup.
    cycle: usize,
    /// Length of the sender chain; maintained for log indentation.
    call_depth: usize,
    // benchmark: cycles at last report and the last 5-second bucket printed.
    bmark_cycles: usize,
    bmark_lastprint: u64,
    // Message lookup process: the selector/argument count being sent, plus
    // the method and primitive index the lookup resolved to.
    message_selector: OOP,
    argument_count: usize,
    new_method: OOP,
    primitive_index: usize,
    /// Direct-mapped (selector, class) -> method cache, indexed by a hash of
    /// the two OOPs (see `find_new_method_in_class`).
    method_cache: [MethodCacheEntry; 256],
    // process scheduler
    new_process: Option<OOP>,
    semaphore_list: Vec<OOP>,
    // display
    display: DisplayState,
    display_impl: self::display::StDisplay,
    // io
    startup_time: ::std::time::Instant,
    timer_sem: Option<OOP>,
    timer_when: u32,
    // GC support: stacks of OOPs temporarily protected from collection, and
    // a debug flag for tracing allocations.
    held_objects: Rc<RefCell<Vec<Vec<OOP>>>>,
    dbg_alloc: Cell<bool>,
}
// Method constants: field layout of a CompiledMethod object — the header
// SmallInteger sits at index 0, the literal frame starts at index 1.
const HEADER_INDEX: usize = 0;
const LITERAL_START: usize = 1;
// Association constants: an Association's value lives at field index 1.
const VALUE_INDEX: usize = 1;
/// A decoded CompiledMethod header word (bit-packed counts and flags).
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
pub struct MethodHeader(usize);
/// The "flag" bits of a method header, selecting how the method executes.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
pub enum HeaderFlag {
    // no primitive, 0-4 arguments (in value)
    Normal(usize),
    /// Primitive: return the receiver itself.
    PrimSelf,
    /// Primitive: return one of the receiver's fields (see `field_index`).
    PrimReturn,
    /// Header extension present; argument count and primitive index come
    /// from an extra literal (see `HeaderExt`).
    HeaderExt,
}
impl MethodHeader {
pub fn new(oop: OOP) -> Self {
MethodHeader(oop.as_integer() as usize)
}
pub fn temporary_count(self) -> usize {
(self.0 >> 7) & 0x1F
}
pub fn large_context_flag(self) -> bool {
self.0 & 0x40 != 0
}
pub fn literal_count(self) -> usize {
self.0 & 0x3F
}
pub fn oop_count(self) -> usize {
self.literal_count() + LITERAL_START
}
pub fn initial_ip(self) -> usize {
// This might be wrong, as we are zero-indexed
self.oop_count() * OOP::byte_size() + 1
}
pub fn flag_value(self) -> HeaderFlag {
use self::HeaderFlag::*;
match (self.0 >> 12) & 0x7 {
count @ (0...4) => HeaderFlag::Normal(count),
5 => PrimSelf,
6 => PrimReturn,
7 => HeaderExt,
_ => unreachable!("Should be exhaustive"),
}
}
pub fn field_index(self) -> usize {
self.temporary_count()
}
}
/// A decoded method-header *extension* word, stored as the method's
/// second-to-last literal when the header flag is `HeaderFlag::HeaderExt`.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
struct HeaderExt(usize);
impl HeaderExt {
    /// Decodes a header-extension SmallInteger.
    fn new(oop: OOP) -> HeaderExt {
        HeaderExt(oop.as_integer() as usize)
    }
    /// Primitive index (low 8 bits).
    fn primitive_index(self) -> usize {
        self.0 & 0xFF
    }
    /// Argument count (bits 8..=12).
    fn argument_count(self) -> usize {
        (self.0 >> 8) & 0x1F
    }
}
impl Interpreter {
    // compiled methods

    /// Reads and decodes the header word of `method` (its field 0).
    fn method_header_of(&self, method: OOP) -> MethodHeader {
        MethodHeader::new(self.memory.get_ptr(method, 0))
    }

    /// Reads literal `offset` from `method`'s literal frame.
    pub fn method_literal_of(&self, method: OOP, offset: usize) -> OOP {
        self.memory.get_ptr(method, LITERAL_START + offset)
    }

    // method headers

    /// Reads the header extension of `method` (decoding the header first).
    #[allow(unused)]
    fn header_extension(&self, method: OOP) -> HeaderExt {
        self.header_extension_fast(method, self.method_header_of(method))
    }

    /// Reads the header extension when the header is already decoded; the
    /// extension is stored as the second-to-last literal.
    fn header_extension_fast(&self, method: OOP, header: MethodHeader) -> HeaderExt {
        let oop = self.method_literal_of(method, header.literal_count() - 2);
        HeaderExt::new(oop)
    }

    /// Argument count of `method`: taken from the header for normal methods,
    /// from the header extension when present, and 0 for primitive-return /
    /// primitive-self methods.
    pub fn argument_count(&self, method: OOP) -> usize {
        let header = self.method_header_of(method);
        match header.flag_value() {
            HeaderFlag::Normal(arg_count) => arg_count,
            HeaderFlag::HeaderExt => self.header_extension_fast(method, header).argument_count(),
            HeaderFlag::PrimReturn | HeaderFlag::PrimSelf => 0,
        }
    }

    /// Primitive index of `method` (0 = no numbered primitive).
    pub fn primitive_index(&self, method: OOP) -> usize {
        self.primitive_index_fast(method, self.method_header_of(method))
    }

    /// As `primitive_index`, with the header already decoded. Only methods
    /// with a header extension carry a numbered primitive.
    fn primitive_index_fast(&self, method: OOP, header: MethodHeader) -> usize {
        if header.flag_value() == HeaderFlag::HeaderExt {
            self.header_extension_fast(method, header).primitive_index()
        } else {
            0
        }
    }

    /// Class that `method` was compiled in.
    fn method_class(&self, method: OOP) -> OOP {
        self.method_class_fast(method, self.method_header_of(method))
    }

    /// As `method_class`, with the header already decoded. The last literal
    /// is an Association whose value is the defining class.
    fn method_class_fast(&self, method: OOP, header: MethodHeader) -> OOP {
        let klass_assoc = self.method_literal_of(method, header.literal_count() - 1);
        self.memory.get_ptr(klass_assoc, VALUE_INDEX)
    }
}
// Context stuff: field indices within MethodContext objects...
const CTX_SENDER_INDEX: usize = 0;
const CTX_IP_INDEX: usize = 1;
const CTX_SP_INDEX: usize = 2;
const CTX_METHOD_INDEX: usize = 3;
const CTX_RECEIVER_INDEX: usize = 5;
// ...the evaluation stack / temporaries start at field 6.
const CTX_TEMPFRAME_START_INDEX: usize = 6;
// ...and within BlockContext objects (caller aliases the sender slot).
const CTX_CALLER_INDEX: usize = 0;
const CTX_BLOCK_ARG_COUNT_INDEX: usize = 3;
const CTX_INITIAL_IP_INDEX: usize = 4;
const CTX_HOME_INDEX: usize = 5;
/// Context methods
impl Interpreter {
    /// Reads a context's stored instruction pointer (a SmallInteger).
    pub fn context_get_ip(&self, context: OOP) -> Word {
        self.memory.get_ptr(context, CTX_IP_INDEX).as_integer()
    }

    /// Stores a context's instruction pointer.
    pub fn context_put_ip(&mut self, context: OOP, new_val: Word) {
        self.memory
            .put_ptr(context, CTX_IP_INDEX, OOP::from(new_val))
    }

    /// Reads a context's stored stack pointer (a SmallInteger).
    pub fn context_get_sp(&self, context: OOP) -> Word {
        self.memory.get_ptr(context, CTX_SP_INDEX).as_integer()
    }

    /// Stores a context's stack pointer.
    pub fn context_put_sp(&mut self, context: OOP, new_val: Word) {
        self.memory
            .put_ptr(context, CTX_SP_INDEX, OOP::from(new_val))
    }

    /// Argument count stored in a block context.
    pub fn block_argument_count(&mut self, context: OOP) -> usize {
        self.memory.get_ptr(context, CTX_BLOCK_ARG_COUNT_INDEX).as_integer() as usize
    }

    /// A block context stores a SmallInteger (the initial ip) where a method
    /// context stores its method OOP — that's how the two are told apart.
    pub fn is_block_ctx(&self, context: OOP) -> bool {
        !self.memory.get_ptr(context, CTX_METHOD_INDEX).is_object()
    }

    /// Refreshes the cached registers (`home_context`, `receiver`, `method`,
    /// `ip`, `sp`, `call_depth`) from `active_context` after a switch.
    pub fn load_ctx(&mut self) {
        if self.is_block_ctx(self.active_context) {
            self.home_context = self.memory.get_ptr(self.active_context, CTX_HOME_INDEX);
        } else {
            self.home_context = self.active_context;
        }
        self.receiver = self.memory.get_ptr(self.home_context, CTX_RECEIVER_INDEX);
        self.method = self.memory.get_ptr(self.home_context, CTX_METHOD_INDEX);
        // The image stores 1-based values; the cached copies are 0-based,
        // and `sp` is additionally rebased to an absolute field index.
        self.ip = self.context_get_ip(self.active_context) as UWord as usize - 1;
        self.sp = self.context_get_sp(self.active_context) as UWord as usize
            + CTX_TEMPFRAME_START_INDEX
            - 1;
        // Recompute the sender-chain depth (used for log indentation).
        let mut sender = self.active_context;
        self.call_depth = 0;
        while sender != NIL_PTR {
            self.call_depth += 1;
            sender = self.memory.get_ptr(sender, CTX_SENDER_INDEX);
        }
    }

    /// Writes the cached `ip`/`sp` back into the active context, undoing the
    /// rebasing performed by `load_ctx`.
    pub fn save_ctx(&mut self) {
        self.context_put_ip(self.active_context, (self.ip + 1) as Word);
        self.context_put_sp(
            self.active_context,
            (self.sp as Word + 1 - CTX_TEMPFRAME_START_INDEX as Word),
        );
    }

    /// Pushes `oop` onto the active context's evaluation stack.
    pub fn push(&mut self, oop: OOP) {
        self.sp += 1;
        if DBG_MEM {
            println!("STCK {:?}_{} <- {}", self.active_context, self.sp - CTX_TEMPFRAME_START_INDEX, self.obj_name(oop));
        }
        self.memory.put_ptr(self.active_context, self.sp, oop);
    }

    /// Pops and returns the top of the evaluation stack.
    pub fn pop(&mut self) -> OOP {
        let top = self.stack_top();
        self.sp -= 1;
        top
    }

    /// Reads the stack entry `offset` slots below the top (0 = top).
    pub fn stack_value(&self, offset: usize) -> OOP {
        let value = self.memory.get_ptr(self.active_context, self.sp - offset);
        if DBG_MEM {
            println!("STCK {:?}_{} -> {}", self.active_context, self.sp as isize - offset as isize - CTX_TEMPFRAME_START_INDEX as isize, self.obj_name(value));
        }
        value
    }

    /// Top of the evaluation stack without popping.
    pub fn stack_top(&self) -> OOP {
        self.stack_value(0)
    }

    /// Discards `count` entries from the stack.
    pub fn popn(&mut self, count: usize) {
        self.sp -= count;
        if DBG_MEM {
            // Highlight (in green) an sp that dropped below the temp frame —
            // that would indicate stack underflow.
            println!("STCK {:?}_sp <- {}", self.active_context,
                if self.sp < CTX_TEMPFRAME_START_INDEX {
                    format!("\x1b[1;32m{}\x1b[0m", self.sp as isize - CTX_TEMPFRAME_START_INDEX as isize)
                } else {
                    format!("{}", self.sp - CTX_TEMPFRAME_START_INDEX)
                })
        }
    }

    /// Reverses a `popn`: restores `count` entries (their slots were never
    /// overwritten).
    pub fn unpopn(&mut self, count: usize) {
        self.sp += count;
        if DBG_MEM {
            println!("STCK {:?}_sp <- {}", self.active_context, self.sp - CTX_TEMPFRAME_START_INDEX)
        }
    }

    /// Switches execution to `ctx`: saves the current registers, swaps the
    /// refcounted active-context reference, and reloads the caches.
    pub fn new_active_context(&mut self, ctx: OOP) {
        self.save_ctx();
        self.memory.dec_ref(self.active_context);
        self.active_context = ctx;
        self.memory.inc_ref(self.active_context);
        self.load_ctx();
    }

    /// Sender of the *home* context (where a method-return goes).
    pub fn ctx_sender(&self) -> OOP {
        self.memory.get_ptr(self.home_context, CTX_SENDER_INDEX)
    }

    /// Caller of the *active* context (where a block-return goes).
    pub fn ctx_caller(&self) -> OOP {
        self.memory.get_ptr(self.active_context, CTX_SENDER_INDEX)
    }

    /// Reads temporary/argument `offset` from the home context's frame.
    pub fn ctx_get_temp(&self, offset: usize) -> OOP {
        let value = self.memory
            .get_ptr(self.home_context, offset + CTX_TEMPFRAME_START_INDEX);
        if DBG_MEM {
            println!("TEMP {:?}_{} -> {}", self.home_context, offset, self.obj_name(value));
        }
        value
    }

    /// Writes temporary/argument `offset` in the home context's frame.
    pub fn ctx_put_temp(&mut self, offset: usize, value: OOP) {
        if DBG_MEM {
            println!("TEMP {:?}_{} <- {}", self.home_context, offset, self.obj_name(value));
        }
        self.memory
            .put_ptr(self.home_context, offset + CTX_TEMPFRAME_START_INDEX, value)
    }

    /// Reads literal `offset` of the currently executing method.
    pub fn ctx_literal(&self, offset: usize) -> OOP {
        self.method_literal_of(self.method, offset)
    }
}
// method lookup constants: field layout of Class objects...
const SUPERCLASS_INDEX: usize = 0;
const MESSAGE_DICTIONARY_INDEX: usize = 1;
pub const INSTANCE_SPECIFICATION_INDEX: usize = 2;
// ...of MethodDictionary objects (selector slots start at 2)...
const METHOD_ARRAY_INDEX: usize = 1;
const SELECTOR_START: usize = 2;
// ...and of Message objects (reified sends for doesNotUnderstand:).
const MESSAGE_SELECTOR_INDEX: usize = 0;
const MESSAGE_ARGUMENTS_INDEX: usize = 1;
const MESSAGE_SIZE: usize = 2;
/// Method lookup
impl Interpreter {
    /// Identity hash of an OOP (its object id), used to probe dictionaries.
    pub fn oop_hash(&self, oop: OOP) -> usize {
        oop.as_oid() as UWord as usize
    }

    /// Reads a class's superclass link.
    pub fn superclass_of(&self, klass: OOP) -> OOP {
        self.memory.get_ptr(klass, SUPERCLASS_INDEX)
    }

    /// Probes `dict` for `self.message_selector`, starting at the selector's
    /// hash slot and wrapping around the selector area once. On a hit,
    /// `new_method`/`primitive_index` are set and `true` is returned; a nil
    /// slot or a full sweep means the selector is absent.
    pub fn lookup_method_in_dict(&mut self, dict: OOP) -> bool {
        let length = self.memory.get_word_length_of(dict);
        // Assumes `length - SELECTOR_START` is a power of two (standard for
        // Smalltalk-80 method dictionaries) so this mask is a valid modulus.
        let mask = length - SELECTOR_START - 1;
        let index = (mask & self.oop_hash(self.message_selector)) + SELECTOR_START;
        // Scan from the hash slot to the end, then wrap to the start.
        for index in (index..length).chain(SELECTOR_START..index) {
            let next_selector = self.memory.get_ptr(dict, index);
            if next_selector == NIL_PTR {
                return false;
            } else if next_selector == self.message_selector {
                let method_array = self.memory.get_ptr(dict, METHOD_ARRAY_INDEX);
                self.new_method = self.memory.get_ptr(method_array, index - SELECTOR_START);
                self.primitive_index = self.primitive_index(self.new_method);
                return true;
            }
        }
        false
    }

    /// Walks the superclass chain from `klass`, probing each class's message
    /// dictionary for the current selector. If nothing is found, reifies the
    /// send into a `Message` and retries with `doesNotUnderstand:`.
    pub fn lookup_method_in_class(&mut self, klass: OOP) -> bool {
        let mut current_class = klass;
        while current_class != NIL_PTR {
            if DBG_LOOKUP {
                println!("Looking in {:?}", current_class);
            }
            let dictionary = self.memory.get_ptr(current_class, MESSAGE_DICTIONARY_INDEX);
            if self.lookup_method_in_dict(dictionary) {
                return true;
            }
            current_class = self.superclass_of(current_class);
        }
        // A missing doesNotUnderstand: handler would loop forever here.
        if self.message_selector == DOES_NOT_UNDERSTAND_SEL {
            panic!("Recursive not understood error encountered")
        }
        self.create_actual_message();
        println!("[cycle={}] {:depth$} DNU", self.cycle, "", depth = self.call_depth);
        self.message_selector = DOES_NOT_UNDERSTAND_SEL;
        self.lookup_method_in_class(klass)
    }

    /// Reifies a failed send: moves the pending arguments off the stack into
    /// a fresh Array, wraps selector + arguments in a Message object, pushes
    /// that Message, and resets `argument_count` to 1 (the Message is the
    /// single argument of `doesNotUnderstand:`).
    pub fn create_actual_message(&mut self) {
        // Protect the fresh allocations from GC until they're reachable.
        let protector = self.oop_protector();
        let argument_array = protector.push(self.instantiate_class(
            CLASS_ARRAY_PTR,
            self.argument_count,
            ObjectLayout::Pointer,
        ));
        let message = protector.push(
            self.instantiate_class(CLASS_MESSAGE_PTR, MESSAGE_SIZE, ObjectLayout::Pointer),
        );
        self.memory
            .put_ptr(message, MESSAGE_SELECTOR_INDEX, self.message_selector);
        self.memory
            .put_ptr(message, MESSAGE_ARGUMENTS_INDEX, argument_array);
        self.memory.transfer_fields(
            self.argument_count,
            self.active_context,
            self.sp - self.argument_count + 1,
            argument_array,
            0,
        );
        self.popn(self.argument_count);
        self.push(message);
        self.argument_count = 1;
    }
}
// Instance specification: a class's bit-packed description of its instances.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
pub(crate) struct InstanceSpecification(usize);
impl InstanceSpecification {
    /// Bit 14: instances hold object pointers.
    pub fn is_pointers(self) -> bool {
        self.0 & 0x4000 != 0
    }
    /// Bit 13: instances hold word-sized (rather than byte-sized) data.
    pub fn is_words(self) -> bool {
        self.0 & 0x2000 != 0
    }
    /// Bit 12: instances may carry indexed fields beyond the fixed ones.
    pub fn is_indexable(self) -> bool {
        self.0 & 0x1000 != 0
    }
    /// Number of fixed (named) instance fields (low 11 bits).
    pub fn fixed_fields(self) -> usize {
        self.0 & 0x7FF
    }
}
impl From<OOP> for InstanceSpecification {
    /// Decodes the specification from its SmallInteger representation.
    fn from(oop: OOP) -> Self {
        InstanceSpecification(oop.as_integer() as UWord as usize)
    }
}
impl Interpreter {
    /// Reads and decodes `klass`'s instance specification (its field 2).
    pub(crate) fn instance_specification(&self, klass: OOP) -> InstanceSpecification {
        self.memory
            .get_ptr(klass, INSTANCE_SPECIFICATION_INDEX)
            .into()
    }
}
/// A decoded Smalltalk-80 bytecode (see `decode_insn` for the encoding).
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
pub enum Insn {
    // Pushes onto the evaluation stack (payload = index).
    PushReceiverVar(usize),
    PushTemporary(usize),
    PushConst(OOP),
    PushLiteralConst(usize),
    PushLiteralVar(usize),
    // Pops the top of stack into the named slot.
    PopReceiverVar(usize),
    PopTemporary(usize),
    PopLiteralVar(usize),
    PushReceiver,
    // Method returns (to the home context's sender).
    MessageReturnRcvr,
    MessageReturnOOP(OOP),
    MessageReturn,
    // Block return (to the active context's caller).
    BlockReturn,
    // Stores without popping.
    StoreReceiverVar(usize),
    StoreTemporary(usize),
    StoreLiteralVar(usize),
    SendLiteral(usize, usize), // selector, arguments
    SendLiteralSuper(usize, usize),
    Pop,
    Dup,
    PushCtx,
    // Relative jumps; offsets may be negative.
    Jump(isize),
    JumpFalse(isize), // pop and jump
    JumpTrue(isize),
    // Send of a special selector (index into the special-selectors table).
    SendSpecial(usize),
    // Undecodable byte sequences, preserved for error reporting.
    Illegal1([u8; 1]),
    Illegal2([u8; 2]),
    Illegal3([u8; 3]),
}
/// The actual interpreter
impl Interpreter {
    /// Fetches the next bytecode byte and advances `ip`.
    pub fn fetch_insn_byte(&mut self) -> u8 {
        let insn = self.memory.get_byte(self.method, self.ip);
        self.ip += 1;
        insn
    }

    /// Main loop: executes cycles forever, printing a cycles-per-second
    /// benchmark line once per 5-second bucket.
    pub fn interpret(&mut self) {
        loop {
            self.cycle();
            let bmark_time = self.startup_time.elapsed().as_secs() / 5;
            if bmark_time != self.bmark_lastprint {
                self.bmark_lastprint = bmark_time;
                let cycles = (self.cycle - self.bmark_cycles)/5;
                self.bmark_cycles = self.cycle;
                println!("BMRK Cycles/s: {}", cycles);
            }
        }
    }

    /// One interpreter cycle: honor any pending process switch, then decode
    /// and dispatch a single bytecode.
    pub fn cycle(&mut self) {
        self.check_process_switch();
        let (insn, sz) = Self::decode_insn(self.memory.get_bytes(self.method), self.ip);
        self.ip += sz;
        self.cycle += 1;
        if DBG_INSN {
            println!("[cycle={}] Insn: {:?}", self.cycle, insn);
        }
        self.dispatch(insn);
    }

    /// Decodes the bytecode at `ip`, returning the instruction and the
    /// number of bytes it occupied (1-3).
    pub fn decode_insn(bytecode: &[u8], ip: usize) -> (Insn, usize) {
        let mut fetch_ip = ip;
        // Closure advances a local cursor; the consumed size is derived from
        // it at the end rather than mutating the interpreter's ip directly.
        let mut next_byte = || {
            let byte = bytecode[fetch_ip];
            // println!("Insn byte: {:3} (0x{:x})", byte, byte);
            fetch_ip += 1;
            byte
        };
        let insn = next_byte();
        let decoded = match insn {
            // Short-form pushes/pops with the index packed into the opcode.
            0x00...0x0F => Insn::PushReceiverVar(insn as usize & 0xF),
            0x10...0x1F => Insn::PushTemporary(insn as usize & 0xF),
            0x20...0x3F => Insn::PushLiteralConst(insn as usize & 0x1F),
            0x40...0x5F => Insn::PushLiteralVar(insn as usize & 0x1F),
            0x60...0x67 => Insn::PopReceiverVar(insn as usize & 0x07),
            0x68...0x6F => Insn::PopTemporary(insn as usize & 0x07),
            0x70 => Insn::PushReceiver,
            // Common constants get dedicated opcodes.
            0x71 => Insn::PushConst(TRUE_PTR),
            0x72 => Insn::PushConst(FALSE_PTR),
            0x73 => Insn::PushConst(NIL_PTR),
            0x74 => Insn::PushConst(OOP::from(-1)),
            0x75 => Insn::PushConst(OOP::from(0)),
            0x76 => Insn::PushConst(OOP::from(1)),
            0x77 => Insn::PushConst(OOP::from(2)),
            0x78 => Insn::MessageReturnRcvr,
            0x79 => Insn::MessageReturnOOP(TRUE_PTR),
            0x7A => Insn::MessageReturnOOP(FALSE_PTR),
            0x7B => Insn::MessageReturnOOP(NIL_PTR),
            0x7C => Insn::MessageReturn,
            0x7D => Insn::BlockReturn,
            0x7E...0x7F => Insn::Illegal1([insn]),
            // Extended push: kind in the top 2 bits, index in the low 6.
            0x80 => {
                let next = next_byte();
                let sub = next as usize & 0x3F;
                match next & 0xC0 {
                    0x00 => Insn::PushReceiverVar(sub),
                    0x40 => Insn::PushTemporary(sub),
                    0x80 => Insn::PushLiteralConst(sub),
                    0xC0 => Insn::PushLiteralVar(sub),
                    _ => unreachable!(),
                }
            }
            // Extended store (no pop); literal-const target is illegal.
            0x81 => {
                let next = next_byte();
                let sub = next as usize & 0x3F;
                match next & 0xC0 {
                    0x00 => Insn::StoreReceiverVar(sub),
                    0x40 => Insn::StoreTemporary(sub),
                    0x80 => Insn::Illegal2([insn, next]),
                    0xC0 => Insn::StoreLiteralVar(sub),
                    _ => unreachable!(),
                }
            }
            // Extended store-and-pop; literal-const target is illegal.
            0x82 => {
                let next = next_byte();
                let sub = next as usize & 0x3F;
                match next & 0xC0 {
                    0x00 => Insn::PopReceiverVar(sub),
                    0x40 => Insn::PopTemporary(sub),
                    0x80 => Insn::Illegal2([insn, next]),
                    0xC0 => Insn::PopLiteralVar(sub),
                    _ => unreachable!(),
                }
            }
            // Single-extended send: selector index (5 bits) + arg count (3).
            0x83 => {
                let next = next_byte() as usize;
                Insn::SendLiteral(next & 0x1F, next >> 5)
            }
            // Double-extended send: full-byte arg count, then selector.
            0x84 => {
                let args = next_byte() as usize;
                let sel = next_byte() as usize;
                Insn::SendLiteral(sel, args)
            }
            0x85 => {
                let next = next_byte() as usize;
                Insn::SendLiteralSuper(next & 0x1F, next >> 5)
            }
            0x86 => {
                let args = next_byte() as usize;
                let sel = next_byte() as usize;
                Insn::SendLiteralSuper(sel, args)
            }
            0x87 => Insn::Pop,
            0x88 => Insn::Dup,
            0x89 => Insn::PushCtx,
            0x8A...0x8F => Insn::Illegal1([insn]),
            // Short forward jumps: offset 1-8 packed into the opcode.
            0x90...0x97 => Insn::Jump((insn as isize & 0x7) + 1),
            0x98...0x9F => Insn::JumpFalse((insn as isize & 0x7) + 1),
            // Long unconditional jump: signed 11-bit offset (the -4 bias
            // makes the range symmetric around zero).
            0xA0...0xA7 => {
                let next = next_byte() as isize;
                Insn::Jump((((insn as isize & 0x7) - 4) << 8) + next)
            }
            // Long conditional jumps: unsigned 10-bit forward offset.
            0xA8...0xAB => {
                let next = next_byte() as isize;
                Insn::JumpTrue(((insn as isize & 0x3) << 8) + next)
            }
            0xAC...0xAF => {
                let next = next_byte() as isize;
                Insn::JumpFalse(((insn as isize & 0x3) << 8) + next)
            }
            // Special-selector sends (arithmetic, comparison, etc.).
            0xB0...0xCF => Insn::SendSpecial(insn as usize - 0xB0),
            // Short literal sends with 0/1/2 arguments.
            0xD0...0xDF => Insn::SendLiteral(insn as usize & 0xF, 0),
            0xE0...0xEF => Insn::SendLiteral(insn as usize & 0xF, 1),
            0xF0...0xFF => Insn::SendLiteral(insn as usize & 0xF, 2),
        };
        (decoded, fetch_ip - ip)
    }

    /// Executes one decoded instruction against the current context.
    pub fn dispatch(&mut self, insn: Insn) {
        match insn {
            Insn::PushReceiverVar(i) => {
                // push receiver variable
                self.push(self.memory.get_ptr(self.receiver, i));
            }
            Insn::PushTemporary(i) => {
                // push temporary
                self.push(self.ctx_get_temp(i));
            }
            Insn::PushConst(oop) => self.push(oop),
            Insn::PushLiteralConst(i) => self.push(self.ctx_literal(i)),
            Insn::PushLiteralVar(i) => {
                // Literal is an Association; push its value field.
                self.push(self.memory.get_ptr(self.ctx_literal(i), VALUE_INDEX))
            }
            Insn::PopLiteralVar(i) => {
                let value = self.pop();
                let literal = self.ctx_literal(i);
                self.memory.put_ptr(literal, VALUE_INDEX, value)
            }
            Insn::StoreLiteralVar(i) => {
                self.memory
                    .put_ptr(self.ctx_literal(i), VALUE_INDEX, self.stack_top())
            }
            Insn::PopReceiverVar(i) => {
                let value = self.pop();
                self.memory.put_ptr(self.receiver, i, value)
            }
            Insn::StoreReceiverVar(i) => self.memory.put_ptr(self.receiver, i, self.stack_top()),
            Insn::PopTemporary(i) => {
                let value = self.pop();
                self.ctx_put_temp(i, value)
            }
            Insn::StoreTemporary(i) => {
                self.ctx_put_temp(i, self.stack_top());
            },
            Insn::PushReceiver => self.push(self.receiver),
            Insn::PushCtx => self.push(self.active_context),
            Insn::Dup => self.push(self.stack_top()),
            Insn::Pop => {
                self.pop();
            }
            Insn::Jump(i) => {
                self.ip = (self.ip as isize + i) as usize;
                // Jumps are interruption points so tight loops stay
                // responsive to timers/semaphores.
                self.interruption_point();
            }
            Insn::JumpFalse(i) => self.conditional_jump(FALSE_PTR, i),
            Insn::JumpTrue(i) => self.conditional_jump(TRUE_PTR, i),
            Insn::BlockReturn => {
                // Blocks return to the caller of the active (block) context.
                let value = self.pop();
                self.return_value(self.ctx_caller(), value)
            }
            Insn::MessageReturn => {
                // Methods return to the sender of the home context.
                let value = self.pop();
                self.return_value(self.ctx_sender(), value)
            }
            Insn::MessageReturnOOP(oop) => self.return_value(self.ctx_sender(), oop),
            Insn::MessageReturnRcvr => self.return_value(self.ctx_sender(), self.receiver),
            Insn::SendLiteral(sel, args) => self.send_selector(self.ctx_literal(sel), args),
            Insn::SendLiteralSuper(sel, args) => {
                // Super-send: lookup starts above the *defining* class of the
                // current method, not the receiver's class.
                self.argument_count = args;
                self.message_selector = self.ctx_literal(sel);
                self.send_selector_to_class(self.superclass_of(self.method_class(self.method)));
            }
            Insn::SendSpecial(sel) => {
                // Try the primitive fast path first; fall back to a normal
                // send using the selector/arg-count pair from the special
                // selectors table.
                if self.special_selector_primitive_response(sel).is_none() {
                    let selector = self.memory.get_ptr(SPECIAL_SELECTORS_PTR, sel * 2);
                    let count = self
                        .memory
                        .get_ptr(SPECIAL_SELECTORS_PTR, sel * 2 + 1)
                        .as_integer() as UWord as usize;
                    self.send_selector(selector, count);
                }
            }
            Insn::Illegal1(enc) => panic!("Illegal opcode {:?}", enc),
            Insn::Illegal2(enc) => panic!("Illegal opcode {:?}", enc),
            Insn::Illegal3(enc) => panic!("Illegal opcode {:?}", enc),
        }
    }

    /// Returns `value` to context `ctx`. A nil target or an already-returned
    /// target (nil ip) triggers a `cannotReturn:` send instead.
    fn return_value(&mut self, ctx: OOP, value: OOP) {
        let protect = self.oop_protector();
        self.interruption_point();
        if ctx == NIL_PTR {
            self.push(self.active_context);
            self.push(value);
            self.send_selector(CANNOT_RETURN_SEL, 1);
            return;
        }
        let sender_ip = self.memory.get_ptr(ctx, CTX_IP_INDEX);
        if sender_ip == NIL_PTR {
            self.push(self.active_context);
            self.push(value);
            self.send_selector(CANNOT_RETURN_SEL, 1);
            return;
        }
        if DBG_CALL {
            println!("[cycle={}] {:depth$} RETN {}", self.cycle, "", self.obj_name(value), depth = self.call_depth);
        }
        // Keep `value` alive across the context teardown, then hand the
        // reference over to the target context's stack.
        self.memory.inc_ref(value);
        protect.push(value);
        self.return_to_active_context(ctx);
        self.push(value);
        self.memory.dec_ref(value);
    }

    /// Tears down the current context (nil-ing its sender/ip so it can be
    /// collected) and makes `ctx` the active context.
    fn return_to_active_context(&mut self, ctx: OOP) {
        let protect = self.oop_protector();
        protect.push(ctx);
        // Hold a reference to `ctx` while the old context's fields are
        // nil-ed — dropping those fields could otherwise free it.
        self.memory.inc_ref(ctx);
        self.nil_context_fields();
        self.memory.dec_ref(ctx);
        self.active_context = ctx;
        self.load_ctx();
    }

    /// Clears the active context's sender and ip; a nil ip marks the context
    /// as dead (see the `cannotReturn:` check in `return_value`).
    fn nil_context_fields(&mut self) {
        self.memory
            .put_ptr(self.active_context, CTX_SENDER_INDEX, NIL_PTR);
        self.memory
            .put_ptr(self.active_context, CTX_IP_INDEX, NIL_PTR);
    }

    /// Begins a send: the receiver sits `arg_count` slots below the stack
    /// top; lookup starts at its class.
    fn send_selector(&mut self, selector: OOP, arg_count: usize) {
        self.message_selector = selector;
        self.argument_count = arg_count;
        let new_receiver = self.stack_value(arg_count);
        self.send_selector_to_class(self.memory.get_class_of(new_receiver));
    }

    /// Resolves the current selector against `klass` and runs the result.
    fn send_selector_to_class(&mut self, klass: OOP) {
        if DBG_CALL {
            println!("[cycle={}] {:depth$} SEND {}", self.cycle, "", self.print_methodcall(), depth = self.call_depth);
        }
        // println!("Send selector {:?}", read_st_string(&self.memory, self.message_selector));
        self.find_new_method_in_class(klass);
        self.execute_new_method();
    }

    /// Method lookup with a direct-mapped (selector, class) cache in front
    /// of the full superclass-chain search.
    fn find_new_method_in_class(&mut self, klass: OOP) {
        let hash = (self.message_selector.as_raw() ^ klass.as_raw()) & 0xFF;
        let cached: &mut MethodCacheEntry = &mut self.method_cache[hash as UWord as usize];
        let found = cached.selector == self.message_selector && cached.klass == klass;
        if found {
            // println!("Cached method {:?}", cached.new_method);
            self.new_method = cached.new_method;
            self.primitive_index = cached.primitive_index;
        } else {
            self.lookup_method_in_class(klass);
            // Re-borrow: the lookup above needed `&mut self` too.
            let cached: &mut MethodCacheEntry = &mut self.method_cache[hash as UWord as usize];
            cached.selector = self.message_selector;
            cached.klass = klass;
            cached.new_method = self.new_method;
            cached.primitive_index = self.primitive_index;
        }
    }

    /// Runs the freshly-looked-up method: primitive first, and only if the
    /// primitive fails (or there is none) activate a full method context.
    fn execute_new_method(&mut self) {
        // println!("Executing method {:?}", self.new_method);
        if self.primitive_response().is_none() {
            self.activate_new_method();
            self.interruption_point();
        } else {
            // println!("Handled primitively");
        }
    }

    /// Attempts to satisfy the send with a primitive. `Some(())` means the
    /// send is complete; `None` means the method body must be activated.
    fn primitive_response(&mut self) -> Option<()> {
        let prim = self.primitive_index;
        if self.primitive_index == 0 {
            // No numbered primitive — but the header flags may still encode
            // the quick "return self" / "return field" primitives.
            let header = self.method_header_of(self.new_method);
            match header.flag_value() {
                HeaderFlag::PrimSelf => {
                    if DBG_INSN {
                        println!("Prim self");
                    }
                    // Receiver is already on top of the stack; nothing to do.
                    Some(())
                },
                HeaderFlag::PrimReturn => {
                    let field_idx = header.field_index();
                    if DBG_INSN {
                        println!("Prim return {}", field_idx);
                    }
                    // Replace the receiver on the stack with one of its fields.
                    let this_rcvr = self.pop();
                    let value = self.memory.get_ptr(this_rcvr, field_idx);
                    self.push(value);
                    Some(())
                }
                _ => None,
            }
        } else {
            let old_context = self.active_context;
            let res = self.dispatch_prim();
            if DBG_CALL {
                // A primitive may fail, switch contexts, or leave a result
                // on the stack — log which happened.
                let failflag = if res.is_none() {
                    " FAIL".to_string()
                } else if self.active_context != old_context {
                    " (new context)".to_string()
                } else {
                    format!(" => {}", self.obj_name(self.stack_top()))
                };
                println!("[cycle={}] {:depth$} PRIM {}{}",
                         self.cycle, "", prim, failflag,
                         depth = self.call_depth);
            }
            res
        }
    }

    /// Builds a MethodContext for `new_method`, moves receiver + arguments
    /// into it, and switches execution to it.
    fn activate_new_method(&mut self) {
        let context_size = if self.method_header_of(self.new_method).large_context_flag() {
            64 + CTX_TEMPFRAME_START_INDEX
        } else {
            36 + CTX_TEMPFRAME_START_INDEX
        };
        let new_ctx = self.instantiate_class(CLASS_METHOD_CONTEXT_PTR, context_size, ObjectLayout::Pointer);
        self.memory.put_ptr(new_ctx, CTX_SENDER_INDEX, self.active_context);
        let new_method_header = self.method_header_of(self.new_method);
        self.context_put_ip(new_ctx, new_method_header.initial_ip() as Word);
        // Temporaries beyond the arguments start out nil; sp covers them all.
        self.context_put_sp(new_ctx, new_method_header.temporary_count() as Word);
        self.memory.put_ptr(new_ctx, CTX_METHOD_INDEX, self.new_method);
        // Copy receiver + arguments from the caller's stack into the new
        // context's receiver/temp slots, then drop them from the caller.
        self.memory.transfer_fields(
            self.argument_count + 1,
            self.active_context,
            self.sp - self.argument_count,
            new_ctx,
            CTX_RECEIVER_INDEX,
        );
        self.popn(self.argument_count + 1);
        self.new_active_context(new_ctx)
    }

    /// Pops the condition and jumps when it equals `condition`. A non-boolean
    /// value is left on the stack and `mustBeBoolean` is sent instead.
    fn conditional_jump(&mut self, condition: OOP, offset: isize) {
        self.interruption_point();
        let value = self.pop();
        if value == condition {
            self.ip = (self.ip as isize + offset) as usize;
        } else {
            if (value != TRUE_PTR) && (value != FALSE_PTR) {
                self.unpopn(1);
                self.send_selector(MUST_BE_BOOLEAN_SEL, 0);
            }
        }
    }
}
/// One slot of the method-lookup cache: maps a (selector, class) pair to
/// the method (and its primitive index) found by the last full lookup.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Default)]
struct MethodCacheEntry {
    selector: OOP, // message selector this slot was filled for
    klass: OOP, // receiver class this slot was filled for
    new_method: OOP, // method found by lookup_method_in_class
    primitive_index: usize, // primitive index of that method
}
// primitives
impl Interpreter {
    /// Allocate an instance of `klass`; retries once after a GC and dumps
    /// a crash image before panicking if memory is still exhausted.
    fn instantiate_class(&mut self, klass: OOP, size: usize, layout: ObjectLayout) -> OOP {
        if self.dbg_alloc.get() {
            println!("ALOC {} {:?} for {:?}", size, layout, klass);
        }
        let new_val = self.memory.instantiate_class(klass, size, layout)
            .or_else(|| {
                self.gc();
                self.memory.instantiate_class(klass, size, layout)
            });
        if let Some(oop) = new_val {
            return oop;
        } else {
            use crate::objectmemory::ImageFormat;
            self.save_ctx();
            self.backtrace();
            crate::objectmemory::text_format::TextFormat::save("crash.dump", &self.memory);
            panic!("Out of OOPs at context {:?}", self.active_context);
        }
    }
    /// Best-effort load of the OOP -> selector-name debugging table that
    /// sits next to the original image; only used to label backtraces.
    fn read_method_table(&self) -> Option<HashMap<OOP, String>> {
        let mut method_table: HashMap<OOP, String> = HashMap::new();
        let mt_file = File::open("../original-image/method_table").ok()?;
        use std::io::{BufReader, prelude::*};
        let r = BufReader::new(mt_file);
        // Each line is "<hex oop>\t<selector name>".
        for line in r.lines() {
            let line = line.ok()?;
            let mut fields = line.split('\t');
            let raw_oop = u16::from_str_radix(fields.next()?, 16).ok()?;
            let name = fields.next()?.trim();
            method_table.insert(OOP(raw_oop as Word), name.to_owned());
        }
        Some(method_table)
    }
    /// Print a context backtrace (innermost first) for crash diagnostics.
    fn backtrace(&self) {
        let method_table = self.read_method_table()
            .unwrap_or_default();
        let mut ctx = self.active_context;
        while ctx != NIL_PTR {
            // Block contexts report the method of their home context.
            let (block_str, home) = if self.is_block_ctx(ctx) {
                (" [block]", self.memory.get_ptr(ctx, CTX_HOME_INDEX))
            } else {
                ("", ctx)
            };
            let method = self.memory.get_ptr(home, CTX_METHOD_INDEX);
            let ip = self.memory.get_ptr(ctx, CTX_IP_INDEX);
            println!("\t{} @ {}{}",
                method_table.get(&method).map(|method| method.to_owned())
                    .unwrap_or_else(|| format!("{:?}", method)),
                ip.as_integer(),
                block_str,
            );
            ctx = self.memory.get_ptr(ctx, CTX_SENDER_INDEX);
        }
    }
    /// Box an f32 into a fresh instance of class Float.
    fn new_float(&mut self, value: f32) -> OOP {
        let obj = self.instantiate_class(CLASS_FLOAT_PTR, 2, ObjectLayout::Word);
        self.memory.put_float(obj, value);
        obj
    }
    /// Guard object that keeps OOPs alive (extra GC roots) across
    /// allocations.
    fn oop_protector(&self) -> HeldOops {
        HeldOops::new(&self)
    }
    /// Mark-and-sweep over everything reachable from the active context,
    /// the global dictionary, and the explicitly protected objects.
    fn gc(&mut self) {
        self.memory.clear_marks();
        let mut queue = Vec::new();
        queue.push(self.active_context);
        // global dictionary
        queue.push(OOP::pointer(0x12));
        // protected objects
        for level in self.held_objects.borrow().iter() {
            for obj in level.iter() {
                queue.push(*obj)
            }
        }
        self.memory.trace(queue);
        #[allow(unused)]
        let dropped = self.memory.drop_unmarked();
        let oops_left = self.memory.oops_left();
        // Start logging allocations once we are nearly out of OOPs.
        self.dbg_alloc.set(oops_left < 100);
    }
    /// Read field `field` of `oop` as a SmallInteger; fails if the slot
    /// holds a pointer.
    fn get_integer(&mut self, oop: OOP, field: usize) -> Option<Word> {
        let iptr = self.memory.get_ptr(oop, field);
        if iptr.is_integer() {
            Some(iptr.as_integer())
        } else {
            None
        }
    }
    /// Store `value` into field `field` of `oop` as a SmallInteger;
    /// fails if the value does not fit.
    fn put_integer(&mut self, oop: OOP, field: usize, value: Word) -> Option<()> {
        self.memory
            .put_ptr(oop, field, OOP::try_from_integer(value)?);
        Some(())
    }
    /// Pop the stack top and interpret it as a SmallInteger.
    fn pop_integer(&mut self) -> Option<Word> {
        let stack_top = self.pop();
        if stack_top.is_integer() {
            Some(stack_top.as_integer())
        } else {
            None
        }
    }
    /// Push `int` as a SmallInteger; fails if it does not fit.
    fn push_integer(&mut self, int: Word) -> Option<()> {
        self.push(OOP::try_from_integer(int)?);
        Some(())
    }
    /// Wrap `int` as a SmallInteger when it fits, otherwise build a
    /// little-endian LargePositiveInteger (at least two bytes long).
    fn long_integer_for(&mut self, int: usize) -> OOP {
        let oop = OOP::from(int as Word);
        if oop.as_integer() as isize as usize == int {
            oop
        } else {
            // Count the bytes needed to represent the value.
            let mut i = 0;
            let mut itmp = int;
            while itmp != 0 {
                itmp >>= 8;
                i += 1;
            }
            if i < 2 {
                i += 2;
            }
            let obj = self.instantiate_class(
                CLASS_LARGE_POSITIVEINTEGER_PTR,
                i,
                ObjectLayout::Byte,
            );
            itmp = int;
            for j in 0..i {
                self.memory.put_byte(obj, j, (itmp & 0xff) as u8);
                itmp >>= 8;
            }
            obj
        }
    }
    /// Read an unsigned value out of either a SmallInteger or a
    /// little-endian LargePositiveInteger.
    fn long_integer_value_of(&self, oop: OOP) -> Option<usize> {
        if oop.is_integer() {
            Some(oop.as_integer() as usize)
        } else if self.memory.get_class_of(oop) == CLASS_LARGE_POSITIVEINTEGER_PTR {
            let mut result = 0;
            for i in 0..self.memory.get_byte_length_of(oop) {
                // `+` binds tighter than `<<` in Rust, so the previous
                // unparenthesized form shifted the whole accumulated sum
                // instead of just this byte; parenthesize the shift.
                result += (self.memory.get_byte(oop, i) as usize) << (i * 8);
            }
            Some(result)
        } else {
            None
        }
    }
    // Selectors
    /// Try to answer a special-selector send without a method lookup.
    /// `sel` is the index into the SpecialSelectors array.
    fn special_selector_primitive_response(&mut self, sel: usize) -> Option<()> {
        if sel < 16 {
            self.prim_arith(sel)
        } else {
            self.prim_common(sel)
        }
    }
    /// Special selectors 0-15, in image order:
    /// + - < > <= >= = ~= * / \\ @ bitShift: // bitAnd: bitOr:
    fn prim_arith(&mut self, sel: usize) -> Option<()> {
        match sel {
            0 => self.prim_add(),
            1 => self.prim_sub(),
            2 => self.prim_lt(),
            3 => self.prim_gt(),
            4 => self.prim_le(),
            5 => self.prim_ge(),
            6 => self.prim_eq(),
            7 => self.prim_ne(),
            8 => self.prim_mul(),
            9 => self.prim_divide(),
            10 => self.prim_mod(),
            11 => self.prim_mk_point(),
            12 => self.prim_bitshift(),
            13 => self.prim_div(),
            14 => self.prim_bitand(),
            15 => self.prim_bitor(),
            _ => panic!("Unimplemented arith primitive"),
        }
    }
    /// Special selectors 16-31: only == (22), class (23), blockCopy:
    /// (24) and value/value: (25/26) have primitive responses; all
    /// others fall back to a full send.
    fn prim_common(&mut self, sel: usize) -> Option<()> {
        let argument_count = self.get_integer(SPECIAL_SELECTORS_PTR, sel * 2 + 1)?;
        let rcvr_klass = self
            .memory
            .get_class_of(self.stack_value(argument_count as usize));
        // Dispatch on the special-selector index itself (Blue Book
        // specialSelectorPrimitiveResponse). The previous version matched
        // on `self.primitive_index`, which at this point still holds the
        // value cached by the *preceding* method lookup.
        match sel {
            22 => self.prim_equiv(),
            23 => self.prim_class(),
            24 => {
                if rcvr_klass == CLASS_METHOD_CONTEXT_PTR || rcvr_klass == CLASS_BLOCK_CONTEXT_PTR {
                    self.prim_block_copy()
                } else {
                    None
                }
            }
            25 | 26 => {
                if rcvr_klass == CLASS_BLOCK_CONTEXT_PTR {
                    self.prim_value()
                } else {
                    None
                }
            }
            _ => None,
        }
    }
    /// Route a numbered primitive to its family dispatcher.
    fn dispatch_prim(&mut self) -> Option<()> {
        // `..=` replaces the deprecated `...` inclusive-range syntax
        // (a hard error in edition 2021).
        match self.primitive_index {
            0..=19 => self.dispatch_prim_arith(),
            40..=59 => self.dispatch_prim_float(),
            60..=67 => self.dispatch_prim_sub_and_stream(),
            68..=79 => self.dispatch_prim_storage(),
            80..=89 => self.dispatch_prim_control(),
            90..=109 => self.dispatch_prim_io(),
            110..=127 => self.dispatch_prim_system(),
            128..=255 => self.dispatch_prim_private(),
            _ => None,
        }
    }
    /// Numbered primitives 1-18: SmallInteger arithmetic.
    fn dispatch_prim_arith(&mut self) -> Option<()> {
        match self.primitive_index {
            1 => self.prim_add(),
            2 => self.prim_sub(),
            3 => self.prim_lt(),
            4 => self.prim_gt(),
            5 => self.prim_le(),
            6 => self.prim_ge(),
            7 => self.prim_eq(),
            8 => self.prim_ne(),
            9 => self.prim_mul(),
            10 => self.prim_divide(),
            11 => self.prim_mod(),
            12 => self.prim_div(),
            13 => self.prim_quo(),
            14 => self.prim_bitand(),
            15 => self.prim_bitor(),
            16 => self.prim_bitxor(),
            17 => self.prim_bitshift(),
            18 => self.prim_mk_point(),
            _ => None,
        }
    }
    /// SmallInteger +; fails on non-integers or overflow.
    fn prim_add(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr.checked_add(arg)?)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger -; fails on non-integers or overflow.
    fn prim_sub(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr.checked_sub(arg)?)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger *; fails on non-integers or overflow.
    fn prim_mul(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr.checked_mul(arg)?)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger /; fails unless the division is exact.
    fn prim_divide(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        if arg == 0 {
            return None;
        } else if rcvr % arg != 0 {
            return None;
        }
        let result = OOP::try_from_integer(rcvr / arg)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger \\ (modulo, floor semantics).
    fn prim_mod(&mut self) -> Option<()> {
        // round towards -inf; 0 <= mod < arg
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        if arg == 0 {
            return None
        }
        let (_div, rem) = floor_divmod(rcvr, arg);
        let result = OOP::try_from_integer(rem)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger // (integer division, floor semantics).
    fn prim_div(&mut self) -> Option<()> {
        // round towards -inf
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        if arg == 0 {
            return None;
        }
        let (div, _rem) = floor_divmod(rcvr, arg);
        let result = OOP::try_from_integer(div)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger quo: (integer division, truncating toward zero).
    fn prim_quo(&mut self) -> Option<()> {
        // round towards 0
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        if arg == 0 {
            return None;
        }
        let result = OOP::try_from_integer(rcvr / arg)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
}
// Generates a SmallInteger comparison primitive: pops the receiver and
// argument and pushes true/false according to `$op`. Fails (returns
// None) without touching the stack when either operand is not a
// SmallInteger.
macro_rules! defprim_compare {
    ($name:ident, $op:tt) => {
        fn $name(&mut self) -> Option<()> {
            let arg = self.stack_value(0).try_as_integer()?;
            let rcvr = self.stack_value(1).try_as_integer()?;
            if DBG_CALL {
                println!(concat!("SEND {} ", stringify!($op), " {}"), rcvr, arg);
            }
            self.popn(2);
            if rcvr $op arg {
                self.push(TRUE_PTR);
            } else {
                self.push(FALSE_PTR);
            }
            Some(())
        }
    }
}
impl Interpreter {
    // SmallInteger comparison primitives, generated by defprim_compare.
    defprim_compare!(prim_eq, ==);
    defprim_compare!(prim_ne, !=);
    defprim_compare!(prim_lt, <);
    defprim_compare!(prim_gt, >);
    defprim_compare!(prim_le, <=);
    defprim_compare!(prim_ge, >=);
    /// SmallInteger bitAnd:.
    fn prim_bitand(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr & arg)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger bitOr:.
    fn prim_bitor(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr | arg)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger bitXor:.
    fn prim_bitxor(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = OOP::try_from_integer(rcvr ^ arg)?;
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// SmallInteger bitShift:: a negative argument shifts right
    /// (arithmetic), a positive one shifts left and fails on overflow.
    fn prim_bitshift(&mut self) -> Option<()> {
        let arg = self.stack_value(0).try_as_integer()?;
        let rcvr = self.stack_value(1).try_as_integer()?;
        let result = if arg < 0 {
            let arg = -arg as usize;
            // TODO: Word size dependent
            if arg > 15 {
                // This will shift in the sign bit
                OOP::try_from_integer(rcvr >> 15)?
            } else {
                OOP::try_from_integer(rcvr >> arg)?
            }
        } else {
            let arg = arg as usize;
            if arg > 15 {
                return None
            }
            let res = OOP::try_from_integer(rcvr << arg)?;
            // Shifting back must reproduce the receiver, otherwise
            // significant bits were lost and the primitive fails.
            if res.as_integer() >> arg == rcvr {
                res
            } else {
                return None;
            }
        };
        self.popn(2);
        self.push(result);
        Some(())
    }
}
// Instance layout of class Point.
const CLASS_POINT_SIZE: usize = 2; // number of fixed fields
const CLASS_POINT_X: usize = 0; // field index of `x`
const CLASS_POINT_Y: usize = 1; // field index of `y`
impl Interpreter {
    /// Special selector @ / primitive 18: build a Point from two
    /// SmallInteger coordinates (receiver @ argument). Fails, leaving
    /// the stack untouched, when either operand is not an integer.
    fn prim_mk_point(&mut self) -> Option<()> {
        let y_coord = self.stack_value(0);
        let x_coord = self.stack_value(1);
        // Validate both operands before allocating anything.
        y_coord.try_as_integer()?;
        x_coord.try_as_integer()?;
        // Allocate while the operands are still on the stack so the GC
        // can see them.
        let point =
            self.instantiate_class(CLASS_POINT_PTR, CLASS_POINT_SIZE, ObjectLayout::Pointer);
        self.memory.put_ptr(point, CLASS_POINT_X, x_coord);
        self.memory.put_ptr(point, CLASS_POINT_Y, y_coord);
        self.popn(2);
        self.push(point);
        Some(())
    }
}
// Generates a Float comparison primitive: coerces both operands with
// get_float (SmallIntegers are accepted), pops them, and pushes
// true/false according to `$op`.
macro_rules! defprim_flt_compare {
    ($name:ident, $op:tt) => {
        fn $name(&mut self) -> Option<()> {
            let arg = self.get_float(self.stack_value(0))?;
            let rcvr = self.get_float(self.stack_value(1))?;
            self.popn(2);
            if rcvr $op arg {
                self.push(TRUE_PTR);
            } else {
                self.push(FALSE_PTR);
            }
            Some(())
        }
    }
}
// Generates a Float arithmetic primitive: coerces both operands with
// get_float, applies `$op`, and pushes a freshly boxed Float result.
macro_rules! defprim_flt_arith {
    ($name:ident, $op:tt) => {
        fn $name(&mut self) -> Option<()> {
            let arg = self.get_float(self.stack_value(0))?;
            let rcvr = self.get_float(self.stack_value(1))?;
            let res = self.new_float(rcvr $op arg);
            self.popn(2);
            self.push(res);
            Some(())
        }
    }
}
// Floating point
impl Interpreter {
    /// Coerce `oop` to an f32: SmallIntegers convert, Floats unbox,
    /// anything else fails.
    fn get_float(&self, oop: OOP) -> Option<f32> {
        if oop.is_integer() {
            return Some(oop.as_integer() as f32);
        }
        return self.memory.get_float(oop);
    }
    /// Numbered primitives 40-54: Float conversion, arithmetic,
    /// comparison and decomposition.
    fn dispatch_prim_float(&mut self) -> Option<()> {
        match self.primitive_index {
            40 => self.prim_as_float(),
            41 => self.prim_float_add(),
            42 => self.prim_float_sub(),
            43 => self.prim_float_lt(),
            44 => self.prim_float_gt(),
            45 => self.prim_float_le(),
            46 => self.prim_float_ge(),
            47 => self.prim_float_eq(),
            48 => self.prim_float_ne(),
            49 => self.prim_float_mul(),
            50 => self.prim_float_div(),
            51 => self.prim_float_trunc(),
            52 => self.prim_float_frac(),
            53 => self.prim_float_exp(),
            54 => self.prim_float_times_two_power(),
            _ => None,
        }
    }
    /// Primitive 40 (asFloat): box a SmallInteger receiver as a Float.
    fn prim_as_float(&mut self) -> Option<()> {
        let int = self.stack_value(0).try_as_integer()?;
        let float = self.new_float(int as f32);
        self.popn(1);
        self.push(float);
        Some(())
    }
    // Float arithmetic primitives (41/42/49/50).
    defprim_flt_arith!(prim_float_add, +);
    defprim_flt_arith!(prim_float_sub, -);
    defprim_flt_arith!(prim_float_mul, *);
    defprim_flt_arith!(prim_float_div, /);
    // Float comparison primitives (43-48).
    defprim_flt_compare!(prim_float_lt, <);
    defprim_flt_compare!(prim_float_gt, >);
    defprim_flt_compare!(prim_float_le, <=);
    defprim_flt_compare!(prim_float_ge, >=);
    defprim_flt_compare!(prim_float_eq, ==);
    defprim_flt_compare!(prim_float_ne, !=);
    /// Primitive 51 (truncated): Float -> SmallInteger toward zero;
    /// fails when the result is out of SmallInteger range.
    fn prim_float_trunc(&mut self) -> Option<()> {
        use crate::objectmemory::{SMALLINT_MAX, SMALLINT_MIN};
        let rcvr = self.get_float(self.stack_top())?.trunc();
        if rcvr < SMALLINT_MIN as f32 || rcvr > SMALLINT_MAX as f32 {
            return None;
        }
        let int = OOP::try_from_integer(rcvr as Word)?;
        self.popn(1);
        self.push(int);
        Some(())
    }
    /// Primitive 52 (fractionPart).
    fn prim_float_frac(&mut self) -> Option<()> {
        let rcvr = self.get_float(self.stack_top())?;
        let frac = self.new_float(rcvr.fract());
        self.popn(1);
        self.push(frac);
        Some(())
    }
    /// Primitive 53 (exponent): the unbiased base-2 exponent of the
    /// receiver, read straight out of the IEEE-754 bit pattern
    /// (bits 23-30 of an f32).
    fn prim_float_exp(&mut self) -> Option<()> {
        let rcvr = self.get_float(self.stack_top())?;
        let raw_exp = (rcvr.to_bits() >> 23) & 0xFF;
        let unbiased_exp = raw_exp as Word - 127; // safe because 0xFF will always fit into a word
        self.popn(1);
        self.push(OOP::from(unbiased_exp));
        Some(())
    }
    /// Primitive 54 (timesTwoPower:): receiver * 2^argument, where the
    /// argument may be a SmallInteger or a Float.
    fn prim_float_times_two_power(&mut self) -> Option<()> {
        let rcvr = self.get_float(self.stack_value(1))?;
        let arg = self.stack_value(0);
        let scale = if arg.is_integer() {
            (arg.as_integer() as f32).exp2()
        } else {
            self.get_float(arg)?.exp2()
        };
        let result = self.new_float(scale * rcvr);
        self.popn(2);
        self.push(result);
        Some(())
    }
}
// Array and stream primitives
// Instance layout shared by ReadStream/WriteStream.
const STREAM_ARRAY_INDEX: usize = 0; // collection being streamed over
const STREAM_INDEX_INDEX: usize = 1; // current position
const STREAM_READ_LIMIT_INDEX: usize = 2; // readLimit
const STREAM_WRITE_LIMIT_INDEX: usize = 3; // writeLimit
impl Interpreter {
    /// Numbered primitives 60-67: indexed access, sizing, and the
    /// Stream next/nextPut:/atEnd fast paths.
    fn dispatch_prim_sub_and_stream(&mut self) -> Option<()> {
        match self.primitive_index {
            60 => self.prim_at(),
            61 => self.prim_atput(),
            62 => self.prim_size(),
            63 => self.prim_string_at(),
            64 => self.prim_string_atput(),
            65 => self.prim_next(),
            66 => self.prim_nextput(),
            67 => self.prim_atend(),
            _ => None,
        }
    }
    /// Fail unless `index` (1-based, counted past the fixed fields) is a
    /// valid indexable slot of `array`.
    fn check_indexable_bounds(&self, index: usize, array: OOP) -> Option<()> {
        let klass = self.memory.get_class_of(array);
        if index < 1 {
            return None;
        }
        // Q: Is this correct? It seems fixed fields might all be pointers (e.g., CompiledMethod)
        // A: CompiledMethod is a bytes object; the other fields have image-level support
        if index + self.instance_specification(klass).fixed_fields() <= self.length_of(klass, array)
        {
            Some(())
        } else {
            None
        }
    }
    /// Length of `array` in its natural units (words or bytes).
    fn length_of(&self, klass: OOP, array: OOP) -> usize {
        if self.instance_specification(klass).is_words() {
            self.memory.get_word_length_of(array)
        } else {
            self.memory.get_byte_length_of(array)
        }
    }
    /// Fetch 1-based element `index` of `array`, boxing non-pointer
    /// elements as (possibly large) integers.
    fn vm_at(&mut self, array: OOP, index: usize) -> OOP {
        let klass = self.memory.get_class_of(array);
        let ispec = self.instance_specification(klass);
        if ispec.is_words() {
            if ispec.is_pointers() {
                self.memory.get_ptr(array, index - 1)
            } else {
                self.long_integer_for(self.memory.get_word(array, index - 1) as UWord as usize)
            }
        } else {
            OOP::from(self.memory.get_byte(array, index - 1) as Word)
        }
    }
    /// Store `value` into 1-based element `index` of `array`; fails when
    /// the value cannot be represented in the array's element type.
    fn vm_atput(&mut self, array: OOP, index: usize, value: OOP) -> Option<()> {
        let klass = self.memory.get_class_of(array);
        let ispec = self.instance_specification(klass);
        if ispec.is_words() {
            if ispec.is_pointers() {
                self.memory.put_ptr(array, index - 1, value)
            } else {
                self.memory
                    .put_word(array, index - 1, self.long_integer_value_of(value)? as Word);
            }
        } else {
            self.memory
                .put_byte(array, index - 1, value.try_as_integer()? as u8);
        }
        Some(())
    }
    /// Primitive 60 (at:).
    fn prim_at(&mut self) -> Option<()> {
        let index = self.long_integer_value_of(self.stack_value(0))?;
        let array = self.stack_value(1);
        let klass = self.memory.get_class_of(array);
        self.check_indexable_bounds(index, array)?;
        // Indexable slots start after the fixed (named) fields.
        let index = index + self.instance_specification(klass).fixed_fields();
        let result = self.vm_at(array, index);
        self.popn(2);
        self.push(result);
        Some(())
    }
    /// Primitive 61 (at:put:); answers the stored value.
    fn prim_atput(&mut self) -> Option<()> {
        let value = self.stack_value(0);
        let index = self.long_integer_value_of(self.stack_value(1))?;
        let array = self.stack_value(2);
        let klass = self.memory.get_class_of(array);
        self.check_indexable_bounds(index, array)?;
        let index = index + self.instance_specification(klass).fixed_fields();
        self.vm_atput(array, index, value)?;
        self.popn(3);
        self.push(value);
        Some(())
    }
    /// Primitive 62 (size): number of indexable slots, excluding the
    /// fixed fields.
    fn prim_size(&mut self) -> Option<()> {
        let array = self.stack_top();
        let klass = self.memory.get_class_of(array);
        let length = self.long_integer_for(
            self.length_of(klass, array) - self.instance_specification(klass).fixed_fields(),
        );
        self.popn(1);
        self.push(length);
        Some(())
    }
    /// Primitive 63 (String at:): answers a Character looked up in the
    /// image's character table.
    fn prim_string_at(&mut self) -> Option<()> {
        let index = self.long_integer_value_of(self.stack_value(0))?;
        let array = self.stack_value(1);
        self.check_indexable_bounds(index, array)?;
        let ascii = self.vm_at(array, index).try_as_integer()?;
        let chr = self.memory.get_ptr(CHARACTER_TABLE_PTR, ascii as usize);
        self.popn(2);
        self.push(chr);
        Some(())
    }
    /// Primitive 64 (String at:put:): stores a Character's code point.
    fn prim_string_atput(&mut self) -> Option<()> {
        let character = self.stack_value(0);
        let index = self.long_integer_value_of(self.stack_value(1))?;
        let array = self.stack_value(2);
        self.check_indexable_bounds(index, array)?;
        if self.memory.get_class_of(character) != CLASS_CHARACTER_PTR {
            return None;
        }
        let ascii = self.memory.get_ptr(character, 0);
        self.vm_atput(array, index, ascii)?;
        self.popn(3);
        self.push(character);
        Some(())
    }
    /// Primitive 65 (ReadStream next), for Arrays and Strings only.
    fn prim_next(&mut self) -> Option<()> {
        let stream = self.stack_top();
        let array = self.memory.get_ptr(stream, STREAM_ARRAY_INDEX);
        let array_klass = self.memory.get_class_of(array);
        let index = self.get_integer(stream, STREAM_INDEX_INDEX)?;
        let limit = self.get_integer(stream, STREAM_READ_LIMIT_INDEX)?;
        if index >= limit {
            return None;
        }
        if array_klass != CLASS_ARRAY_PTR && array_klass != CLASS_STRING_PTR {
            return None;
        }
        let index = index + 1;
        self.check_indexable_bounds(index as usize, array)?;
        let result = self.vm_at(array, index as usize);
        self.put_integer(stream, STREAM_INDEX_INDEX, index)?;
        self.popn(1);
        if array_klass == CLASS_ARRAY_PTR {
            self.push(result);
        } else {
            // Strings yield Characters, not raw byte values.
            let char = self
                .memory
                .get_ptr(CHARACTER_TABLE_PTR, result.as_integer() as usize);
            self.push(char);
        }
        Some(())
    }
    /// Primitive 66 (WriteStream nextPut:), for Arrays and Strings only;
    /// answers the written value.
    fn prim_nextput(&mut self) -> Option<()> {
        let value = self.stack_value(0);
        let stream = self.stack_value(1);
        let array = self.memory.get_ptr(stream, STREAM_ARRAY_INDEX);
        let array_klass = self.memory.get_class_of(array);
        let index = self.get_integer(stream, STREAM_INDEX_INDEX)?;
        let limit = self.get_integer(stream, STREAM_WRITE_LIMIT_INDEX)?;
        if index >= limit {
            return None;
        }
        if array_klass != CLASS_ARRAY_PTR && array_klass != CLASS_STRING_PTR {
            return None;
        }
        let index = index + 1;
        self.check_indexable_bounds(index as usize, array)?;
        // Fail the primitive when the store itself fails; the previous
        // version discarded vm_atput's Option and advanced the stream
        // position (and pushed the value) even after a failed store.
        if array_klass == CLASS_ARRAY_PTR {
            self.vm_atput(array, index as usize, value)?;
        } else {
            let ascii = self.memory.get_ptr(value, 0);
            self.vm_atput(array, index as usize, ascii)?;
        }
        self.put_integer(stream, STREAM_INDEX_INDEX, index)?;
        self.popn(2);
        self.push(value);
        Some(())
    }
    /// Primitive 67 (atEnd), for Arrays and Strings only.
    fn prim_atend(&mut self) -> Option<()> {
        let stream = self.stack_top();
        let array = self.memory.get_ptr(stream, STREAM_ARRAY_INDEX);
        let array_klass = self.memory.get_class_of(array);
        let length = self.length_of(array_klass, array);
        let index = self.get_integer(stream, STREAM_INDEX_INDEX)?;
        let limit = self.get_integer(stream, STREAM_READ_LIMIT_INDEX)?;
        if array_klass != CLASS_ARRAY_PTR && array_klass != CLASS_STRING_PTR {
            return None;
        }
        self.popn(1);
        if index >= limit || index as usize >= length {
            self.push(TRUE_PTR);
        } else {
            self.push(FALSE_PTR);
        }
        Some(())
    }
}
// Storage primitives
impl Interpreter {
    /// Numbered primitives 68-79: object field access, instantiation,
    /// become:, OOP conversion, enumeration and method construction.
    fn dispatch_prim_storage(&mut self) -> Option<()> {
        match self.primitive_index {
            68 => self.prim_object_at(),
            69 => self.prim_object_atput(),
            70 => self.prim_new(),
            71 => self.prim_new_with_arg(),
            72 => self.prim_become(),
            73 => self.prim_inst_var_at(),
            74 => self.prim_inst_var_atput(),
            75 => self.prim_as_oop(),
            76 => self.prim_as_object(),
            77 => self.prim_some_instance(),
            78 => self.prim_next_instance(),
            79 => self.prim_new_method(),
            _ => None,
        }
    }
    /// Primitive 68 (CompiledMethod objectAt:): read a header/literal
    /// slot of a method.
    fn prim_object_at(&mut self) -> Option<()> {
        let index = self.stack_top().try_as_integer()?;
        let receiver = self.stack_value(1);
        if index <= 0 || index as usize > self.method_header_of(receiver).oop_count() {
            return None;
        }
        self.popn(2);
        self.push(self.memory.get_ptr(receiver, index as usize - 1));
        Some(())
    }
    /// Primitive 69 (CompiledMethod objectAt:put:): write a
    /// header/literal slot; answers the stored value.
    fn prim_object_atput(&mut self) -> Option<()> {
        let value = self.stack_value(0);
        let index = self.stack_value(1).try_as_integer()?;
        let receiver = self.stack_value(2);
        if index <= 0 || index as usize > self.method_header_of(receiver).oop_count() {
            return None;
        }
        self.memory.put_ptr(receiver, index as usize - 1, value);
        // Pop receiver, index AND value before pushing the result. The
        // previous version popped only two slots, leaving a stale
        // receiver on the stack under the pushed value.
        self.popn(3);
        self.push(value);
        Some(())
    }
    /// Primitive 70 (new): instantiate a non-indexable class.
    fn prim_new(&mut self) -> Option<()> {
        let class = self.stack_value(0);
        let ispec = self.instance_specification(class);
        let size = ispec.fixed_fields();
        if ispec.is_indexable() {
            return None;
        }
        let obj = if ispec.is_pointers() {
            self.instantiate_class(class, size, ObjectLayout::Pointer)
        } else if ispec.is_words() {
            self.instantiate_class(class, size, ObjectLayout::Word)
        } else {
            return None;
        };
        self.popn(1);
        self.push(obj);
        Some(())
    }
    /// Primitive 71 (new:): instantiate an indexable class with the
    /// requested number of variable slots.
    fn prim_new_with_arg(&mut self) -> Option<()> {
        let size = self.long_integer_value_of(self.stack_value(0))?;
        let class = self.stack_value(1);
        let ispec = self.instance_specification(class);
        if !ispec.is_indexable() {
            return None;
        }
        let size = size + ispec.fixed_fields();
        let layout = if ispec.is_pointers() {
            ObjectLayout::Pointer
        } else if ispec.is_words() {
            ObjectLayout::Word
        } else {
            ObjectLayout::Byte
        };
        let obj = self.instantiate_class(class, size, layout);
        self.popn(2);
        self.push(obj);
        Some(())
    }
    /// Primitive 72 (become:): swap the identities of two non-immediate
    /// objects; the receiver stays on the stack as the result.
    fn prim_become(&mut self) -> Option<()> {
        let other = self.stack_value(0);
        let this = self.stack_value(1);
        if other.is_integer() || this.is_integer() {
            return None;
        }
        self.memory.swap_pointers(this, other);
        self.popn(1);
        Some(())
    }
    /// Fail unless `index` is a valid 1-based field index of `object`.
    fn check_ivar_bounds_of(&self, object: OOP, index: usize) -> Option<()> {
        if index >= 1 && index <= self.length_of(self.memory.get_class_of(object), object) {
            Some(())
        } else {
            None
        }
    }
    /// Primitive 73 (instVarAt:).
    fn prim_inst_var_at(&mut self) -> Option<()> {
        let index = self.stack_value(0).try_as_integer()? as usize;
        let receiver = self.stack_value(1);
        self.check_ivar_bounds_of(receiver, index)?;
        let obj = self.vm_at(receiver, index);
        self.popn(2);
        self.push(obj);
        Some(())
    }
    /// Primitive 74 (instVarAt:put:); answers the stored value.
    fn prim_inst_var_atput(&mut self) -> Option<()> {
        let value = self.stack_value(0);
        let index = self.stack_value(1).try_as_integer()? as usize;
        let receiver = self.stack_value(2);
        self.check_ivar_bounds_of(receiver, index)?;
        // Propagate a failed store instead of silently discarding
        // vm_atput's Option as the previous version did.
        self.vm_atput(receiver, index, value)?;
        self.popn(3);
        self.push(value);
        Some(())
    }
    /// Primitive 76 (asObject): SmallInteger -> object pointer.
    fn prim_as_object(&mut self) -> Option<()> {
        let rcvr = self.stack_value(0);
        if rcvr.is_integer() {
            self.popn(1);
            self.push(rcvr.to_pointer());
            Some(())
        } else {
            None
        }
    }
    /// Primitive 75 (asOop): object pointer -> SmallInteger.
    fn prim_as_oop(&mut self) -> Option<()> {
        let rcvr = self.stack_top();
        if rcvr.is_object() {
            self.popn(1);
            self.push(rcvr.to_smallint());
            Some(())
        } else {
            None
        }
    }
    /// Primitive 77 (someInstance): first instance of the receiver
    /// class, or fail when there is none.
    fn prim_some_instance(&mut self) -> Option<()> {
        let class = self.stack_top();
        self.memory.initial_instance_of(class).map(|obj| {
            self.pop();
            self.push(obj);
        })
    }
    /// Primitive 78 (nextInstance): the instance after the receiver in
    /// enumeration order, or fail at the end.
    fn prim_next_instance(&mut self) -> Option<()> {
        let obj = self.stack_top();
        self.memory.next_instance_of(obj).map(|obj| {
            self.pop();
            self.push(obj);
        })
    }
    /// Primitive 79 (newMethod:header:): allocate a CompiledMethod with
    /// room for the literal frame and bytecodes, installing the header.
    fn prim_new_method(&mut self) -> Option<()> {
        let header = self.stack_value(0);
        if !header.is_integer() {
            return None;
        }
        let parsed_header = MethodHeader::new(header);
        // An extension header needs at least two literals to live in.
        if parsed_header.flag_value() == HeaderFlag::HeaderExt && parsed_header.literal_count() < 2 {
            return None
        }
        let bytecode_count = self.stack_value(1).try_as_integer()?;
        let class = self.stack_value(2);
        if bytecode_count < 0 {
            return None;
        }
        let size = (parsed_header.literal_count() + 1) * OOP::byte_size()
            + bytecode_count as usize;
        let method = self.instantiate_class(class, size, ObjectLayout::Byte);
        self.memory.put_ptr(method, 0, header);
        if parsed_header.flag_value() == HeaderFlag::HeaderExt {
            // make sure that the header is valid, if existing
            self.memory.put_ptr(method, parsed_header.literal_count() + LITERAL_START - 2, OOP::from(0));
        }
        self.popn(3);
        self.push(method);
        Some(())
    }
}
// Control primitives
impl Interpreter {
    /// Numbered primitives 80-89: blocks, perform:, and process control.
    fn dispatch_prim_control(&mut self) -> Option<()> {
        match self.primitive_index {
            80 => self.prim_block_copy(),
            81 => self.prim_value(),
            82 => self.prim_value_with_args(),
            83 => self.prim_perform(),
            84 => self.prim_perform_with_args(),
            85 => self.prim_signal(),
            86 => self.prim_wait(),
            87 => self.prim_resume(),
            88 => self.prim_suspend(),
            89 => self.prim_flush_cache(),
            _ => None,
        }
    }
    /// Primitive 80 (blockCopy:): build a BlockContext whose home is the
    /// receiver's method context.
    fn prim_block_copy(&mut self) -> Option<()> {
        let block_argcount = self.stack_value(0);
        let ctx = self.stack_value(1);
        // Copying a block inside a block shares the original home context.
        let method_ctx = if self.is_block_ctx(ctx) {
            self.memory.get_ptr(ctx, CTX_HOME_INDEX)
        } else {
            ctx
        };
        let ctx_size = self.memory.get_word_length_of(method_ctx);
        let new_ctx =
            self.instantiate_class(CLASS_BLOCK_CONTEXT_PTR, ctx_size, ObjectLayout::Pointer);
        // The block's body starts after the 2-byte jump that follows
        // this send in the bytecode stream.
        let iip = OOP::from(self.ip as i16 + 3);
        self.memory.put_ptr(new_ctx, CTX_INITIAL_IP_INDEX, iip);
        self.memory.put_ptr(new_ctx, CTX_IP_INDEX, iip);
        self.context_put_sp(new_ctx, 0);
        self.memory
            .put_ptr(new_ctx, CTX_BLOCK_ARG_COUNT_INDEX, block_argcount);
        self.memory.put_ptr(new_ctx, CTX_HOME_INDEX, method_ctx);
        self.popn(2);
        self.push(new_ctx);
        Some(())
    }
    /// Primitive 81 (value/value:...): run a block. Fails when the
    /// send's argument count does not match the block's.
    fn prim_value(&mut self) -> Option<()> {
        let block_context = self.stack_value(self.argument_count);
        let argcount = self.block_argument_count(block_context);
        if self.argument_count != argcount {
            println!("Wrong argcount (got {}, expected {})", self.argument_count, argcount);
            return None;
        }
        // Copy the arguments into the block's temp frame, then drop them
        // (and the block itself) from the caller's stack.
        self.memory.transfer_fields(
            argcount,
            self.active_context,
            self.sp + 1 - argcount,
            block_context,
            CTX_TEMPFRAME_START_INDEX,
        );
        self.popn(argcount + 1);
        let iip = self.memory.get_ptr(block_context, CTX_INITIAL_IP_INDEX);
        self.memory.put_ptr(block_context, CTX_IP_INDEX, iip);
        self.context_put_sp(block_context, argcount as Word);
        self.memory
            .put_ptr(block_context, CTX_CALLER_INDEX, self.active_context);
        // self.popn(1);
        self.new_active_context(block_context);
        Some(())
    }
    /// Primitive 82 (valueWithArguments:): run a block with its
    /// arguments supplied in an Array.
    fn prim_value_with_args(&mut self) -> Option<()> {
        let arg_array = self.stack_value(0);
        let block_ctx = self.stack_value(1);
        let block_argcount = self.block_argument_count(block_ctx);
        let array_class = self.memory.get_class_of(arg_array);
        if array_class != CLASS_ARRAY_PTR {
            return None;
        }
        let array_argcount = self.memory.get_word_length_of(arg_array);
        if array_argcount != block_argcount {
            return None;
        }
        // Copy the array's elements straight into the block's temp frame.
        self.memory.transfer_fields(
            array_argcount,
            arg_array,
            0,
            block_ctx,
            CTX_TEMPFRAME_START_INDEX,
        );
        let iip = self.memory.get_ptr(block_ctx, CTX_INITIAL_IP_INDEX);
        self.memory.put_ptr(block_ctx, CTX_IP_INDEX, iip);
        self.context_put_sp(block_ctx, array_argcount as Word);
        self.memory
            .put_ptr(block_ctx, CTX_CALLER_INDEX, self.active_context);
        self.popn(2);
        self.new_active_context(block_ctx);
        Some(())
    }
    /// Primitive 83 (perform:...): re-send using the selector taken from
    /// the first argument; the remaining arguments slide down one slot.
    fn prim_perform(&mut self) -> Option<()> {
        let perform_selector = self.message_selector;
        self.message_selector = self.stack_value(self.argument_count - 1);
        let new_rcvr = self.stack_value(self.argument_count);
        self.lookup_method_in_class(self.memory.get_class_of(new_rcvr));
        // The found method must take one argument fewer (the selector
        // itself is consumed by perform:).
        if self.argument_count(self.new_method) != self.argument_count - 1 {
            self.message_selector = perform_selector;
            return None;
        } else {
            // Shift the real arguments down over the selector slot.
            let selector_index = self.sp - self.argument_count + 1;
            self.memory.transfer_fields(
                self.argument_count - 1,
                self.active_context,
                selector_index + 1,
                self.active_context,
                selector_index,
            );
            self.popn(1);
            self.argument_count -= 1;
            self.execute_new_method();
            return Some(());
        }
    }
    /// Primitive 84 (perform:withArguments:): unpack the argument array
    /// onto the stack and re-dispatch on the given selector.
    fn prim_perform_with_args(&mut self) -> Option<()> {
        let argument_array = self.stack_value(0);
        let array_size = self.memory.get_word_length_of(argument_array);
        // The unpacked arguments must fit within this context's stack.
        if self.sp + array_size >= self.memory.get_word_length_of(self.active_context) {
            return None;
        } else if self.memory.get_class_of(argument_array) != CLASS_ARRAY_PTR {
            return None;
        }
        self.popn(1);
        let perform_selector = self.message_selector;
        self.message_selector = self.pop();
        let this_rcvr = self.stack_top();
        self.argument_count = array_size;
        for i in 0..self.argument_count {
            self.push(self.memory.get_ptr(argument_array, i));
        }
        self.lookup_method_in_class(self.memory.get_class_of(this_rcvr));
        if self.argument_count(self.new_method) == self.argument_count {
            self.execute_new_method();
            return Some(());
        } else {
            // Argument-count mismatch: rebuild the original
            // selector+array stack frame so the failure falls back to a
            // normal #perform:withArguments: send.
            self.popn(self.argument_count);
            self.push(self.message_selector);
            self.push(argument_array);
            self.argument_count = 2;
            self.message_selector = perform_selector;
            return None;
        }
    }
}
// class ProcessorScheduler
const PROCESS_LISTS_INDEX: usize = 0; // Array of run queues, one per priority
const ACTIVE_PROCESS_INDEX: usize = 1; // the currently running Process
// class LinkedList
const FIRST_LINK_INDEX: usize = 0;
const LAST_LINK_INDEX: usize = 1;
// class Semaphore
const EXCESS_SIGNALS_INDEX: usize = 2; // signals received with nobody waiting
// class Link
const NEXT_LINK_INDEX: usize = 0;
// class Process
const SUSPENDED_CONTEXT_INDEX: usize = 1; // context to resume when scheduled
const PRIOTITY_INDEX: usize = 2; // sic: "priority" (renaming would touch all callers)
const MY_LIST_INDEX: usize = 3; // the LinkedList this process currently sits on
// process scheduling
impl Interpreter {
fn asynchronous_signal(&mut self, semaphore: OOP) {
if semaphore != NIL_PTR {
self.semaphore_list.push(semaphore);
}
}
    /// Signal `semaphore` immediately: wake its first waiting process,
    /// or bump the excess-signal count when nobody is waiting.
    /// Signalling nil is a no-op (with a diagnostic).
    fn synchronous_signal(&mut self, semaphore: OOP) -> Option<()> {
        if semaphore == NIL_PTR {
            println!("Signalled nil");
            return Some(())
        }
        if self.is_empty_list(semaphore) {
            // No waiter: remember the signal for a future wait.
            let excess_signals = self
                .memory
                .get_ptr(semaphore, EXCESS_SIGNALS_INDEX)
                .try_as_integer()?;
            self.memory.put_ptr(
                semaphore,
                EXCESS_SIGNALS_INDEX,
                OOP::try_from_integer(excess_signals + 1)?,
            );
            Some(())
        } else {
            // Some process is blocked on this semaphore: resume the first.
            let process = self.remove_first_link_of_list(semaphore);
            self.resume(process)
        }
    }
    /// Request a switch to `process`; the actual swap happens at the
    /// next call to check_process_switch.
    fn transfer_to(&mut self, process: OOP) {
        self.new_process = Some(process)
    }
    /// Interruption-point hook: deliver all queued semaphore signals,
    /// then perform any requested process switch.
    fn check_process_switch(&mut self) {
        while let Some(semaphore) = self.semaphore_list.pop() {
            // NOTE(review): a failed signal (non-integer excessSignals
            // slot) is silently discarded here — presumably intentional
            // best-effort delivery; confirm.
            self.synchronous_signal(semaphore);
        }
        if let Some(process) = self.new_process.take() {
            // Park the current context in the outgoing process (after
            // take(), active_process() reads the scheduler's slot, i.e.
            // the OLD process), then install the new process and resume
            // its suspended context.
            let active_process = self.active_process();
            self.memory
                .put_ptr(active_process, SUSPENDED_CONTEXT_INDEX, self.active_context);
            self.memory
                .put_ptr(self.scheduler_pointer(), ACTIVE_PROCESS_INDEX, process);
            self.new_active_context(self.memory.get_ptr(process, SUSPENDED_CONTEXT_INDEX));
        }
    }
fn active_process(&self) -> OOP {
self.new_process.unwrap_or_else(|| {
self.memory
.get_ptr(self.scheduler_pointer(), ACTIVE_PROCESS_INDEX)
})
}
fn scheduler_pointer(&self) -> OOP {
self.memory.get_ptr(SCHEDULER_ASSOCIATION_PTR, VALUE_INDEX)
}
fn first_context(&mut self) -> OOP {
self.new_process = None;
self.memory
.get_ptr(self.active_process(), SUSPENDED_CONTEXT_INDEX)
}
fn remove_first_link_of_list(&mut self, linked_list: OOP) -> OOP {
// TODO: refcount unsafe
let first_link = self.memory.get_ptr(linked_list, FIRST_LINK_INDEX);
let last_link = self.memory.get_ptr(linked_list, LAST_LINK_INDEX);
if last_link == first_link {
self.memory.put_ptr(linked_list, FIRST_LINK_INDEX, NIL_PTR);
self.memory.put_ptr(linked_list, LAST_LINK_INDEX, NIL_PTR);
} else {
let next_link = self.memory.get_ptr(first_link, NEXT_LINK_INDEX);
self.memory
.put_ptr(linked_list, FIRST_LINK_INDEX, next_link);
}
self.memory.put_ptr(first_link, NEXT_LINK_INDEX, NIL_PTR);
return first_link;
}
fn add_last_link_to_list(&mut self, linked_list: OOP, link: OOP) {
// TODO: refcount unsafe
if self.is_empty_list(linked_list) {
self.memory.put_ptr(linked_list, FIRST_LINK_INDEX, link);
} else {
let last_link = self.memory.get_ptr(linked_list, LAST_LINK_INDEX);
self.memory.put_ptr(last_link, NEXT_LINK_INDEX, link);
}
self.memory.put_ptr(linked_list, LAST_LINK_INDEX, link);
self.memory.put_ptr(link, MY_LIST_INDEX, linked_list);
}
fn is_empty_list(&self, linked_list: OOP) -> bool {
// TODO: refcount unsafe
self.memory.get_ptr(linked_list, FIRST_LINK_INDEX) == NIL_PTR
}
fn wake_highest_priority(&mut self) -> OOP {
let process_lists = self
.memory
.get_ptr(self.scheduler_pointer(), PROCESS_LISTS_INDEX);
let mut priority = self.memory.get_word_length_of(process_lists);
loop {
let process_list = self.memory.get_ptr(process_lists, priority - 1);
if !self.is_empty_list(process_list) {
// println!("Woke process at priority {}", priority);
return self.remove_first_link_of_list(process_list);
}
if priority == 0 {
panic!("No processes left to run");
}
priority -= 1;
}
}
fn sleep(&mut self, process: OOP) -> Option<()> {
let priority = self.get_integer(process, PRIOTITY_INDEX)?;
if priority < 1 {
panic!("Priority in the basement: {}", priority);
}
let process_lists = self
.memory
.get_ptr(self.scheduler_pointer(), PROCESS_LISTS_INDEX);
let process_list = self.memory.get_ptr(process_lists, priority as usize - 1);
self.add_last_link_to_list(process_list, process);
Some(())
}
fn suspend_active(&mut self) {
let process = self.wake_highest_priority();
self.transfer_to(process);
}
fn resume(&mut self, process: OOP) -> Option<()> {
let active_process = self.active_process();
let active_priority = self.get_integer(active_process, PRIOTITY_INDEX)?;
let new_priority = self.get_integer(process, PRIOTITY_INDEX)?;
if new_priority > active_priority {
self.sleep(active_process)?;
self.transfer_to(process);
Some(())
} else {
self.sleep(process)
}
}
fn prim_signal(&mut self) -> Option<()> {
self.synchronous_signal(self.stack_top())
}
fn prim_wait(&mut self) -> Option<()> {
let rcvr = self.stack_top();
let excess_signals = self.get_integer(rcvr, EXCESS_SIGNALS_INDEX)?;
if excess_signals > 0 {
// println!("Process {:?} waits on {:?}", self.active_process(), rcvr);
self.put_integer(rcvr, EXCESS_SIGNALS_INDEX, excess_signals - 1)
} else {
self.add_last_link_to_list(rcvr, self.active_process());
self.suspend_active();
Some(())
}
}
fn prim_resume(&mut self) -> Option<()> {
self.resume(self.stack_top())
}
fn prim_suspend(&mut self) -> Option<()> {
if self.stack_top() != self.active_process() {
None
} else {
self.pop();
self.push(NIL_PTR);
self.suspend_active();
Some(())
}
}
fn prim_flush_cache(&mut self) -> Option<()> {
self.method_cache = [MethodCacheEntry::default(); 256];
Some(())
}
}
/// Host-side display and input state bridging the windowing backend and
/// the Smalltalk image's input-word protocol.
struct DisplayState {
    // Display form installed via prim_be_display.
    display: OOP,
    // Cursor form installed via prim_be_cursor.
    cursor: OOP,
    // if linked, cursor_location is None
    // Time (ms since interpreter startup) of the last event word pushed;
    // used by push_event_time to encode inter-event deltas.
    last_event: u128,
    // Cursor position while unlinked from the mouse; None while linked.
    cursor_location: Option<(isize, isize)>,
    // Last known mouse position in display coordinates.
    mouse_location: (isize, isize),
    // Start of the current mouse sampling window; None when no window open.
    mouse_delay_start: Option<Instant>,
    // True when a mouse move is waiting out the sampling interval.
    mouse_queued: bool,
    // Semaphore signalled once per word queued in input_queue.
    input_semaphore: OOP,
    // Encoded input event words awaiting prim_input_word.
    input_queue: VecDeque<UWord>,
    // Minimum ms between delivered mouse samples (set by prim_sample_interval).
    sample_interval_ms: usize,
}
/// Raw input events from the host, before encoding into event words.
#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
enum StEvent {
    /// Absolute pointer position (x, y) in display coordinates.
    PointerPos(UWord, UWord),
    /// Device, down
    Bistate(UWord, bool),
}
// IO primitives
impl Interpreter {
    /// Emit the timestamp word(s) that precede an input event word.
    /// A non-zero delta under 0x1000 ms is sent as one word; larger gaps
    /// send the 0x5000 escape followed by the absolute 32-bit time split
    /// into two 16-bit words.
    fn push_event_time(&mut self) {
        // push time code
        let elapsed = self.startup_time.elapsed().as_millis();
        let dt = (elapsed - self.display.last_event) as u32;
        self.display.last_event = elapsed;
        if dt == 0 {
            // do nothing
        } else if dt < 0x1000 {
            self.push_event_word(dt as UWord);
        } else {
            let abstime = elapsed as u32;
            self.push_event_word(0x5000);
            self.push_event_word((abstime >> 16) as UWord);
            self.push_event_word(abstime as UWord);
        }
    }
    /// Deliver a mouse-position event, rate-limited to at most one sample
    /// per `sample_interval_ms`. `new_event` marks fresh movement to queue.
    fn send_mouse_update(&mut self, new_event: bool) {
        self.display.mouse_queued |= new_event;
        // Close the sampling window once the configured interval elapsed.
        if let Some(elapsed) = self.display.mouse_delay_start.as_ref().map(Instant::elapsed) {
            if elapsed.as_millis() as usize > self.display.sample_interval_ms {
                self.display.mouse_delay_start = None;
            }
        }
        if self.display.mouse_delay_start.is_none() && self.display.mouse_queued {
            let (x,y) = self.display.mouse_location;
            self.push_event_time();
            // 12-bit coordinates: X tagged with 0x1000, Y with 0x2000.
            self.push_event_word((x as UWord & 0xFFF) | 0x1000);
            self.push_event_word((y as UWord & 0xFFF) | 0x2000);
            self.display.mouse_queued = false;
            self.display.mouse_delay_start = Some(Instant::now());
        }
    }
    /// Translate a host event into encoded input words.
    fn push_event(&mut self, event: StEvent) {
        match event {
            StEvent::PointerPos(x, y) => {
                self.display.mouse_location = (x as isize, y as isize);
                self.send_mouse_update(false);
            }
            StEvent::Bistate(dev, down) => {
                self.push_event_time();
                // Tag 0x3000 = device went down, 0x4000 = device went up.
                let tag = if down { 0x3000 } else { 0x4000 };
                self.push_event_word(dev | tag);
            }
        }
    }
    /// Queue one encoded input word and signal the input semaphore.
    fn push_event_word(&mut self, word: UWord) {
        // println!("Sent word {:04x}", word);
        self.display.input_queue.push_back(word);
        self.synchronous_signal(self.display.input_semaphore);
    }
    /// Route I/O primitive indices 90-105 to their handlers; any other
    /// index fails the primitive.
    fn dispatch_prim_io(&mut self) -> Option<()> {
        // println!("Dispatch {}", self.primitive_index);
        match self.primitive_index {
            90 => self.prim_mouse_point(),
            91 => self.prim_cursor_loc_put(),
            92 => self.prim_cursor_link(),
            93 => self.prim_input_semaphore(),
            94 => self.prim_sample_interval(),
            95 => self.prim_input_word(),
            96 => self.prim_copy_bits(), // in bitblt
            97 => self.prim_snapshot(),
            98 => self.prim_time_words_into(),
            99 => self.prim_tick_words_into(),
            100 => self.prim_signal_at_tick(),
            101 => self.prim_be_cursor(),
            102 => self.prim_be_display(),
            103 => self.prim_scan_characters(),
            104 => self.prim_draw_loop(),
            105 => self.prim_string_replace(),
            _ => None,
        }
    }
    /// Primitive 90: answer the current mouse position as a fresh Point.
    fn prim_mouse_point(&mut self) -> Option<()> {
        let pt =
            self.instantiate_class(CLASS_POINT_PTR, CLASS_POINT_SIZE, ObjectLayout::Pointer);
        self.memory.put_ptr(
            pt,
            CLASS_POINT_X,
            OOP::from(self.display.mouse_location.0 as Word),
        );
        self.memory.put_ptr(
            pt,
            CLASS_POINT_Y,
            OOP::from(self.display.mouse_location.1 as Word),
        );
        self.pop(); // pop receiver
        self.push(pt);
        Some(())
    }
    /// Primitive 91: move the cursor (or, when the cursor is linked to the
    /// mouse, the mouse itself) to the Point on top of the stack.
    fn prim_cursor_loc_put(&mut self) -> Option<()> {
        let pt = self.stack_top();
        let pt_x = self.get_integer(pt, CLASS_POINT_X)? as isize;
        let pt_y = self.get_integer(pt, CLASS_POINT_Y)? as isize;
        self.pop();
        {
            // Unlinked: update the free cursor; linked: update the mouse.
            let target = self
                .display
                .cursor_location
                .as_mut()
                .unwrap_or(&mut self.display.mouse_location);
            target.0 = pt_x;
            target.1 = pt_y;
        }
        if self.display.cursor_location.is_none() {
            self.display_impl.move_mouse((pt_x, pt_y))
        }
        Some(())
    }
    /// Primitive 92: link (true) or unlink (false) the cursor to the mouse.
    fn prim_cursor_link(&mut self) -> Option<()> {
        let flag = self.pop();
        // TODO: Handle non-boolean
        if flag == TRUE_PTR {
            self.display.cursor_location = None;
        } else {
            self.display.cursor_location = Some(self.display.mouse_location);
        }
        Some(())
    }
    /// Primitive 93: install the semaphore signalled for each input word.
    fn prim_input_semaphore(&mut self) -> Option<()> {
        // TODO: error handling
        println!("Set input semaphore to {:?}", self.stack_top());
        self.display.input_semaphore = self.pop();
        Some(())
    }
    /// Primitive 94: set the mouse sampling interval in milliseconds.
    fn prim_sample_interval(&mut self) -> Option<()> {
        let value = self.stack_top().try_as_integer()?;
        self.pop();
        self.display.sample_interval_ms = value as UWord as usize;
        Some(())
    }
    /// Primitive 95: answer the next queued input word, or fail when the
    /// queue is empty. Words >= 0x4000 are boxed as long integers —
    /// presumably because they exceed SmallInteger range (TODO confirm).
    fn prim_input_word(&mut self) -> Option<()> {
        let word = self.display.input_queue.pop_front()?;
        // println!("Input word {:04x}", word);
        let item = if word >= 0x4000 {
            // println!("Unexpectedly long word");
            self.long_integer_for(word as usize)
        } else {
            OOP::try_from_integer(word as Word)?
        };
        self.pop(); // pop receiver
        self.push(item);
        Some(())
    }
    /// Primitive 97: snapshot the image to "snapshot.dump". Saves the
    /// active context into the active process, runs a GC, writes the
    /// image, then clears the suspended-context slot and answers nil.
    /// Fails (None) when writing the image errors.
    fn prim_snapshot(&mut self) -> Option<()> {
        println!("Snapshot!");
        // save the active context
        self.save_ctx();
        let proc = self.active_process();
        self.memory.put_ptr(proc, SUSPENDED_CONTEXT_INDEX, self.active_context);
        self.gc();
        use crate::objectmemory::{ImageFormat, text_format::TextFormat};
        if let Err(err) = TextFormat::save("snapshot.dump", &self.memory) {
            eprintln!("Encountered error {}", err);
            None
        } else {
            self.memory.put_ptr(proc, SUSPENDED_CONTEXT_INDEX, NIL_PTR);
            self.pop();
            self.push(NIL_PTR);
            Some(())
            // unset suspended context
        }
    }
    /// Primitive 98: store the current wall-clock time into the 4-byte
    /// array on top of the stack, least-significant byte first (vm_atput
    /// indices are 1-based).
    fn prim_time_words_into(&mut self) -> Option<()> {
        let unix_time = ::std::time::SystemTime::now()
            .duration_since(::std::time::UNIX_EPOCH)
            .ok()?
            .as_secs();
        // 2,177,452,800 s = 25202 days = offset from 1901-01-01 (the
        // Smalltalk epoch) to the Unix epoch.
        let st_time = unix_time + 2177452800;
        let result_array = self.stack_value(0);
        for i in 0..4 {
            self.vm_atput(
                result_array,
                i+1,
                OOP::try_from_integer(((st_time >> (8 * i)) & 0xFF) as Word)?,
            );
        }
        self.pop();
        // TODO: return result array or self? Right now, returns self
        Some(())
    }
    /// Primitive 99: store the millisecond tick clock into the 4-byte
    /// array on top of the stack, least-significant byte first.
    fn prim_tick_words_into(&mut self) -> Option<()> {
        let unix_time = self.time_millis();
        let result_array = self.stack_value(0);
        for i in 0..4 {
            self.vm_atput(
                result_array,
                i+1,
                OOP::try_from_integer(((unix_time >> (8 * i)) & 0xFF) as Word)?,
            );
        }
        print!("Fetched time: ");
        for i in 0..4 {
            print!("{:02x}", self.memory.get_byte(result_array, i))
        }
        println!();
        self.pop();
        // TODO: return result array or self? Right now, returns self
        Some(())
    }
    /// Primitive 100: arm the timer to signal `semaphore` when the tick
    /// clock reaches the time encoded little-endian in the byte array on
    /// top of the stack. Leaves the receiver on the stack.
    fn prim_signal_at_tick(&mut self) -> Option<()> {
        let when_array = self.stack_value(0);
        let semaphore = self.stack_value(1);
        let mut when = 0;
        println!("Scheduling timer... at {}", self.obj_name(when_array));
        let bytelen = self.memory.get_byte_length_of(when_array);
        print!("Raw time: ");
        for i in 0..bytelen {
            let byte = self.memory.get_byte(when_array, i) as u32;
            print!("{:2x}", byte);
            when |= byte << (i * 8);
            // when = (when << 8) | byte;
        }
        println!();
        println!("Scheduled timer for {:8x}", when);
        println!("Now is {:8x}", self.time_millis());
        self.timer_when = when;
        self.timer_sem = Some(semaphore);
        self.popn(1);
        Some(())
    }
    /// Primitive 101: make the object on top of the stack the cursor.
    fn prim_be_cursor(&mut self) -> Option<()> {
        self.display.cursor = self.stack_top();
        Some(())
    }
    /// Primitive 102: make the object on top of the stack the display
    /// form and notify the display backend.
    fn prim_be_display(&mut self) -> Option<()> {
        self.display.display = self.stack_top();
        self::display::notice_new_display(self);
        Some(())
    }
    /// Primitive 103 (scan characters): unimplemented; always fails so
    /// the Smalltalk fallback runs.
    fn prim_scan_characters(&mut self) -> Option<()> {
        // TODO: Implement me
        None
    }
    /// Primitive 104 (draw loop): unimplemented; always fails.
    fn prim_draw_loop(&mut self) -> Option<()> {
        // TODO: implement me
        None
    }
    /// Primitive 105 (string replace): unimplemented; always fails.
    fn prim_string_replace(&mut self) -> Option<()> {
        // TODO: implement me
        None
    }
}
// System primitives
impl Interpreter {
    /// Route system primitive indices 110-116 to their handlers; any other
    /// index fails the primitive.
    fn dispatch_prim_system(&mut self) -> Option<()> {
        match self.primitive_index {
            110 => self.prim_equiv(),
            111 => self.prim_class(),
            112 => self.prim_core_left(),
            113 => self.prim_quit(),
            114 => self.prim_debug(),
            115 => self.prim_oops_left(),
            116 => self.prim_signal_at_oops_left_words_left(),
            _ => None,
        }
    }
    /// Primitive 110: identity comparison. Pops two operands and pushes
    /// true when they are the very same object pointer.
    fn prim_equiv(&mut self) -> Option<()> {
        let second = self.pop();
        let first = self.pop();
        let answer = if first == second { TRUE_PTR } else { FALSE_PTR };
        self.push(answer);
        Some(())
    }
    /// Primitive 111: answer the class of the receiver.
    fn prim_class(&mut self) -> Option<()> {
        let receiver = self.pop();
        let klass = self.memory.get_class_of(receiver);
        self.push(klass);
        Some(())
    }
    /// Primitive 112: answer the free space. Reports an effectively
    /// unlimited amount.
    fn prim_core_left(&mut self) -> Option<()> {
        self.pop();
        // more than I can say, for sure...
        let answer = self.long_integer_for(0xFFFFFFFF);
        self.push(answer);
        Some(())
    }
    /// Primitive 113: terminate the interpreter process immediately.
    fn prim_quit(&mut self) -> Option<()> {
        println!("Exit requested");
        ::std::process::exit(0);
    }
    /// Primitive 114: debugger hook; currently just logs and succeeds.
    fn prim_debug(&mut self) -> Option<()> {
        println!("Drop into debugger");
        Some(())
    }
    /// Primitive 115: answer how many object pointers remain available.
    fn prim_oops_left(&mut self) -> Option<()> {
        self.pop();
        let remaining = OOP::from(self.memory.oops_left() as Word);
        self.push(remaining);
        Some(())
    }
    /// Primitive 116: low-space watcher registration; logged and accepted
    /// without installing anything (the three arguments are discarded).
    fn prim_signal_at_oops_left_words_left(&mut self) -> Option<()> {
        println!("signal:atOopsLeft:wordsLeft:");
        self.popn(3);
        Some(())
    }
}
// Private primitives
impl Interpreter {
    /// Dispatcher for private/user primitives, numbered from 128 upward.
    /// None are implemented yet, so every call fails the primitive and
    /// falls back to the Smalltalk implementation. Reintroduce a
    /// `match self.primitive_index { ... }` here when the first one lands.
    fn dispatch_prim_private(&mut self) -> Option<()> {
        None
    }
}
// Regular processing...
impl Interpreter {
    /// Milliseconds since interpreter startup, truncated to u32 (wraps
    /// after ~49.7 days).
    fn time_millis(&self) -> u32 {
        // SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u32
        self.startup_time.elapsed().as_millis() as u32
    }
    /// Fire the pending timer semaphore if its deadline has passed, then
    /// let the display layer pump events.
    fn interruption_point(&mut self) {
        // Any queued semaphores?
        // Wrapping comparison: `now - when < 0x7FFF_FFFF` (mod 2^32) is
        // true exactly when `now` is at or less than half the u32 range
        // past `when`, so the timer still fires across clock wraparound.
        if self.timer_sem.is_some()
            && u32::wrapping_sub(self.time_millis(), self.timer_when) < 0x7FFF_FFFF
        {
            println!("Timer semaphore triggered");
            let sem = self.timer_sem.take().unwrap();
            // Signal failure is deliberately ignored here.
            self.synchronous_signal(sem);
        }
        // Any display processing?
        self::display::poll_display(self);
    }
}
/// Decode the byte contents of `oop` as UTF-8 text, substituting the
/// replacement character for invalid sequences. Borrows (no copy) when
/// the bytes are already valid UTF-8.
pub fn read_st_string(memory: &ObjectMemory, oop: OOP) -> Cow<str> {
    let raw = memory.get_bytes(oop);
    String::from_utf8_lossy(raw)
}
impl Interpreter {
pub fn class_name(&self, klass: OOP) -> Cow<str> {
let name = self.memory.get_ptr(klass, 6); // Class::name
if self.memory.get_class_of(name) != OOP::pointer(0x1C) {
return Cow::Borrowed("Wut?");
} else {
read_st_string(&self.memory, name)
}
}
pub fn obj_name(&self, obj: OOP) -> String {
if obj.is_integer() {
return obj.as_integer().to_string()
} else if obj == NIL_PTR {
return "nil".to_string()
}
let name = match self.memory.get_class_of(obj) {
OOP(0x38) => format!("#{}", read_st_string(&self.memory, obj)),
CLASS_STRING_PTR => format!("{:?}", read_st_string(&self.memory, obj)),
CLASS_LARGE_POSITIVEINTEGER_PTR => {
let digits = self.memory.get_bytes(obj);
let mut result = format!("LargePositiveInteger({}, ", digits.len());
for digit in digits.iter().rev() {
result += &format!("{:02x}", digit);
}
result.push(')');
result
},
CLASS_POINT_PTR => {
let x = self.memory.get_ptr(obj, 0);
let y = self.memory.get_ptr(obj, 1);
format!("({} @ {})", self.obj_name(x), self.obj_name(y))
},
CLASS_CHARACTER_PTR => {
let chr = self.memory.get_ptr(obj, 0);
if chr.is_integer() && chr.as_integer() >= 0 && chr.as_integer() <= 255 {
format!("{:?}", chr.as_integer() as u8 as char)
} else {
format!("aCharacter({:?})", chr)
}
}
klass => {
if self.memory.get_class_of(klass) == OOP::pointer(0x1E) {
format!("({} class)", self.class_name(obj))
} else {
format!("a{}", self.class_name(klass))
}
}
};
format!("{}({:?})", name, obj)
}
pub fn print_methodcall(&self) -> String{
let rcvr = self.stack_value(self.argument_count);
let mut result =
format!("{} {}",
self.obj_name(rcvr),
read_st_string(&self.memory, self.message_selector));
for i in 0..self.argument_count {
let arg = self.stack_value(self.argument_count - i - 1);
result += &format!(" {}", self.obj_name(arg));
}
result
}
} |
//! Various tools and techniques for working with expressions.
// Reverse-Polish-notation expression handling.
pub mod rpn;
// Infix-to-postfix conversion via the shunting-yard algorithm.
pub mod shunting_yard;
|
use super::stream::Record;
use crate::common;
use crate::common::types::Value;
use json;
use ordered_float::OrderedFloat;
use regex::Regex;
use url;
use json::JsonValue;
use linked_hash_map::LinkedHashMap;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::BufRead;
use std::path::Path;
use std::result;
use std::str::FromStr;
lazy_static! {
    // Log-line tokenizer: each match is either a bare run of characters
    // (no whitespace, quotes or brackets), a "double-quoted" string, a
    // 'single-quoted' string, or a [bracketed] span such as a timestamp.
    static ref SPLIT_READER_LINE_REGEX: Regex =
        Regex::new(r#"[^\s"'\[\]]+|"([^"]*)"|'([^']*)'|\[([^\[\]]*)\]"#).unwrap();
}
/// Logical column type a raw log field is parsed into.
#[derive(PartialEq, Eq, Debug, Clone)]
pub(crate) enum DataType {
    DateTime,
    String,
    Integral,
    Float,
    /// host[:port] endpoint, e.g. client or backend address.
    Host,
    /// Full "METHOD url HTTP/x.y" request line.
    HttpRequest,
}
impl fmt::Display for DataType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let name = match self {
DataType::DateTime => "DateTime",
DataType::String => "String",
DataType::Integral => "Integral",
DataType::Float => "Float",
DataType::Host => "Host",
DataType::HttpRequest => "HttpRequest",
};
write!(f, "{}", name)
}
}
lazy_static! {
    // Column types for classic ELB logs; order matches AWS_ELB_FIELD_NAMES.
    static ref AWS_ELB_DATATYPES: Vec<DataType> = {
        vec![
            DataType::DateTime,    // timestamp
            DataType::String,      // elbname
            DataType::Host,        // client_and_port
            DataType::Host,        // backend_and_port
            DataType::Float,       // request_processing_time
            DataType::Float,       // backend_processing_time
            DataType::Float,       // response_processing_time
            DataType::String,      // elb_status_code
            DataType::String,      // backend_status_code
            DataType::Integral,    // received_bytes
            DataType::Integral,    // sent_bytes
            DataType::HttpRequest, // request
            DataType::String,      // user_agent
            DataType::String,      // ssl_cipher
            DataType::String,      // ssl_protocol
            DataType::String,      // target_group_arn
            DataType::String,      // trace_id
        ]
    };
}
lazy_static! {
    // Column names for classic ELB logs, in on-line order.
    static ref AWS_ELB_FIELD_NAMES: Vec<String> = [
        "timestamp",
        "elbname",
        "client_and_port",
        "backend_and_port",
        "request_processing_time",
        "backend_processing_time",
        "response_processing_time",
        "elb_status_code",
        "backend_status_code",
        "received_bytes",
        "sent_bytes",
        "request",
        "user_agent",
        "ssl_cipher",
        "ssl_protocol",
        "target_group_arn",
        "trace_id",
    ]
    .iter()
    .map(|name| name.to_string())
    .collect();
}
lazy_static! {
    // Column types for ALB logs; order matches AWS_ALB_FIELD_NAMES.
    static ref AWS_ALB_DATATYPES: Vec<DataType> = {
        vec![
            DataType::String,      // type
            DataType::DateTime,    // timestamp
            DataType::String,      // elb
            DataType::Host,        // client_and_port
            DataType::Host,        // target_and_port
            DataType::Float,       // request_processing_time
            DataType::Float,       // target_processing_time
            DataType::Float,       // response_processing_time
            DataType::String,      // elb_status_code
            DataType::String,      // target_status_code
            DataType::Integral,    // received_bytes
            DataType::Integral,    // sent_bytes
            DataType::HttpRequest, // request
            DataType::String,      // user_agent
            DataType::String,      // ssl_cipher
            DataType::String,      // ssl_protocol
            DataType::String,      // target_group_arn
            DataType::String,      // trace_id
            DataType::String,      // domain_name
            DataType::String,      // chosen_cert_arn
            DataType::String,      // matched_rule_priority
            DataType::String,      // request_creation_time
            DataType::String,      // action_executed
            DataType::String,      // redirect_url
            DataType::String,      // error_reason
        ]
    };
}
lazy_static! {
    // Column names for ALB logs, in on-line order.
    static ref AWS_ALB_FIELD_NAMES: Vec<String> = [
        "type",
        "timestamp",
        "elb",
        "client_and_port",
        "target_and_port",
        "request_processing_time",
        "target_processing_time",
        "response_processing_time",
        "elb_status_code",
        "target_status_code",
        "received_bytes",
        "sent_bytes",
        "request",
        "user_agent",
        "ssl_cipher",
        "ssl_protocol",
        "target_group_arn",
        "trace_id",
        "domain_name",
        "chosen_cert_arn",
        "matched_rule_priority",
        "request_creation_time",
        "action_executed",
        "redirect_url",
        "error_reason",
    ]
    .iter()
    .map(|name| name.to_string())
    .collect();
}
lazy_static! {
    // Column names for S3 access logs, in on-line order.
    // NOTE: "refererr" is spelled exactly as queries expect it.
    static ref AWS_S3_FIELD_NAMES: Vec<String> = [
        "bucket_owner",
        "bucket",
        "time",
        "remote_ip",
        "requester",
        "request_id",
        "operation",
        "key",
        "request_uri",
        "http_status",
        "error_code",
        "bytes_sent",
        "object_size",
        "total_time",
        "turn_around_time",
        "refererr",
        "user_agent",
        "version_id",
        "host_id",
        "signature_version",
        "cipher_suite",
        "authentication_type",
        "host_header",
        "tls_version",
    ]
    .iter()
    .map(|name| name.to_string())
    .collect();
}
lazy_static! {
    // Every S3 access-log column is handled as a plain string
    // (24 columns, matching AWS_S3_FIELD_NAMES).
    static ref AWS_S3_DATATYPES: Vec<DataType> = vec![DataType::String; 24];
}
lazy_static! {
    // Column names for squid native access logs, in on-line order.
    static ref SQUID_FIELD_NAMES: Vec<String> = [
        "timestamp",
        "elapsed",
        "remote_host",
        "code_and_status",
        "bytes",
        "method",
        "url",
        "rfc931",
        "peer_status_and_peer_host",
        "type",
    ]
    .iter()
    .map(|name| name.to_string())
    .collect();
}
lazy_static! {
    // Every squid log column is handled as a plain string
    // (10 columns, matching SQUID_FIELD_NAMES).
    static ref SQUID_DATATYPES: Vec<DataType> = vec![DataType::String; 10];
}
//Reference: https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html
/// Column positions of a classic ELB access-log record; the discriminant
/// is the zero-based field index on the log line.
pub(crate) enum ClassicLoadBalancerLogField {
    Timestamp = 0,
    Elbname = 1,
    ClientAndPort = 2,
    BackendAndPort = 3,
    RequestProcessingTime = 4,
    BackendProcessingTime = 5,
    ResponseProcessingTime = 6,
    ELBStatusCode = 7,
    BackendStatusCode = 8,
    ReceivedBytes = 9,
    SentBytes = 10,
    Request = 11,
    UserAgent = 12,
    SSLCipher = 13,
    SSLProtocol = 14,
    TargetGroupArn = 15,
    TraceID = 16,
}
impl fmt::Display for ClassicLoadBalancerLogField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let name = match self {
ClassicLoadBalancerLogField::Timestamp => "timestamp",
ClassicLoadBalancerLogField::Elbname => "elbname",
ClassicLoadBalancerLogField::ClientAndPort => "client_and_port",
ClassicLoadBalancerLogField::BackendAndPort => "backend_and_port",
ClassicLoadBalancerLogField::RequestProcessingTime => "request_processing_time",
ClassicLoadBalancerLogField::BackendProcessingTime => "backend_processing_time",
ClassicLoadBalancerLogField::ResponseProcessingTime => "response_processing_time",
ClassicLoadBalancerLogField::ELBStatusCode => "elb_status_code",
ClassicLoadBalancerLogField::BackendStatusCode => "backend_status_code",
ClassicLoadBalancerLogField::ReceivedBytes => "received_bytes",
ClassicLoadBalancerLogField::SentBytes => "sent_bytes",
ClassicLoadBalancerLogField::Request => "request",
ClassicLoadBalancerLogField::UserAgent => "user_agent",
ClassicLoadBalancerLogField::SSLCipher => "ssl_cipher",
ClassicLoadBalancerLogField::SSLProtocol => "ssl_protocol",
ClassicLoadBalancerLogField::TargetGroupArn => "target_group_arn",
ClassicLoadBalancerLogField::TraceID => "trace_id",
};
write!(f, "{}", name)
}
}
impl FromStr for ClassicLoadBalancerLogField {
type Err = String;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
match s {
"timestamp" => Ok(ClassicLoadBalancerLogField::Timestamp),
"elbname" => Ok(ClassicLoadBalancerLogField::Elbname),
"client_and_port" => Ok(ClassicLoadBalancerLogField::ClientAndPort),
"backend_and_port" => Ok(ClassicLoadBalancerLogField::BackendAndPort),
"request_processing_time" => Ok(ClassicLoadBalancerLogField::RequestProcessingTime),
"backend_processing_time" => Ok(ClassicLoadBalancerLogField::BackendProcessingTime),
"response_processing_time" => Ok(ClassicLoadBalancerLogField::ResponseProcessingTime),
"elb_status_code" => Ok(ClassicLoadBalancerLogField::ELBStatusCode),
"backend_status_code" => Ok(ClassicLoadBalancerLogField::BackendStatusCode),
"received_bytes" => Ok(ClassicLoadBalancerLogField::ReceivedBytes),
"sent_bytes" => Ok(ClassicLoadBalancerLogField::SentBytes),
"request" => Ok(ClassicLoadBalancerLogField::Request),
"user_agent" => Ok(ClassicLoadBalancerLogField::UserAgent),
"ssl_cipher" => Ok(ClassicLoadBalancerLogField::SSLCipher),
"ssl_protocol" => Ok(ClassicLoadBalancerLogField::SSLProtocol),
"target_group_arn" => Ok(ClassicLoadBalancerLogField::TargetGroupArn),
"trace_id" => Ok(ClassicLoadBalancerLogField::TraceID),
_ => Err("unknown column name".to_string()),
}
}
}
impl ClassicLoadBalancerLogField {
    /// Number of columns in a classic ELB record. Derived from the
    /// field-name table so the two cannot drift apart (was a hard-coded 17).
    pub(crate) fn len() -> usize {
        AWS_ELB_FIELD_NAMES.len()
    }
    /// Column names, in record order.
    pub(crate) fn field_names<'a>() -> &'a Vec<String> {
        &AWS_ELB_FIELD_NAMES
    }
    /// Column data types, in record order.
    pub(crate) fn datatypes() -> Vec<DataType> {
        AWS_ELB_DATATYPES.clone()
    }
    /// Data type of column `idx`; panics when `idx` is out of range.
    pub(crate) fn datatype(idx: usize) -> DataType {
        AWS_ELB_DATATYPES[idx].clone()
    }
    /// (name, type) pairs describing the whole record schema.
    pub(crate) fn schema() -> Vec<(String, DataType)> {
        let fields = Self::field_names().clone();
        let datatypes = Self::datatypes();
        fields.into_iter().zip(datatypes.into_iter()).collect()
    }
}
//Reference: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html
/// Column positions of an ALB access-log record; the discriminant is the
/// zero-based field index on the log line.
pub(crate) enum ApplicationLoadBalancerLogField {
    Type = 0,
    Timestamp = 1,
    Elbname = 2,
    ClientAndPort = 3,
    TargetAndPort = 4,
    RequestProcessingTime = 5,
    TargetProcessingTime = 6,
    ResponseProcessingTime = 7,
    ELBStatusCode = 8,
    TargetStatusCode = 9,
    ReceivedBytes = 10,
    SentBytes = 11,
    Request = 12,
    UserAgent = 13,
    SSLCipher = 14,
    SSLProtocol = 15,
    TargetGroupArn = 16,
    TraceID = 17,
    DomainName = 18,
    ChosenCertArn = 19,
    MatchedRulePriority = 20,
    RequestCreationTime = 21,
    ActionExecuted = 22,
    RedirectUrl = 23,
    ErrorReason = 24,
}
impl FromStr for ApplicationLoadBalancerLogField {
type Err = String;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
match s {
"type" => Ok(ApplicationLoadBalancerLogField::Type),
"timestamp" => Ok(ApplicationLoadBalancerLogField::Timestamp),
"elbname" => Ok(ApplicationLoadBalancerLogField::Elbname),
"client_and_port" => Ok(ApplicationLoadBalancerLogField::ClientAndPort),
"backend_and_port" => Ok(ApplicationLoadBalancerLogField::TargetAndPort),
"request_processing_time" => Ok(ApplicationLoadBalancerLogField::RequestProcessingTime),
"backend_processing_time" => Ok(ApplicationLoadBalancerLogField::TargetProcessingTime),
"response_processing_time" => Ok(ApplicationLoadBalancerLogField::ResponseProcessingTime),
"elb_status_code" => Ok(ApplicationLoadBalancerLogField::ELBStatusCode),
"backend_status_code" => Ok(ApplicationLoadBalancerLogField::TargetStatusCode),
"received_bytes" => Ok(ApplicationLoadBalancerLogField::ReceivedBytes),
"sent_bytes" => Ok(ApplicationLoadBalancerLogField::SentBytes),
"request" => Ok(ApplicationLoadBalancerLogField::Request),
"user_agent" => Ok(ApplicationLoadBalancerLogField::UserAgent),
"ssl_cipher" => Ok(ApplicationLoadBalancerLogField::SSLCipher),
"ssl_protocol" => Ok(ApplicationLoadBalancerLogField::SSLProtocol),
"target_group_arn" => Ok(ApplicationLoadBalancerLogField::TargetGroupArn),
"trace_id" => Ok(ApplicationLoadBalancerLogField::TraceID),
"domain_name" => Ok(ApplicationLoadBalancerLogField::DomainName),
"chosen_cert_arn" => Ok(ApplicationLoadBalancerLogField::ChosenCertArn),
"matched_rule_priority" => Ok(ApplicationLoadBalancerLogField::MatchedRulePriority),
"request_creation_time" => Ok(ApplicationLoadBalancerLogField::RequestCreationTime),
"action_executed" => Ok(ApplicationLoadBalancerLogField::ActionExecuted),
"redirect_url" => Ok(ApplicationLoadBalancerLogField::RedirectUrl),
"error_reason" => Ok(ApplicationLoadBalancerLogField::ErrorReason),
_ => Err("unknown column name".to_string()),
}
}
}
impl ApplicationLoadBalancerLogField {
    /// Number of columns in an ALB record. Derived from the field-name
    /// table so the two cannot drift apart (was a hard-coded 25).
    pub(crate) fn len() -> usize {
        AWS_ALB_FIELD_NAMES.len()
    }
    /// Column names, in record order.
    pub(crate) fn field_names<'a>() -> &'a Vec<String> {
        &AWS_ALB_FIELD_NAMES
    }
    /// Column data types, in record order.
    pub(crate) fn datatypes() -> Vec<DataType> {
        AWS_ALB_DATATYPES.clone()
    }
    /// Data type of column `idx`; panics when `idx` is out of range.
    pub(crate) fn datatype(idx: usize) -> DataType {
        AWS_ALB_DATATYPES[idx].clone()
    }
    /// (name, type) pairs describing the whole record schema.
    pub(crate) fn schema() -> Vec<(String, DataType)> {
        let fields = Self::field_names().clone();
        let datatypes = Self::datatypes();
        fields.into_iter().zip(datatypes.into_iter()).collect()
    }
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/LogFormat.html
/// Column positions of an S3 server-access-log record; the discriminant
/// is the zero-based field index on the log line.
pub(crate) enum S3Field {
    BucketOwner = 0,
    Bucket = 1,
    Time = 2,
    RemoteIp = 3,
    Requester = 4,
    RequestId = 5,
    Operation = 6,
    Key = 7,
    RequestUri = 8,
    HttpStatus = 9,
    ErrorCode = 10,
    BytesSent = 11,
    ObjectSize = 12,
    TotalTime = 13,
    TurnAroundTime = 14,
    Referrer = 15,
    UserAgent = 16,
    VersionId = 17,
    HostId = 18,
    SignatureVersion = 19,
    CipherSuite = 20,
    AuthenticationType = 21,
    HostHeader = 22,
    TlsVersion = 23,
}
impl FromStr for S3Field {
type Err = String;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
match s {
"bucket_owner" => Ok(S3Field::BucketOwner),
"bucket" => Ok(S3Field::Bucket),
"time" => Ok(S3Field::Time),
"remote_ip" => Ok(S3Field::RemoteIp),
"requester" => Ok(S3Field::Requester),
"request_id" => Ok(S3Field::RequestId),
"operation" => Ok(S3Field::Operation),
"key" => Ok(S3Field::Key),
"request_uri" => Ok(S3Field::RequestUri),
"http_status" => Ok(S3Field::HttpStatus),
"error_code" => Ok(S3Field::ErrorCode),
"bytes_sent" => Ok(S3Field::BytesSent),
"object_size" => Ok(S3Field::ObjectSize),
"total_time" => Ok(S3Field::TotalTime),
"turn_around_time" => Ok(S3Field::TurnAroundTime),
"refererr" => Ok(S3Field::Referrer),
"user_agent" => Ok(S3Field::UserAgent),
"version_id" => Ok(S3Field::VersionId),
"host_id" => Ok(S3Field::HostId),
"signature_version" => Ok(S3Field::SignatureVersion),
"cipher_suite" => Ok(S3Field::CipherSuite),
"authentication_type" => Ok(S3Field::AuthenticationType),
"host_header" => Ok(S3Field::HostHeader),
"tls_version" => Ok(S3Field::TlsVersion),
_ => Err("unknown column name".to_string()),
}
}
}
impl S3Field {
    /// Number of columns in an S3 record. Derived from the field-name
    /// table so the two cannot drift apart (was a hard-coded 24).
    pub(crate) fn len() -> usize {
        AWS_S3_FIELD_NAMES.len()
    }
    /// Column names, in record order.
    pub(crate) fn field_names<'a>() -> &'a Vec<String> {
        &AWS_S3_FIELD_NAMES
    }
    /// Column data types, in record order.
    pub(crate) fn datatypes() -> Vec<DataType> {
        AWS_S3_DATATYPES.clone()
    }
    /// Data type of column `idx`; panics when `idx` is out of range.
    pub(crate) fn datatype(idx: usize) -> DataType {
        AWS_S3_DATATYPES[idx].clone()
    }
    /// (name, type) pairs describing the whole record schema.
    pub(crate) fn schema() -> Vec<(String, DataType)> {
        let fields = Self::field_names().clone();
        let datatypes = Self::datatypes();
        fields.into_iter().zip(datatypes.into_iter()).collect()
    }
}
//Reference: https://wiki.squid-cache.org/Features/LogFormat
/// Column positions of a squid native access-log record; the discriminant
/// is the zero-based field index on the log line.
pub(crate) enum SquidLogField {
    Timestamp = 0,
    Elapsed = 1,
    RemoteHost = 2,
    CodeAndStatus = 3,
    Bytes = 4,
    Method = 5,
    Url = 6,
    Rfc931 = 7,
    PeerstatusAndPeerhost = 8,
    Type = 9,
}
impl FromStr for SquidLogField {
type Err = String;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
match s {
"timestamp" => Ok(SquidLogField::Timestamp),
"elapsed" => Ok(SquidLogField::Elapsed),
"remote_host" => Ok(SquidLogField::RemoteHost),
"code_and_status" => Ok(SquidLogField::CodeAndStatus),
"bytes" => Ok(SquidLogField::Bytes),
"method" => Ok(SquidLogField::Method),
"url" => Ok(SquidLogField::Url),
"rfc931" => Ok(SquidLogField::Rfc931),
"peer_status_and_peer_host" => Ok(SquidLogField::PeerstatusAndPeerhost),
"type" => Ok(SquidLogField::Type),
_ => Err("unknown column name".to_string()),
}
}
}
impl SquidLogField {
    /// Number of columns in a squid record. Derived from the field-name
    /// table so the two cannot drift apart (was a hard-coded 10).
    pub(crate) fn len() -> usize {
        SQUID_FIELD_NAMES.len()
    }
    /// Column names, in record order.
    pub(crate) fn field_names<'a>() -> &'a Vec<String> {
        &SQUID_FIELD_NAMES
    }
    /// Column data types, in record order.
    pub(crate) fn datatypes() -> Vec<DataType> {
        SQUID_DATATYPES.clone()
    }
    /// Data type of column `idx`; panics when `idx` is out of range.
    pub(crate) fn datatype(idx: usize) -> DataType {
        SQUID_DATATYPES[idx].clone()
    }
    /// (name, type) pairs describing the whole record schema.
    pub(crate) fn schema() -> Vec<(String, DataType)> {
        let fields = Self::field_names().clone();
        let datatypes = Self::datatypes();
        fields.into_iter().zip(datatypes.into_iter()).collect()
    }
}
/// Result alias for record-reading operations.
pub(crate) type ReaderResult<T> = result::Result<T, ReaderError>;
/// Failure modes surfaced while reading and parsing log records. Each
/// variant wraps its underlying cause and displays as that cause.
#[derive(Fail, Debug)]
pub(crate) enum ReaderError {
    #[fail(display = "{}", _0)]
    Io(#[cause] io::Error),
    #[fail(display = "{}", _0)]
    ParseDateTime(#[cause] chrono::format::ParseError),
    #[fail(display = "{}", _0)]
    ParseIntegral(#[cause] std::num::ParseIntError),
    #[fail(display = "{}", _0)]
    ParseFloat(#[cause] std::num::ParseFloatError),
    #[fail(display = "{}", _0)]
    ParseUrl(#[cause] url::ParseError),
    #[fail(display = "{}", _0)]
    ParseHost(#[cause] common::types::ParseHostError),
    #[fail(display = "{}", _0)]
    ParseHttpRequest(#[cause] common::types::ParseHttpRequestError),
    #[fail(display = "{}", _0)]
    ParseJson(#[cause] json::JsonError),
}
// Enables `?` on std I/O calls inside the readers.
impl From<io::Error> for ReaderError {
    fn from(err: io::Error) -> ReaderError {
        ReaderError::Io(err)
    }
}
// Enables `?` on chrono date/time parsing.
impl From<chrono::format::ParseError> for ReaderError {
    fn from(err: chrono::format::ParseError) -> ReaderError {
        ReaderError::ParseDateTime(err)
    }
}
// Enables `?` on integer field parsing.
impl From<std::num::ParseIntError> for ReaderError {
    fn from(err: std::num::ParseIntError) -> ReaderError {
        ReaderError::ParseIntegral(err)
    }
}
// Enables `?` on float field parsing.
impl From<std::num::ParseFloatError> for ReaderError {
    fn from(err: std::num::ParseFloatError) -> ReaderError {
        ReaderError::ParseFloat(err)
    }
}
// Enables `?` on host[:port] field parsing.
impl From<common::types::ParseHostError> for ReaderError {
    fn from(err: common::types::ParseHostError) -> ReaderError {
        ReaderError::ParseHost(err)
    }
}
// Enables `?` on HTTP request-line field parsing.
impl From<common::types::ParseHttpRequestError> for ReaderError {
    fn from(err: common::types::ParseHttpRequestError) -> ReaderError {
        ReaderError::ParseHttpRequest(err)
    }
}
// Enables `?` on URL field parsing.
impl From<url::ParseError> for ReaderError {
    fn from(err: url::ParseError) -> ReaderError {
        ReaderError::ParseUrl(err)
    }
}
// Enables `?` on JSON-line parsing.
impl From<json::JsonError> for ReaderError {
    fn from(err: json::JsonError) -> ReaderError {
        ReaderError::ParseJson(err)
    }
}
/// Configuration for constructing `Reader`s over log inputs.
#[derive(Debug)]
pub(crate) struct ReaderBuilder {
    // Buffer capacity (bytes) for the BufReader wrapped around the input.
    capacity: usize,
    // Input format tag: "elb", "alb", "s3", "squid" or "jsonl".
    file_format: String,
}
/// Source of parsed records. NOTE(review): `Ok(None)` presumably signals
/// end of input — confirm against the `Reader` implementation.
pub(crate) trait RecordRead {
    fn read_record(&mut self) -> ReaderResult<Option<Record>>;
}
impl ReaderBuilder {
    /// Create a builder for `file_format` ("elb", "alb", "s3", "squid" or
    /// "jsonl") with the default 8 KiB buffer capacity.
    pub(crate) fn new(file_format: String) -> Self {
        ReaderBuilder {
            capacity: 8 * (1 << 10), // 8 KiB read buffer
            // Idiom fix: field-init shorthand (was `file_format: file_format`).
            file_format,
        }
    }
    /// Open the file at `path` and wrap it in a `Reader`.
    pub(crate) fn with_path<P: AsRef<Path>>(&self, path: P) -> ReaderResult<Reader<File>> {
        Ok(Reader::new(self, File::open(path)?, self.file_format.clone()))
    }
    /// Wrap an arbitrary `io::Read` source in a `Reader` (used in tests).
    #[allow(dead_code)]
    pub(crate) fn with_reader<R: io::Read>(&self, rdr: R) -> Reader<R> {
        Reader::new(self, rdr, self.file_format.clone())
    }
}
/// Recursively convert a parsed `json::JsonValue` into the crate's
/// `Value` data model.
///
/// Numbers are inspected at 4 decimal digits of fixed-point precision:
/// whole values become `Value::Int` (truncated to i32, preserved from the
/// original implementation), anything else `Value::Float`. Numbers that
/// do not fit the fixed-point representation now fall back to a lossy
/// float instead of panicking — the previous `unwrap()` aborted the
/// process on such (untrusted) input.
fn json_to_data_model(parsed: &JsonValue) -> Value {
    match parsed {
        json::JsonValue::Object(o) => {
            let map: LinkedHashMap<String, Value> = o
                .iter()
                .map(|(k, v)| (k.to_string(), json_to_data_model(v)))
                .collect();
            Value::Object(map)
        }
        json::JsonValue::Array(a) => {
            Value::Array(a.iter().map(json_to_data_model).collect())
        }
        json::JsonValue::Null => Value::Null,
        json::JsonValue::String(s) => Value::String(s.clone()),
        json::JsonValue::Short(s) => Value::String(s.to_string()),
        json::JsonValue::Boolean(b) => Value::Boolean(*b),
        json::JsonValue::Number(n) => match n.as_fixed_point_i64(4) {
            Some(fixed) if fixed % 10000 == 0 => Value::Int((fixed / 10000) as i32),
            Some(fixed) => Value::Float(OrderedFloat::from(fixed as f32 / 10000.0)),
            // Out of fixed-point range (huge magnitude/exponent): keep a
            // lossy float rather than panicking.
            None => Value::Float(OrderedFloat::from(f64::from(*n) as f32)),
        },
    }
}
/// A buffered record reader over any `io::Read` source.
#[derive(Debug)]
pub(crate) struct Reader<R> {
    // Buffered underlying source; capacity comes from `ReaderBuilder`.
    rdr: io::BufReader<R>,
    // Log format tag copied from the builder ("elb", "alb", "s3", "squid",
    // or "jsonl").
    file_format: String,
}
impl<R: io::Read> Reader<R> {
    /// Wrap `rdr` in a `BufReader` sized from the builder's capacity.
    pub(crate) fn new(builder: &ReaderBuilder, rdr: R, file_format: String) -> Reader<R> {
        let rdr = io::BufReader::with_capacity(builder.capacity, rdr);
        Reader { rdr, file_format }
    }
    /// No-op placeholder for an explicit close operation.
    #[allow(dead_code)]
    fn close(&self) {}
}
impl<R: io::Read> RecordRead for Reader<R> {
    /// Read one line from the underlying source and parse it into a record.
    ///
    /// Returns `Ok(Some(record))` for a parsed line, `Ok(None)` at end of
    /// input, and an error when any field fails to parse for its declared
    /// `DataType`.
    fn read_record(&mut self) -> ReaderResult<Option<Record>> {
        let mut buf = String::new();
        // Number of bytes read; 0 means end of input.
        let more_data = self.rdr.read_line(&mut buf)?;
        if more_data > 0 && self.file_format != "jsonl" {
            // Column names for the configured format; formats other than
            // elb/alb/s3 fall back to the Squid field set here.
            let field_names = if self.file_format == "elb" {
                ClassicLoadBalancerLogField::field_names()
            } else if self.file_format == "alb" {
                ApplicationLoadBalancerLogField::field_names()
            } else if self.file_format == "s3" {
                S3Field::field_names()
            } else {
                SquidLogField::field_names()
            };
            let mut record_vars = common::types::Variables::default();
            // Count of columns actually filled, used for null-padding below.
            let mut value_cnt: usize = 0;
            // Tokenize the line; each regex match is one column value.
            for (i, m) in SPLIT_READER_LINE_REGEX.find_iter(&buf).enumerate() {
                // Stop once we have consumed as many tokens as the format
                // defines; extra trailing tokens are ignored.
                // NOTE(review): a format string other than the four listed
                // panics here, even though `field_names` above silently
                // defaulted to Squid — the format is presumably validated
                // upstream; confirm.
                if self.file_format == "elb" {
                    if i >= ClassicLoadBalancerLogField::len() {
                        break;
                    }
                } else if self.file_format == "alb" {
                    if i >= ApplicationLoadBalancerLogField::len() {
                        break;
                    }
                } else if self.file_format == "squid" {
                    if i >= SquidLogField::len() {
                        break;
                    }
                } else if self.file_format == "s3" {
                    if i >= S3Field::len() {
                        break;
                    }
                } else {
                    unreachable!();
                }
                let s = m.as_str();
                // Expected type of column `i` for this format.
                let datatype = if self.file_format == "elb" {
                    ClassicLoadBalancerLogField::datatype(i)
                } else if self.file_format == "alb" {
                    ApplicationLoadBalancerLogField::datatype(i)
                } else if self.file_format == "s3" {
                    S3Field::datatype(i)
                } else {
                    SquidLogField::datatype(i)
                };
                // Parse the raw token into the typed `Value` for its column.
                match datatype {
                    DataType::DateTime => {
                        let dt = chrono::DateTime::parse_from_rfc3339(s)?;
                        record_vars.insert(field_names[i].clone(), Value::DateTime(dt));
                    }
                    DataType::String => {
                        record_vars.insert(field_names[i].clone(), Value::String(s.to_string()));
                    }
                    DataType::Integral => {
                        let i_val = s.parse::<i32>()?;
                        record_vars.insert(field_names[i].clone(), Value::Int(i_val));
                    }
                    DataType::Float => {
                        let f = s.parse::<f32>()?;
                        record_vars.insert(field_names[i].clone(), Value::Float(OrderedFloat::from(f)));
                    }
                    DataType::Host => {
                        // "-" is the log convention for a missing host.
                        if s == "-" {
                            record_vars.insert(field_names[i].clone(), Value::Null);
                        } else {
                            let host = common::types::parse_host(s)?;
                            record_vars.insert(field_names[i].clone(), Value::Host(host));
                        }
                    }
                    DataType::HttpRequest => {
                        // Request tokens are quoted in the raw log line.
                        let s = s.trim_matches('"');
                        let request = common::types::parse_http_request(s)?;
                        record_vars.insert(field_names[i].clone(), Value::HttpRequest(request));
                    }
                }
                value_cnt += 1;
            }
            // Adjust the width to be the same: pad short lines with nulls so
            // every record carries the full column set.
            while value_cnt < field_names.len() {
                record_vars.insert(field_names[value_cnt].clone(), Value::Null);
                value_cnt += 1;
            }
            let record = Record::new_with_variables(record_vars);
            Ok(Some(record))
        } else if more_data > 0 && self.file_format == "jsonl" {
            // JSON-lines mode: each input line must parse as a JSON object.
            let parsed = json::parse(&buf)?;
            let data_model = json_to_data_model(&parsed);
            match data_model {
                Value::Object(o) => {
                    let record = Record::new_with_variables(o);
                    Ok(Some(record))
                }
                _ => {
                    unimplemented!("Array or value on the first layer is not supported yet")
                }
            }
        } else {
            Ok(None)
        }
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the per-format log readers, driven from in-memory
    //! byte slices via `ReaderBuilder::with_reader`.
    use super::*;
    use crate::common;
    use chrono;
    use std::io::BufReader;
    use std::str::FromStr;
    // ELB: one line with TLS fields and one with the optional trailing
    // fields, checked against the full expected column vector (short lines
    // are null-padded by the reader).
    #[test]
    fn test_aws_elb_reader() {
        let content = r#"2015-11-07T18:45:33.559871Z elb1 78.168.134.92:4586 10.0.0.215:80 0.000036 0.001035 0.000025 200 200 0 42355 "GET https://example.com:443/ HTTP/1.1" "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2"#;
        let mut reader = ReaderBuilder::new("elb".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record().unwrap();
        let fields = ClassicLoadBalancerLogField::field_names();
        let data = vec![
            Value::DateTime(chrono::DateTime::parse_from_rfc3339("2015-11-07T18:45:33.559871Z").unwrap()),
            Value::String("elb1".to_string()),
            Value::Host(common::types::parse_host("78.168.134.92:4586").unwrap()),
            Value::Host(common::types::parse_host("10.0.0.215:80").unwrap()),
            Value::Float(OrderedFloat::from(0.000_036)),
            Value::Float(OrderedFloat::from(0.001_035)),
            Value::Float(OrderedFloat::from(0.000_025)),
            Value::String("200".to_string()),
            Value::String("200".to_string()),
            Value::Int(0),
            Value::Int(42355),
            Value::HttpRequest(common::types::parse_http_request("GET https://example.com:443/ HTTP/1.1").unwrap()),
            Value::String("\"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36\"".to_string()),
            Value::String("ECDHE-RSA-AES128-GCM-SHA256".to_string()),
            Value::String("TLSv1.2".to_string()),
            // The line has fewer tokens than the format; trailing columns
            // are padded with nulls.
            Value::Null,
            Value::Null
        ];
        let expected: Option<Record> = Some(Record::new(fields, data));
        assert_eq!(expected, record);
        let content = r#"2015-11-07T18:45:37.691548Z elb1 176.219.166.226:48384 10.0.2.143:80 0.000023 0.000348 0.000025 200 200 0 41690 "GET http://example.com:80/?mode=json&after=&iteration=1 HTTP/1.1" "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/LMY48I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/52.0.0.12.18;]" - - arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337262-36d228ad5d99923122bbe354""#;
        let mut reader = ReaderBuilder::new("elb".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record().unwrap();
        let fields = ClassicLoadBalancerLogField::field_names();
        let data = vec![
            Value::DateTime(chrono::DateTime::parse_from_rfc3339("2015-11-07T18:45:37.691548Z").unwrap()),
            Value::String("elb1".to_string()),
            Value::Host(common::types::parse_host("176.219.166.226:48384").unwrap()),
            Value::Host(common::types::parse_host("10.0.2.143:80").unwrap()),
            Value::Float(OrderedFloat::from(0.000_023)),
            Value::Float(OrderedFloat::from(0.000_348)),
            Value::Float(OrderedFloat::from(0.000_025)),
            Value::String("200".to_string()),
            Value::String("200".to_string()),
            Value::Int(0),
            Value::Int(41690),
            Value::HttpRequest(common::types::parse_http_request("GET http://example.com:80/?mode=json&after=&iteration=1 HTTP/1.1").unwrap()),
            Value::String("\"Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/LMY48I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/52.0.0.12.18;]\"".to_string()),
            Value::String("-".to_string()),
            Value::String("-".to_string()),
            Value::String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067".to_string()),
            Value::String("\"Root=1-58337262-36d228ad5d99923122bbe354\"".to_string()),
        ];
        let expected: Option<Record> = Some(Record::new(fields, data));
        assert_eq!(expected, record)
    }
    // ALB: full line, exercising typed timestamp/host/int/request columns
    // plus the string-typed trailing columns.
    #[test]
    fn test_aws_alb_reader() {
        let content = r#"http 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 10.0.0.1:80 0.000 0.001 0.000 200 200 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337262-36d228ad5d99923122bbe354" "-" "-" 0 2018-07-02T22:22:48.364000Z "forward" "-" "-""#;
        let mut reader = ReaderBuilder::new("alb".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record().unwrap();
        let fields = ApplicationLoadBalancerLogField::field_names();
        let data = vec![
            Value::String("http".to_string()),
            Value::DateTime(chrono::DateTime::parse_from_rfc3339("2018-07-02T22:23:00.186641Z").unwrap()),
            Value::String("app/my-loadbalancer/50dc6c495c0c9188".to_string()),
            Value::Host(common::types::parse_host("192.168.131.39:2817").unwrap()),
            Value::Host(common::types::parse_host("10.0.0.1:80").unwrap()),
            Value::Float(OrderedFloat::from(0.000)),
            Value::Float(OrderedFloat::from(0.001)),
            Value::Float(OrderedFloat::from(0.000)),
            Value::String("200".to_string()),
            Value::String("200".to_string()),
            Value::Int(34),
            Value::Int(366),
            Value::HttpRequest(common::types::parse_http_request("GET http://www.example.com:80/ HTTP/1.1").unwrap()),
            Value::String("\"curl/7.46.0\"".to_string()),
            Value::String("-".to_string()),
            Value::String("-".to_string()),
            Value::String(
                "arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"
                    .to_string(),
            ),
            Value::String("\"Root=1-58337262-36d228ad5d99923122bbe354\"".to_string()),
            Value::String("\"-\"".to_string()),
            Value::String("\"-\"".to_string()),
            Value::String("0".to_string()),
            Value::String("2018-07-02T22:22:48.364000Z".to_string()),
            Value::String("\"forward\"".to_string()),
            Value::String("\"-\"".to_string()),
            Value::String("\"-\"".to_string()),
        ];
        let expected: Option<Record> = Some(Record::new(fields, data));
        assert_eq!(expected, record);
    }
    // S3 access log: every column is string-typed in this format.
    #[test]
    fn test_aws_s3_reader() {
        let content = r#"79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be awsexamplebucket [06/Feb/2019:00:00:38 +0000] 192.0.2.3 79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be 3E57427F3EXAMPLE REST.GET.VERSIONING - "GET /awsexamplebucket?versioning HTTP/1.1" 200 - 113 - 7 - "-" "S3Console/0.4" - s9lzHYrFp76ZVxRcpX9+5cjAnEH2ROuNkd2BHfIa6UkFVdtjf5mKR3/eTPFvsiP/XV/VLi31234= SigV2 ECDHE-RSA-AES128-GCM-SHA256 AuthHeader awsexamplebucket.s3.amazonaws.com TLSV1.1"#;
        let mut reader = ReaderBuilder::new("s3".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record().unwrap();
        let fields = S3Field::field_names();
        let data = vec![
            Value::String("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be".to_string()),
            Value::String("awsexamplebucket".to_string()),
            Value::String("[06/Feb/2019:00:00:38 +0000]".to_string()),
            Value::String("192.0.2.3".to_string()),
            Value::String("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be".to_string()),
            Value::String("3E57427F3EXAMPLE".to_string()),
            Value::String("REST.GET.VERSIONING".to_string()),
            Value::String("-".to_string()),
            Value::String("\"GET /awsexamplebucket?versioning HTTP/1.1\"".to_string()),
            Value::String("200".to_string()),
            Value::String("-".to_string()),
            Value::String("113".to_string()),
            Value::String("-".to_string()),
            Value::String("7".to_string()),
            Value::String("-".to_string()),
            Value::String("\"-\"".to_string()),
            Value::String("\"S3Console/0.4\"".to_string()),
            Value::String("-".to_string()),
            Value::String("s9lzHYrFp76ZVxRcpX9+5cjAnEH2ROuNkd2BHfIa6UkFVdtjf5mKR3/eTPFvsiP/XV/VLi31234=".to_string()),
            Value::String("SigV2".to_string()),
            Value::String("ECDHE-RSA-AES128-GCM-SHA256".to_string()),
            Value::String("AuthHeader".to_string()),
            Value::String("awsexamplebucket.s3.amazonaws.com".to_string()),
            Value::String("TLSV1.1".to_string()),
        ];
        let expected: Option<Record> = Some(Record::new(fields, data));
        assert_eq!(expected, record);
    }
    // Squid access log: likewise all string columns.
    #[test]
    fn test_squid_reader() {
        let content = r#"1515734740.494 1 [MASKEDIPADDRESS] TCP_DENIED/407 3922 CONNECT d.dropbox.com:443 - HIER_NONE/- text/html"#;
        let mut reader = ReaderBuilder::new("squid".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record().unwrap();
        let fields = SquidLogField::field_names();
        let data = vec![
            Value::String("1515734740.494".to_string()),
            Value::String("1".to_string()),
            Value::String("[MASKEDIPADDRESS]".to_string()),
            Value::String("TCP_DENIED/407".to_string()),
            Value::String("3922".to_string()),
            Value::String("CONNECT".to_string()),
            Value::String("d.dropbox.com:443".to_string()),
            Value::String("-".to_string()),
            Value::String("HIER_NONE/-".to_string()),
            Value::String("text/html".to_string()),
        ];
        let expected: Option<Record> = Some(Record::new(fields, data));
        assert_eq!(expected, record);
    }
    // Whitespace-only input should surface a parse error, not a record.
    #[test]
    fn test_reader_on_empty_input() {
        let content = r#" \n "#;
        let mut reader = ReaderBuilder::new("elb".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record();
        assert_eq!(record.is_err(), true)
    }
    // A truncated/garbled ELB line must fail typed-column parsing.
    #[test]
    fn test_reader_on_malformed_input() {
        let content = r#"2015-11-07T18:45:37.691548Z elb1 176.219.166.226:48384 10.0.2.143:80 0.000 on=1 HTTP/1.1" "Mozilla/5.0 (Linux; Android 5.137.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/52.0.0.12.18;]" - - arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337262-36d228ad5d99923122bbe354""#;
        let mut reader = ReaderBuilder::new("elb".to_string()).with_reader(BufReader::new(content.as_bytes()));
        let record = reader.read_record();
        assert_eq!(record.is_err(), true)
    }
    // Round trip: field name -> enum (FromStr) -> Display must reproduce
    // the original field name.
    #[test]
    fn test_idempotent_property() {
        for field_name in ClassicLoadBalancerLogField::field_names().iter() {
            let field_enum = ClassicLoadBalancerLogField::from_str(field_name).unwrap();
            let format_field_name = format!("{}", field_enum);
            assert_eq!(&format_field_name, field_name)
        }
    }
}
|
#![macro_use]
use std::cell::{Ref, RefCell, RefMut};
use std::clone::Clone;
use std::iter::Peekable;
use std::rc::Rc;
/// Print a formatted message to stdout in debug builds only; the
/// `#[cfg(debug_assertions)]` attribute removes the statement entirely in
/// release builds. Usable only in statement position.
macro_rules! log_debug {
    ($fmt:expr) => (
        #[cfg(debug_assertions)]
        println!($fmt));
    ($fmt:expr, $($arg:tt)*) => (
        #[cfg(debug_assertions)]
        println!($fmt, $($arg)*));
}
/// Macro to create a `String -> T` `HashMap` from a list of `(key, value)`
/// pairs. Keys may be anything with `to_string` (e.g. `&str` literals).
/// The trailing comma is now optional (previously it was required).
/// `HashMap` must be in scope at the call site.
macro_rules! string_hash_map {
    // Canonical rule: comma-separated pairs, no trailing comma.
    [$(($key: expr, $value: expr)), *] => ({
        let mut result = HashMap::new();
        $(
            result.insert($key.to_string(), $value)
        ); *;
        result
    });
    // Backward-compatible rule: accept a trailing comma by delegating to
    // the rule above.
    [$(($key: expr, $value: expr)), *,] => (
        string_hash_map![$(($key, $value)), *]
    );
}
/// Iterator adaptor over a `&mut Peekable<T>` that yields items while a
/// predicate holds but — unlike `Iterator::take_while` — does not consume
/// the first failing item: it remains available in the underlying iterator.
pub struct ExclusiveTakeWhile<'a, T, P>
where
    T: 'a,
    T: Iterator,
{
    iter: &'a mut Peekable<T>,
    pred: P,
}
impl<'a, T: Iterator, P> Iterator for ExclusiveTakeWhile<'a, T, P>
where
    P: FnMut(&T::Item) -> bool,
{
    type Item = T::Item;
    fn next(&mut self) -> Option<Self::Item> {
        // Peek first so that a failing item is left in place for the caller;
        // only advance the underlying iterator when the predicate accepts.
        let keep_going = match self.iter.peek() {
            Some(item) => (self.pred)(item),
            None => false,
        };
        if keep_going {
            self.iter.next()
        } else {
            None
        }
    }
}
/// Extension trait adding `take_while_exclusive` to `Peekable`.
pub trait AsExclusiveTakeWhile<'a, T>
where
    T: Iterator,
{
    /// Return an adaptor yielding items while `predicate` holds, leaving
    /// the first non-matching item un-consumed.
    fn take_while_exclusive<P>(self, predicate: P) -> ExclusiveTakeWhile<'a, T, P>
    where
        P: FnMut(&T::Item) -> bool;
}
impl<'a, T> AsExclusiveTakeWhile<'a, T> for &'a mut Peekable<T>
where
    T: Iterator,
{
    fn take_while_exclusive<P>(self, predicate: P) -> ExclusiveTakeWhile<'a, T, P> {
        ExclusiveTakeWhile {
            iter: self,
            pred: predicate,
        }
    }
}
/// A cheaply clonable, shared, interior-mutable cell: a thin wrapper around
/// `Rc<RefCell<T>>`. Clones share the same underlying value.
#[derive(Debug)]
pub struct Shared<T> {
    data: Rc<RefCell<T>>,
}
// Manual impl: cloning only bumps the `Rc` refcount, so `T: Clone` is not
// required (a `#[derive(Clone)]` would impose that bound).
impl<T> Clone for Shared<T> {
    fn clone(&self) -> Self {
        Shared {
            data: Rc::clone(&self.data),
        }
    }
}
impl<T> Shared<T> {
    /// Wrap `data` in a new shared cell.
    pub fn new(data: T) -> Self {
        Self {
            data: Rc::new(RefCell::new(data)),
        }
    }
    /// Immutably borrow the inner value; panics if a mutable borrow is active.
    pub fn borrow(&self) -> Ref<T> {
        self.data.borrow()
    }
    /// Mutably borrow the inner value; panics if any other borrow is active.
    pub fn borrow_mut(&self) -> RefMut<T> {
        self.data.borrow_mut()
    }
}
|
//! Connect to or provide Fuchsia services.
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![deny(warnings)]
#![deny(missing_docs)]
extern crate fuchsia_async as async;
extern crate fuchsia_zircon as zx;
extern crate mxruntime;
extern crate fdio;
#[macro_use] extern crate failure;
extern crate fidl;
extern crate futures;
// Generated FIDL bindings
extern crate fidl_fuchsia_sys;
use fidl_fuchsia_sys::{
ComponentControllerProxy,
LauncherMarker,
LauncherProxy,
LaunchInfo,
};
#[allow(unused_imports)]
use fidl::endpoints2::{ServiceMarker, Proxy};
#[allow(unused_imports)]
use failure::{Error, ResultExt, Fail};
use futures::prelude::*;
use futures::stream::FuturesUnordered;
/// Tools for starting or connecting to existing Fuchsia applications and services.
pub mod client {
    use super::*;
    #[inline]
    /// Connect to a FIDL service using the application root namespace.
    ///
    /// Creates a channel pair, hands the server end to whatever is mounted
    /// at `/svc/<S::NAME>`, and wraps the client end in the service's proxy
    /// type.
    pub fn connect_to_service<S: ServiceMarker>()
        -> Result<S::Proxy, Error>
    {
        let (proxy, server) = zx::Channel::create()?;
        let service_path = format!("/svc/{}", S::NAME);
        fdio::service_connect(&service_path, server)
            .with_context(|_| format!("Error connecting to service path: {}", service_path))?;
        // Register the client end with the async executor so the proxy can
        // await replies.
        let proxy = async::Channel::from_channel(proxy)?;
        Ok(S::Proxy::from_channel(proxy))
    }
    /// Launcher launches Fuchsia applications.
    pub struct Launcher {
        launcher: LauncherProxy,
    }
    impl Launcher {
        #[inline]
        /// Create a new application launcher connected via the root namespace.
        pub fn new() -> Result<Self, Error> {
            let launcher = connect_to_service::<LauncherMarker>()?;
            Ok(Launcher { launcher })
        }
        /// Launch an application at the specified URL.
        ///
        /// Returns an `App` holding the component's controller proxy and a
        /// channel to the component's exported directory.
        pub fn launch(
            &self,
            url: String,
            arguments: Option<Vec<String>>,
        ) -> Result<App, Error>
        {
            // One channel pair for component control, one for the
            // component's exported directory.
            let (controller, controller_server_end) = zx::Channel::create()?;
            let (directory_request, directory_server_chan) = zx::Channel::create()?;
            let mut launch_info = LaunchInfo {
                url,
                arguments,
                out: None,
                err: None,
                directory_request: Some(directory_server_chan),
                flat_namespace: None,
                additional_services: None,
            };
            self.launcher
                .create_component(&mut launch_info, Some(controller_server_end.into()))
                .context("Failed to start a new Fuchsia application.")?;
            let controller = async::Channel::from_channel(controller)?;
            let controller = ComponentControllerProxy::new(controller);
            Ok(App { directory_request, controller })
        }
    }
    /// `App` represents a launched application.
    pub struct App {
        // directory_request is a directory protocol channel
        directory_request: zx::Channel,
        // TODO: use somehow?
        #[allow(dead_code)]
        controller: ComponentControllerProxy,
    }
    impl App {
        #[inline]
        /// Connect to a service provided by the `App`.
        pub fn connect_to_service<S: ServiceMarker>(&self, service: S)
            -> Result<S::Proxy, Error>
        {
            let (client_channel, server_channel) = zx::Channel::create()?;
            self.pass_to_service(service, server_channel)?;
            Ok(S::Proxy::from_channel(async::Channel::from_channel(client_channel)?))
        }
        /// Connect to a service by passing a channel for the server.
        pub fn pass_to_service<S: ServiceMarker>(&self, _: S, server_channel: zx::Channel)
            -> Result<(), Error>
        {
            // Route the request through the app's exported directory under
            // the service's well-known name.
            fdio::service_connect_at(&self.directory_request, S::NAME, server_channel)?;
            Ok(())
        }
    }
}
/// Tools for providing Fuchsia services.
pub mod server {
    use super::*;
    use futures::{Future, Poll};
    use self::errors::*;
    /// New root-level errors that may occur when using the `fuchsia_component::server` module.
    /// Note that these are not the only kinds of errors that may occur: errors the module
    /// may also be caused by `fidl::Error` or `zircon::Status`.
    pub mod errors {
        /// The startup handle on which the FIDL server attempted to start was missing.
        #[derive(Debug, Fail)]
        #[fail(display = "The startup handle on which the FIDL server attempted to start was missing.")]
        pub struct MissingStartupHandle;
    }
    /// `ServiceFactory` lazily creates instances of services.
    ///
    /// Note that this trait is implemented by `FnMut` closures like `|| MyService { ... }`.
    pub trait ServiceFactory: Send + 'static {
        /// The path name of a service.
        ///
        /// Used by the `FdioServer` to know which service to connect incoming requests to.
        fn service_name(&self) -> &str;
        /// Create a `fidl::Stub` service.
        // TODO(cramertj): allow `spawn` calls to fail.
        fn spawn_service(&mut self, channel: async::Channel);
    }
    // Convenience impl so a `(name, handler_closure)` tuple can be
    // registered directly as a service factory.
    impl<F> ServiceFactory for (&'static str, F)
        where F: FnMut(async::Channel) + Send + 'static,
    {
        fn service_name(&self) -> &str {
            self.0
        }
        fn spawn_service(&mut self, channel: async::Channel) {
            (self.1)(channel)
        }
    }
    /// `ServicesServer` is a server which manufactures service instances of varying types on demand.
    /// To run a `ServicesServer`, use `Server::new`.
    pub struct ServicesServer {
        services: Vec<Box<ServiceFactory>>,
    }
    impl ServicesServer {
        /// Create a new `ServicesServer` which doesn't provide any services.
        pub fn new() -> Self {
            ServicesServer { services: vec![] }
        }
        /// Add a service to the `ServicesServer`.
        pub fn add_service<S: ServiceFactory>(mut self, service_factory: S) -> Self {
            self.services.push(Box::new(service_factory));
            self
        }
        /// Start serving directory protocol service requests on the process PA_DIRECTORY_REQUEST handle
        pub fn start(self) -> Result<FdioServer, Error> {
            let fdio_handle = mxruntime::get_startup_handle(mxruntime::HandleType::DirectoryRequest)
                .ok_or(MissingStartupHandle)?;
            let fdio_channel = async::Channel::from_channel(fdio_handle.into())?;
            let mut server = FdioServer{
                readers: FuturesUnordered::new(),
                factories: self.services,
            };
            // Begin reading requests on the root directory channel itself.
            server.serve_channel(fdio_channel);
            Ok(server)
        }
    }
    /// `FdioServer` is a very basic vfs directory server that only responds to
    /// OPEN and CLONE messages. OPEN always connects the client channel to a
    /// newly spawned fidl service produced by the factory F.
    #[must_use = "futures must be polled"]
    pub struct FdioServer {
        // Pending reads, one in flight per channel currently being served.
        readers: FuturesUnordered<async::RecvMsg<zx::MessageBuf>>,
        factories: Vec<Box<ServiceFactory>>,
    }
    impl FdioServer {
        // Handle one zxrio message from `chan`, returning the message buffer
        // for reuse by the next read.
        fn dispatch(&mut self, chan: &async::Channel, buf: zx::MessageBuf) -> zx::MessageBuf {
            // TODO(raggi): provide an alternative to the into() here so that we
            // don't need to pass the buf in owned back and forward.
            let mut msg: fdio::rio::Message = buf.into();
            // open & clone use a different reply channel
            //
            // Note: msg.validate() ensures that open must have exactly one
            // handle, but the message may yet be invalid.
            let reply_channel = match msg.op() {
                fdio::fdio_sys::ZXRIO_OPEN |
                fdio::fdio_sys::ZXRIO_CLONE => {
                    msg.take_handle(0).map(zx::Channel::from)
                }
                _ => None,
            };
            let validation = msg.validate();
            // Reject anything that is not a well-formed OPEN/CLONE carrying a
            // reply channel; DESCRIBE requests are not supported either.
            // NOTE(review): `!reply_channel.is_some()` would read more
            // idiomatically as `reply_channel.is_none()`.
            if validation.is_err() ||
                (
                    msg.op() != fdio::fdio_sys::ZXRIO_OPEN &&
                    msg.op() != fdio::fdio_sys::ZXRIO_CLONE
                ) ||
                msg.is_describe() ||
                !reply_channel.is_some()
            {
                eprintln!(
                    "service request channel received invalid/unsupported zxrio request: {:?}",
                    &msg
                );
                // DESCRIBE callers still expect a reply; send the validation
                // error (or NOT_SUPPORTED) on a best-effort basis.
                if msg.is_describe() {
                    let reply_channel = reply_channel.as_ref().unwrap_or(chan.as_ref());
                    let reply_err = validation.err().unwrap_or(zx::Status::NOT_SUPPORTED);
                    fdio::rio::write_object(reply_channel, reply_err, 0, &[], &mut vec![])
                        .unwrap_or_else(|e| {
                            eprintln!("service request reply write failed with {:?}", e)
                        });
                }
                return msg.into();
            }
            // CLONE: start serving directory requests on the new channel too.
            if msg.op() == fdio::fdio_sys::ZXRIO_CLONE {
                if let Some(c) = reply_channel {
                    if let Ok(fdio_chan) = async::Channel::from_channel(c) {
                        self.serve_channel(fdio_chan);
                    }
                }
                return msg.into();
            }
            // OPEN: the reply channel becomes the spawned service's channel.
            let service_channel = reply_channel.unwrap();
            let service_channel = async::Channel::from_channel(service_channel).unwrap();
            // TODO(raggi): re-arrange things to avoid the copy here
            let path = std::str::from_utf8(msg.data()).unwrap().to_owned();
            // Opening "public" serves the directory protocol itself on the
            // new channel instead of connecting to a service.
            if path == "public" {
                self.serve_channel(service_channel);
                return msg.into();
            }
            match self.factories.iter_mut().find(|factory| factory.service_name() == path) {
                Some(factory) => factory.spawn_service(service_channel),
                None => eprintln!("No service found for path {}", path),
            }
            msg.into()
        }
        // Queue an async read on `chan`; completion is handled in `poll`.
        fn serve_channel(&mut self, chan: async::Channel) {
            let rmsg = chan.recv_msg(zx::MessageBuf::new());
            self.readers.push(rmsg);
        }
    }
    impl Future for FdioServer {
        type Item = ();
        type Error = Error;
        fn poll(&mut self, cx: &mut task::Context) -> Poll<Self::Item, Self::Error> {
            // Drain every completed read, dispatch it, and re-arm the same
            // channel with the (reused) buffer returned by `dispatch`.
            loop {
                match self.readers.poll_next(cx) {
                    Ok(Async::Ready(Some((chan, buf)))) => {
                        let buf = self.dispatch(&chan, buf);
                        self.readers.push(chan.recv_msg(buf));
                    },
                    Ok(Async::Ready(None)) | Ok(Async::Pending) => return Ok(Async::Pending),
                    Err(_) => {
                        // errors are ignored, as we assume that the channel should still be read from.
                    },
                }
            }
        }
    }
}
|
// implements the background segmentation video submodule
/// Stub for the background-segmentation stage of the video pipeline.
/// NOTE(review): both methods take `self` by value and have empty bodies —
/// this is placeholder code; confirm the intended ownership
/// (`&mut self` / `&self`) before fleshing it out.
pub struct BackgroundSegmenter {
    // previous frame
    // probability map
}
impl BackgroundSegmenter {
    /// Update internal state from a new frame (not yet implemented).
    pub fn update(self/*Image*/) {
    }
    /// Return the per-pixel background probability map (not yet implemented).
    pub fn get_probability_map(self) /*-> image*/ {
    }
} |
// NOTE(review): machine-generated (svd2rust-style) peripheral description.
// Field order plus the `_reserved11` gap encode the register offsets listed
// in the `#[doc]` strings — do not reorder or resize fields by hand.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - GPIO port mode register"]
    pub gpiog_moder: GPIOG_MODER,
    #[doc = "0x04 - GPIO port output type register"]
    pub gpiog_otyper: GPIOG_OTYPER,
    #[doc = "0x08 - GPIO port output speed register"]
    pub gpiog_ospeedr: GPIOG_OSPEEDR,
    #[doc = "0x0c - GPIO port pull-up/pull-down register"]
    pub gpiog_pupdr: GPIOG_PUPDR,
    #[doc = "0x10 - GPIO port input data register"]
    pub gpiog_idr: GPIOG_IDR,
    #[doc = "0x14 - GPIO port output data register"]
    pub gpiog_odr: GPIOG_ODR,
    #[doc = "0x18 - GPIO port bit set/reset register"]
    pub gpiog_bsrr: GPIOG_BSRR,
    #[doc = "0x1c - This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers)."]
    pub gpiog_lckr: GPIOG_LCKR,
    #[doc = "0x20 - GPIO alternate function low register"]
    pub gpiog_afrl: GPIOG_AFRL,
    #[doc = "0x24 - GPIO alternate function high register"]
    pub gpiog_afrh: GPIOG_AFRH,
    #[doc = "0x28 - GPIO port bit reset register"]
    pub gpiog_brr: GPIOG_BRR,
    // Padding between 0x2c and 0x3c8 (no registers defined in this range).
    _reserved11: [u8; 0x039c],
    #[doc = "0x3c8 - For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:"]
    pub gpiog_hwcfgr10: GPIOG_HWCFGR10,
    #[doc = "0x3cc - For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
    pub gpiog_hwcfgr9: GPIOG_HWCFGR9,
    #[doc = "0x3d0 - For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
    pub gpiog_hwcfgr8: GPIOG_HWCFGR8,
    #[doc = "0x3d4 - GPIO hardware configuration register 7"]
    pub gpiog_hwcfgr7: GPIOG_HWCFGR7,
    #[doc = "0x3d8 - GPIO hardware configuration register 6"]
    pub gpiog_hwcfgr6: GPIOG_HWCFGR6,
    #[doc = "0x3dc - GPIO hardware configuration register 5"]
    pub gpiog_hwcfgr5: GPIOG_HWCFGR5,
    #[doc = "0x3e0 - GPIO hardware configuration register 4"]
    pub gpiog_hwcfgr4: GPIOG_HWCFGR4,
    #[doc = "0x3e4 - GPIO hardware configuration register 3"]
    pub gpiog_hwcfgr3: GPIOG_HWCFGR3,
    #[doc = "0x3e8 - GPIO hardware configuration register 2"]
    pub gpiog_hwcfgr2: GPIOG_HWCFGR2,
    #[doc = "0x3ec - GPIO hardware configuration register 1"]
    pub gpiog_hwcfgr1: GPIOG_HWCFGR1,
    #[doc = "0x3f0 - GPIO hardware configuration register 0"]
    pub gpiog_hwcfgr0: GPIOG_HWCFGR0,
    #[doc = "0x3f4 - GPIO version register"]
    pub gpiog_verr: GPIOG_VERR,
    #[doc = "0x3f8 - GPIO identification register"]
    pub gpiog_ipidr: GPIOG_IPIDR,
    #[doc = "0x3fc - GPIO size identification register"]
    pub gpiog_sidr: GPIOG_SIDR,
}
// NOTE(review): machine-generated (svd2rust-style) register accessor type
// aliases and submodule declarations; edit the SVD source, not this file.
#[doc = "GPIOG_MODER (rw) register accessor: GPIO port mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_moder::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_moder::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_moder`]
module"]
pub type GPIOG_MODER = crate::Reg<gpiog_moder::GPIOG_MODER_SPEC>;
#[doc = "GPIO port mode register"]
pub mod gpiog_moder;
#[doc = "GPIOG_OTYPER (rw) register accessor: GPIO port output type register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_otyper::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_otyper::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_otyper`]
module"]
pub type GPIOG_OTYPER = crate::Reg<gpiog_otyper::GPIOG_OTYPER_SPEC>;
#[doc = "GPIO port output type register"]
pub mod gpiog_otyper;
#[doc = "GPIOG_OSPEEDR (rw) register accessor: GPIO port output speed register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_ospeedr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_ospeedr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_ospeedr`]
module"]
pub type GPIOG_OSPEEDR = crate::Reg<gpiog_ospeedr::GPIOG_OSPEEDR_SPEC>;
#[doc = "GPIO port output speed register"]
pub mod gpiog_ospeedr;
#[doc = "GPIOG_PUPDR (rw) register accessor: GPIO port pull-up/pull-down register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_pupdr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_pupdr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_pupdr`]
module"]
pub type GPIOG_PUPDR = crate::Reg<gpiog_pupdr::GPIOG_PUPDR_SPEC>;
#[doc = "GPIO port pull-up/pull-down register"]
pub mod gpiog_pupdr;
#[doc = "GPIOG_IDR (r) register accessor: GPIO port input data register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_idr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_idr`]
module"]
pub type GPIOG_IDR = crate::Reg<gpiog_idr::GPIOG_IDR_SPEC>;
#[doc = "GPIO port input data register"]
pub mod gpiog_idr;
#[doc = "GPIOG_ODR (rw) register accessor: GPIO port output data register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_odr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_odr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_odr`]
module"]
pub type GPIOG_ODR = crate::Reg<gpiog_odr::GPIOG_ODR_SPEC>;
#[doc = "GPIO port output data register"]
pub mod gpiog_odr;
#[doc = "GPIOG_BSRR (w) register accessor: GPIO port bit set/reset register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_bsrr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_bsrr`]
module"]
pub type GPIOG_BSRR = crate::Reg<gpiog_bsrr::GPIOG_BSRR_SPEC>;
#[doc = "GPIO port bit set/reset register"]
pub mod gpiog_bsrr;
#[doc = "GPIOG_LCKR (rw) register accessor: This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers).\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_lckr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_lckr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_lckr`]
module"]
pub type GPIOG_LCKR = crate::Reg<gpiog_lckr::GPIOG_LCKR_SPEC>;
#[doc = "This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers)."]
pub mod gpiog_lckr;
#[doc = "GPIOG_AFRL (rw) register accessor: GPIO alternate function low register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_afrl::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_afrl::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_afrl`]
module"]
pub type GPIOG_AFRL = crate::Reg<gpiog_afrl::GPIOG_AFRL_SPEC>;
#[doc = "GPIO alternate function low register"]
pub mod gpiog_afrl;
#[doc = "GPIOG_AFRH (rw) register accessor: GPIO alternate function high register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_afrh::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_afrh::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_afrh`]
module"]
pub type GPIOG_AFRH = crate::Reg<gpiog_afrh::GPIOG_AFRH_SPEC>;
#[doc = "GPIO alternate function high register"]
pub mod gpiog_afrh;
#[doc = "GPIOG_BRR (w) register accessor: GPIO port bit reset register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpiog_brr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_brr`]
module"]
pub type GPIOG_BRR = crate::Reg<gpiog_brr::GPIOG_BRR_SPEC>;
#[doc = "GPIO port bit reset register"]
pub mod gpiog_brr;
#[doc = "GPIOG_HWCFGR10 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr10::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr10`]
module"]
pub type GPIOG_HWCFGR10 = crate::Reg<gpiog_hwcfgr10::GPIOG_HWCFGR10_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:"]
pub mod gpiog_hwcfgr10;
#[doc = "GPIOG_HWCFGR9 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr9::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr9`]
module"]
pub type GPIOG_HWCFGR9 = crate::Reg<gpiog_hwcfgr9::GPIOG_HWCFGR9_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
pub mod gpiog_hwcfgr9;
#[doc = "GPIOG_HWCFGR8 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr8::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr8`]
module"]
pub type GPIOG_HWCFGR8 = crate::Reg<gpiog_hwcfgr8::GPIOG_HWCFGR8_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
pub mod gpiog_hwcfgr8;
#[doc = "GPIOG_HWCFGR7 (r) register accessor: GPIO hardware configuration register 7\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr7::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr7`]
module"]
pub type GPIOG_HWCFGR7 = crate::Reg<gpiog_hwcfgr7::GPIOG_HWCFGR7_SPEC>;
#[doc = "GPIO hardware configuration register 7"]
pub mod gpiog_hwcfgr7;
#[doc = "GPIOG_HWCFGR6 (r) register accessor: GPIO hardware configuration register 6\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr6::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr6`]
module"]
pub type GPIOG_HWCFGR6 = crate::Reg<gpiog_hwcfgr6::GPIOG_HWCFGR6_SPEC>;
#[doc = "GPIO hardware configuration register 6"]
pub mod gpiog_hwcfgr6;
#[doc = "GPIOG_HWCFGR5 (r) register accessor: GPIO hardware configuration register 5\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr5::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr5`]
module"]
pub type GPIOG_HWCFGR5 = crate::Reg<gpiog_hwcfgr5::GPIOG_HWCFGR5_SPEC>;
#[doc = "GPIO hardware configuration register 5"]
pub mod gpiog_hwcfgr5;
#[doc = "GPIOG_HWCFGR4 (r) register accessor: GPIO hardware configuration register 4\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr4::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr4`]
module"]
pub type GPIOG_HWCFGR4 = crate::Reg<gpiog_hwcfgr4::GPIOG_HWCFGR4_SPEC>;
#[doc = "GPIO hardware configuration register 4"]
pub mod gpiog_hwcfgr4;
#[doc = "GPIOG_HWCFGR3 (r) register accessor: GPIO hardware configuration register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr3::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr3`]
module"]
pub type GPIOG_HWCFGR3 = crate::Reg<gpiog_hwcfgr3::GPIOG_HWCFGR3_SPEC>;
#[doc = "GPIO hardware configuration register 3"]
pub mod gpiog_hwcfgr3;
#[doc = "GPIOG_HWCFGR2 (r) register accessor: GPIO hardware configuration register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr2::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr2`]
module"]
pub type GPIOG_HWCFGR2 = crate::Reg<gpiog_hwcfgr2::GPIOG_HWCFGR2_SPEC>;
#[doc = "GPIO hardware configuration register 2"]
pub mod gpiog_hwcfgr2;
#[doc = "GPIOG_HWCFGR1 (r) register accessor: GPIO hardware configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr1`]
module"]
pub type GPIOG_HWCFGR1 = crate::Reg<gpiog_hwcfgr1::GPIOG_HWCFGR1_SPEC>;
#[doc = "GPIO hardware configuration register 1"]
pub mod gpiog_hwcfgr1;
#[doc = "GPIOG_HWCFGR0 (r) register accessor: GPIO hardware configuration register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_hwcfgr0::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_hwcfgr0`]
module"]
pub type GPIOG_HWCFGR0 = crate::Reg<gpiog_hwcfgr0::GPIOG_HWCFGR0_SPEC>;
#[doc = "GPIO hardware configuration register 0"]
pub mod gpiog_hwcfgr0;
#[doc = "GPIOG_VERR (r) register accessor: GPIO version register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_verr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_verr`]
module"]
pub type GPIOG_VERR = crate::Reg<gpiog_verr::GPIOG_VERR_SPEC>;
#[doc = "GPIO version register"]
pub mod gpiog_verr;
#[doc = "GPIOG_IPIDR (r) register accessor: GPIO identification register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_ipidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_ipidr`]
module"]
pub type GPIOG_IPIDR = crate::Reg<gpiog_ipidr::GPIOG_IPIDR_SPEC>;
#[doc = "GPIO identification register"]
pub mod gpiog_ipidr;
#[doc = "GPIOG_SIDR (r) register accessor: GPIO size identification register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpiog_sidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpiog_sidr`]
module"]
pub type GPIOG_SIDR = crate::Reg<gpiog_sidr::GPIOG_SIDR_SPEC>;
#[doc = "GPIO size identification register"]
pub mod gpiog_sidr;
|
#[derive(Clone, Copy)]
/// A simple 2-D point used to demonstrate `ref` patterns in destructuring.
struct Point { x: i32, y: i32 }
/// Demonstrates that a `ref` binding on the left is equivalent to `&` on the right,
/// both for plain values and inside a destructuring pattern.
fn main() {
    let c = 'Q';
    // `ref` on the left-hand side borrows `c`, exactly like `&c` on the right.
    let ref ref_c1 = c;
    let ref_c2 = &c;
    println!("ref_c1 equals ref_c2: {}", *ref_c1 == *ref_c2);
    // Fixed: the original read `let point = Point = { x: 0, y: 0 };`, which is a
    // syntax error (a struct literal, not an assignment chain).
    let point = Point { x: 0, y: 0 };
    let _copy_of_x = {
        // Fixed: the destructuring `let` had no right-hand side and the block
        // produced no value; borrow `x` out of `point` and copy it out.
        let Point { x: ref ref_to_x, y: _ } = point;
        *ref_to_x
    };
}
|
use nalgebra_glm::Vec3;
use nalgebra_glm::Vec4;
#[derive(Debug, Clone)]
/// A light source in the scene; currently only point lights are supported.
pub enum Light {
    Point(PointLight),
}
#[derive(Debug, Clone)]
/// An omnidirectional light emitting from a single position.
pub struct PointLight {
    /// Homogeneous position (xyzw). NOTE(review): the defaults below use w = 0.0 —
    /// confirm whether the shader expects w = 1.0 for positions.
    pub position: Vec4,
    /// RGB color of the emitted light.
    pub color: Vec3,
    /// Influence radius used for attenuation/culling.
    pub radius: f32,
}
impl PointLight {
    /// Creates a new point light at `position` with the given `color` and `radius`.
    pub fn new(position: Vec4, color: Vec3, radius: f32) -> Self {
        // Fixed typo in the trace message: "insance" -> "instance".
        log::trace!("instance of {}", std::any::type_name::<Self>());
        PointLight {
            position,
            color,
            radius,
        }
    }
    /// Builds the default lighting setup for the scene (two point lights;
    /// additional lights are kept commented out for easy toggling).
    pub fn default_lights() -> Vec<Light> {
        let a_light = PointLight::new(
            Vec4::new(0.0, 0.0, 5.0, 0.0),
            Vec3::new(1.0, 1.0, 1.0),
            15.0 * 0.25,
        );
        let b_light = PointLight::new(
            Vec4::new(-2.0, 5.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            15.0,
        );
        // let c_light = PointLight::new(
        //     Vec4::new(2.0, -1.0, 0.0, 0.0),
        //     Vec3::new(0.0, 0.0, 2.5),
        //     5.0,
        // );
        // let d_light = PointLight::new(
        //     Vec4::new(0.0, -0.9, 0.5, 0.0),
        //     Vec3::new(1.0, 1.0, 0.0),
        //     2.0,
        // );
        // let e_light = PointLight::new(
        //     Vec4::new(0.0, -0.5, 0.0, 0.0),
        //     Vec3::new(0.0, 1.0, 0.2),
        //     5.0,
        // );
        // let f_light = PointLight::new(
        //     Vec4::new(0.0, -1.0, 0.0, 0.0),
        //     Vec3::new(1.0, 0.7, 0.3),
        //     25.0,
        // );
        // Consume the lights by value instead of the original
        // `.iter().map(|pl| Light::Point(pl.clone()))`, which cloned every light.
        vec![
            a_light, b_light,
            // c_light, d_light, e_light, f_light
        ]
        .into_iter()
        .map(Light::Point)
        .collect()
    }
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use crate::field::FieldElement;
use utils::{batch_iter_mut, collections::Vec, iter_mut, uninit_vector};
#[cfg(feature = "concurrent")]
use utils::iterators::*;
// MATH FUNCTIONS
// ================================================================================================
/// Returns a vector containing successive powers of a given base.
///
/// More precisely, for base `b`, generates a vector with values [1, b, b^2, b^3, ..., b^(n-1)].
///
/// When `concurrent` feature is enabled, series generation is done concurrently in multiple
/// threads.
///
/// # Examples
/// ```
/// # use winter_math::get_power_series;
/// # use winter_math::{fields::{f128::BaseElement}, FieldElement};
/// let n = 2048;
/// let b = BaseElement::from(3u8);
///
/// let expected = (0..n)
/// .map(|p| b.exp((p as u64).into()))
/// .collect::<Vec<_>>();
///
/// let actual = get_power_series(b, n);
/// assert_eq!(expected, actual);
/// ```
pub fn get_power_series<E>(b: E, n: usize) -> Vec<E>
where
    E: FieldElement,
{
    // Allocate `n` uninitialized slots; every slot is written below before it is
    // ever read — batch_iter_mut! hands out the whole vector in batches and
    // fill_power_series writes each batch completely.
    let mut result = unsafe { uninit_vector(n) };
    // Batches of 1024 elements; with the `concurrent` feature enabled the macro
    // fans the batches out across threads.
    batch_iter_mut!(&mut result, 1024, |batch: &mut [E], batch_offset: usize| {
        // Seed each batch with b^batch_offset so batches are independent of each other.
        let start = b.exp((batch_offset as u64).into());
        fill_power_series(batch, b, start);
    });
    result
}
/// Returns a vector containing successive powers of a given base offset by the specified value.
///
/// More precisely, for base `b` and offset `s`, generates a vector with values
/// [s, s * b, s * b^2, s * b^3, ..., s * b^(n-1)].
///
/// When `concurrent` feature is enabled, series generation is done concurrently in multiple
/// threads.
///
/// # Examples
/// ```
/// # use winter_math::get_power_series_with_offset;
/// # use winter_math::{fields::{f128::BaseElement}, FieldElement};
/// let n = 2048;
/// let b = BaseElement::from(3u8);
/// let s = BaseElement::from(7u8);
///
/// let expected = (0..n)
/// .map(|p| s * b.exp((p as u64).into()))
/// .collect::<Vec<_>>();
///
/// let actual = get_power_series_with_offset(b, s, n);
/// assert_eq!(expected, actual);
/// ```
pub fn get_power_series_with_offset<E>(b: E, s: E, n: usize) -> Vec<E>
where
    E: FieldElement,
{
    // Uninitialized storage is safe here for the same reason as in
    // get_power_series: every element is written by fill_power_series below.
    let mut result = unsafe { uninit_vector(n) };
    batch_iter_mut!(&mut result, 1024, |batch: &mut [E], batch_offset: usize| {
        // Each batch starts at s * b^batch_offset, making batches independent.
        let start = s * b.exp((batch_offset as u64).into());
        fill_power_series(batch, b, start);
    });
    result
}
/// Computes element-wise sum of the provided vectors, and stores the result in the first vector.
///
/// When `concurrent` feature is enabled, the summation is performed concurrently in multiple
/// threads.
///
/// # Panics
/// Panics if lengths of `a` and `b` vectors are not the same.
///
/// # Examples
/// ```
/// # use winter_math::add_in_place;
/// # use winter_math::{fields::{f128::BaseElement}, FieldElement};
/// let a = BaseElement::prng_vector([0; 32], 2048);
/// let b = BaseElement::prng_vector([1; 32], 2048);
///
/// let mut c = a.clone();
/// add_in_place(&mut c, &b);
///
/// for ((a, b), c) in a.into_iter().zip(b).zip(c) {
/// assert_eq!(a + b, c);
/// }
/// ```
pub fn add_in_place<E>(a: &mut [E], b: &[E])
where
    E: FieldElement,
{
    assert!(
        a.len() == b.len(),
        "number of values must be the same for both operands"
    );
    // iter_mut! expands to either a sequential or a rayon parallel iterator,
    // depending on whether the `concurrent` feature is enabled.
    iter_mut!(a).zip(b).for_each(|(a, &b)| *a += b);
}
/// Multiplies a sequence of values by a scalar and accumulates the results.
///
/// More precisely, computes `a[i]` + `b[i]` * `c` for all `i` and saves result into `a[i]`.
///
/// When `concurrent` feature is enabled, the computation is performed concurrently in multiple
/// threads.
///
/// # Panics
/// Panics if lengths of `a` and `b` slices are not the same.
///
/// # Examples
/// ```
/// # use winter_math::mul_acc;
/// # use winter_math::{fields::{f128::BaseElement}, FieldElement};
/// let a = BaseElement::prng_vector([0; 32], 2048);
/// let b = BaseElement::prng_vector([1; 32], 2048);
/// let c = BaseElement::new(12345);
///
/// let mut d = a.clone();
/// mul_acc(&mut d, &b, c);
///
/// for ((a, b), d) in a.into_iter().zip(b).zip(d) {
/// assert_eq!(a + b * c, d);
/// }
/// ```
pub fn mul_acc<B, E>(a: &mut [E], b: &[B], c: E)
where
    B: FieldElement,
    E: FieldElement + From<B>,
{
    assert!(
        a.len() == b.len(),
        "number of values must be the same for both slices"
    );
    // a[i] += E::from(b[i]) * c; iter_mut! is sequential or parallel depending
    // on the `concurrent` feature.
    iter_mut!(a).zip(b).for_each(|(a, &b)| *a += E::from(b) * c);
}
/// Computes a multiplicative inverse of a sequence of elements using batch inversion method.
///
/// Any ZEROs in the provided sequence are ignored.
///
/// When `concurrent` feature is enabled, the inversion is performed concurrently in multiple
/// threads.
///
/// This function is significantly faster than inverting elements one-by-one because it
/// essentially transforms `n` inversions into `4 * n` multiplications + 1 inversion.
///
/// # Examples
/// ```
/// # use winter_math::batch_inversion;
/// # use winter_math::{fields::{f128::BaseElement}, FieldElement};
/// let a = BaseElement::prng_vector([1; 32], 2048);
/// let b = batch_inversion(&a);
///
/// for (&a, &b) in a.iter().zip(b.iter()) {
/// assert_eq!(a.inv(), b);
/// }
/// ```
pub fn batch_inversion<E>(values: &[E]) -> Vec<E>
where
    E: FieldElement,
{
    // Uninitialized storage is safe: serial_batch_inversion writes every element
    // of each batch before anything reads it.
    let mut result: Vec<E> = unsafe { uninit_vector(values.len()) };
    batch_iter_mut!(&mut result, 1024, |batch: &mut [E], batch_offset: usize| {
        // Invert the slice of `values` corresponding to this output batch; each
        // batch pays one field inversion (see serial_batch_inversion).
        let start = batch_offset;
        let end = start + batch.len();
        serial_batch_inversion(&values[start..end], batch);
    });
    result
}
/// Returns base 2 logarithm of `n`, where `n` is a power of two.
///
/// # Panics
/// Panics if `n` is not a power of two.
///
/// # Examples
/// ```
/// # use winter_math::log2;
/// assert_eq!(log2(1), 0);
/// assert_eq!(log2(16), 4);
/// assert_eq!(log2(1 << 20), 20);
/// assert_eq!(log2(2usize.pow(20)), 20);
/// ```
pub fn log2(n: usize) -> u32 {
    // A power of two has exactly one set bit, and the index of that bit is the
    // exponent, i.e. the number of trailing zeros.
    match n.is_power_of_two() {
        true => n.trailing_zeros(),
        false => panic!("n must be a power of two"),
    }
}
// HELPER FUNCTIONS
// ------------------------------------------------------------------------------------------------
#[inline(always)]
/// Fills `result` with `start, start * base, start * base^2, ...`.
///
/// Uses a running accumulator instead of the original back-reference
/// `result[i - 1]`, which removes per-iteration bounds checks and also makes an
/// empty slice a harmless no-op instead of an index-out-of-bounds panic.
fn fill_power_series<E: FieldElement>(result: &mut [E], base: E, start: E) {
    let mut power = start;
    for slot in result.iter_mut() {
        *slot = power;
        power = power * base;
    }
}
/// Inverts `values` into `result` using Montgomery's batch-inversion trick:
/// one field inversion plus O(n) multiplications instead of n inversions.
/// ZERO entries are skipped by the running product and come out as ZERO.
fn serial_batch_inversion<E: FieldElement>(values: &[E], result: &mut [E]) {
    // Forward pass: result[i] receives the product of all non-zero values
    // strictly before index i; `last` accumulates the total product.
    let mut last = E::ONE;
    for (result, &value) in result.iter_mut().zip(values.iter()) {
        *result = last;
        if value != E::ZERO {
            last *= value;
        }
    }
    // The single inversion: invert the product of all non-zero values.
    last = last.inv();
    // Backward pass: combine each prefix product with the inverted suffix
    // product to recover the individual inverse, then peel values[i] off `last`.
    for i in (0..values.len()).rev() {
        if values[i] == E::ZERO {
            result[i] = E::ZERO;
        } else {
            result[i] *= last;
            last *= values[i];
        }
    }
}
|
#[macro_use] extern crate itertools;
use intcode;
use cjp_threadpool::ThreadPool;
// Part-2 target output, from the Advent of Code day 2 puzzle statement.
const TARGET: i64 = 19_690_720;
/// Solves AoC 2019 day 2: part 1 runs the program once with fixed inputs;
/// part 2 searches all (noun, verb) pairs on a thread pool.
fn main() {
    let start_time = std::time::Instant::now();
    let mut memory = intcode::load_program("day2/input.txt").unwrap_or_else(|err| {
        println!("Could not load input file!\n{:?}", err);
        std::process::exit(1);
    });
    // Run the program with the tweaks specified in the question. Extract the value from memory address 0.
    memory[1] = 12;
    memory[2] = 2;
    let (tx, rx) = std::sync::mpsc::channel();
    let mut computer = intcode::ChannelIOComputer::new(&memory, rx, tx);
    computer.run();
    println!("Part 1: {}", computer.fetch_address_zero());
    // Part 2: try every possible combination of values, looking for a combination that
    // results in memory address 0 containing TARGET after execution completes. Just for
    // the lulz, use a thread pool to parallelise the work.
    // NOTE(review): nouns/verbs range over 0..memory.len() rather than the
    // puzzle's 0..100 — confirm the intcode VM tolerates out-of-range operands.
    let pool = ThreadPool::new_with_default_size();
    for (noun, verb) in iproduct!(0..memory.len(), 0..memory.len()) {
        // Each task gets its own copy of the program so runs are independent.
        let mut memory_copy = memory.clone();
        memory_copy[1] = noun as i64;
        memory_copy[2] = verb as i64;
        pool.schedule(Box::new(move || {
            let (tx, rx) = std::sync::mpsc::channel();
            let mut computer = intcode::ChannelIOComputer::new(&memory_copy, rx, tx);
            computer.run();
            if computer.fetch_address_zero() == TARGET {
                Some((noun * 100) + verb)
            } else {
                None
            }
        }));
    }
    // Block until some worker reports a matching (noun, verb) pair; blocks
    // forever if no combination produces TARGET.
    let answer = loop {
        if let Some(answer) = pool.results.recv().unwrap() { break answer; }
    };
    pool.terminate();
    println!("Part 2: {}\nTime: {}ms", answer, start_time.elapsed().as_millis());
}
|
use super::super::ascii85::decode;
use anyhow::{anyhow, ensure, Result};
use std::convert::TryInto;
use std::net::Ipv4Addr;
/// Reinterprets `bytes` as a sequence of big-endian 16-bit words.
/// Note: `chunks_exact(2)` silently drops a trailing odd byte; callers pad first.
fn read_as_u16(bytes: &[u8]) -> Result<Vec<u16>> {
    bytes
        .chunks_exact(2)
        .map(|pair| {
            pair.try_into()
                .map_err(|_| anyhow!("couldn't fit {} bytes into 16 bit word", pair.len()))
                .map(u16::from_be_bytes)
        })
        .collect()
}
/// Like `read_as_u16`, but panics on failure instead of returning an error.
fn read_as_u16_unchecked(bytes: &[u8]) -> Vec<u16> {
    // Build the panic message lazily: the original `format!` + `expect` allocated
    // the message string on every call, even on the success path
    // (clippy::expect_fun_call).
    read_as_u16(bytes)
        .unwrap_or_else(|_| panic!("failed to read byte array as Vec<u16> {:?}", bytes))
}
#[derive(Debug)]
// A packet whose headers have been parsed but whose payload has not been
// attached yet; consumed by `set_data` to produce the finished UdpPacket.
struct EmptyUdpPacket(UdpPacket);
impl EmptyUdpPacket {
    // Attaches the payload bytes and yields the completed packet.
    fn set_data(mut self, data: &[u8]) -> UdpPacket {
        self.0.data = data.to_vec();
        self.0
    }
    // Payload length: the UDP length field minus the 8-byte UDP header.
    // NOTE(review): underflows (debug panic / release wrap) when length < 8 —
    // assumes parsed headers always carry length >= 8; confirm for bad input.
    fn len(&self) -> u16 {
        self.0.udp_header.length - 8
    }
}
#[derive(Debug)]
// A fully parsed UDP-over-IPv4 packet, including the pseudo header needed for
// UDP checksum validation and the raw payload bytes.
struct UdpPacket {
    ip_header: Ipv4Header,
    udp_psuedo_header: UdpPseudoHeader, // (sic) spelling kept consistent with uses
    udp_header: UdpHeader,
    data: Vec<u8>,
}
impl UdpPacket {
    /// Parses the 20-byte IPv4 header and the 8-byte UDP header out of the
    /// first 28 bytes of a packet; the payload is attached later via `set_data`.
    fn parse_headers(bytes: [u8; 28]) -> Result<EmptyUdpPacket> {
        let ip_header = Ipv4Header::from_bytes(&bytes[0..20])?;
        let (udp_psuedo_header, udp_header) = parse_udp_headers(&ip_header, &bytes[20..28])?;
        Ok(EmptyUdpPacket(UdpPacket {
            ip_header,
            udp_psuedo_header,
            udp_header,
            data: Vec::new(),
        }))
    }
    /// Verifies the IPv4 header checksum.
    fn valid_ip_checksum(&self) -> bool {
        self.ip_header.valid_checksum()
    }
    /// Verifies the UDP checksum over the IPv4 pseudo header, the UDP header and
    /// the payload, per the layout at
    /// https://en.wikipedia.org/wiki/User_Datagram_Protocol#IPv4_pseudo_header
    fn valid_udp_checksum(&self) -> bool {
        // Pseudo header (12) + UDP header (8) + payload (+1 possible pad byte).
        // The original reserved only 20 bytes and re-grew for the payload.
        let mut bytes: Vec<u8> = Vec::with_capacity(20 + self.data.len() + 1);
        // Checksum input layout:
        // Source Address, Destination Address, Zeroes, Protocol, UDP Length,
        // Source Port, Destination Port, Length, Checksum, Data.
        // extend_from_slice appends in place; the original built a temporary Vec
        // per field via `append(&mut x.to_vec())`, allocating eight times.
        bytes.extend_from_slice(&self.udp_psuedo_header.source_address.octets());
        bytes.extend_from_slice(&self.udp_psuedo_header.destination_address.octets());
        bytes.push(0x00);
        bytes.push(self.udp_psuedo_header.protocol);
        bytes.extend_from_slice(&self.udp_psuedo_header.udp_length.to_be_bytes());
        bytes.extend_from_slice(&self.udp_header.source_port.to_be_bytes());
        bytes.extend_from_slice(&self.udp_header.destination_port.to_be_bytes());
        bytes.extend_from_slice(&self.udp_header.length.to_be_bytes());
        bytes.extend_from_slice(&self.udp_header.checksum.to_be_bytes());
        bytes.extend_from_slice(&self.data);
        // Pad to an even length so the buffer splits into whole 16-bit words.
        if bytes.len() % 2 != 0 {
            bytes.push(0)
        }
        // With the checksum field included, a valid packet folds to all-ones.
        read_as_u16_unchecked(&bytes)
            .iter()
            .fold(0xffff, |sum, &next| ones_complement_sum(sum, next))
            == 0xffff
    }
    /// Checks both the IP and UDP checksums.
    fn valid_checksums(&self) -> bool {
        let valid_udp = self.valid_udp_checksum();
        let valid_ip = self.valid_ip_checksum();
        valid_ip && valid_udp
    }
}
#[test]
// Round-trips a captured localhost UDP packet (expected checksums noted below)
// through the parser and verifies every extracted field plus both checksums.
fn test_udp_parse() -> Result<()> {
    // ip cksum 0c741
    //udp cksum xcc52
    let bytes: [u8; 40] = [
        // ip header
        0x45, 0x00, // stuff i can ignore :)
        0x00, 0x28, // total length (40)
        0xb5, 0x81, 0x00, 0x00, 0x40, // stuff i can ignore :)
        0x11, // protocol (17 -> UDP)
        0xc7, 0x41, // ip cksum
        0x7f, 0x00, 0x00, 0x01, // src addr (127.0.0.1)
        0x7f, 0x00, 0x00, 0x01, // dest addr (127.0.0.1)
        // no options
        // udp header
        0xc9, 0x64, // src port (51556)
        0x1f, 0xbd, // dest port (8125)
        0x00, 0x14, // udp length (header + data, 8 + 12 = 20)
        0xcc, 0x52, // udp chksum
        // data
        0x72, 0x75, 0x73, 0x74, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6f, 0x6f,
        0x6c, // (rust is cool)
    ];
    let header: [u8; 28] = bytes[0..28].try_into()?;
    let data: [u8; 12] = bytes[28..].try_into()?;
    let packet = UdpPacket::parse_headers(header)?.set_data(&data);
    assert_eq!(packet.ip_header.source, Ipv4Addr::new(127, 0, 0, 1));
    assert_eq!(packet.ip_header.destination, Ipv4Addr::new(127, 0, 0, 1));
    assert!(packet.ip_header.valid_checksum());
    assert_eq!(packet.udp_psuedo_header.source_address, Ipv4Addr::LOCALHOST);
    assert_eq!(
        packet.udp_psuedo_header.destination_address,
        Ipv4Addr::LOCALHOST
    );
    assert_eq!(packet.udp_psuedo_header.protocol, 17);
    assert_eq!(packet.udp_psuedo_header.udp_length, 20);
    assert_eq!(packet.udp_header.source_port, 51556);
    assert_eq!(packet.udp_header.destination_port, 8125);
    assert_eq!(packet.udp_header.length, 20);
    assert!(packet.valid_udp_checksum());
    assert!(packet.valid_checksums()); // already checked individually, but make sure this method works
    Ok(())
}
#[derive(Debug)]
// an IPv4 packet has much more info than this, but for this we only care about these fields
struct Ipv4Header {
    source: Ipv4Addr,
    destination: Ipv4Addr,
    checksum: u16,
    // All ten 16-bit words of the raw header (checksum included), kept so
    // valid_checksum can fold the entire header.
    words: Vec<u16>, // todo protocol 0x11
}
impl Ipv4Header {
    /// Extracts the fields we care about from a 20-byte (option-less) IPv4 header.
    fn from_bytes(bytes: &[u8]) -> Result<Ipv4Header> {
        ensure!(
            bytes.len() == 20,
            anyhow!("Invalid header length={}", bytes.len())
        );
        // Fixed offsets within the header: checksum at 10..12, addresses after.
        let checksum: [u8; 2] = bytes[10..12].try_into()?;
        let src: [u8; 4] = bytes[12..16].try_into()?;
        let dst: [u8; 4] = bytes[16..20].try_into()?;
        Ok(Ipv4Header {
            source: Ipv4Addr::from(src),
            destination: Ipv4Addr::from(dst),
            checksum: u16::from_be_bytes(checksum),
            words: read_as_u16(bytes)?,
        })
    }
    /// One's-complement fold over every header word (checksum included) must
    /// come out all-ones for a valid header.
    fn valid_checksum(&self) -> bool {
        let mut sum = 0xffff;
        for &word in &self.words {
            sum = ones_complement_sum(sum, word);
        }
        sum == 0xffff
    }
}
#[test]
// Checks that source/destination addresses parse from a real captured header.
fn test_from_bytes() -> Result<()> {
    // a random packet from tcpdump
    let packet = [
        0x45, 0x00, 0x00, 0xd0, 0xb4, 0x2a, 0x40, 0x00, 0x40, 0x06, 0xc2, 0xd8, 0xac, 0x18, 0xba,
        0xf2, 0xac, 0x18, 0xb0, 0x01,
    ];
    let out = Ipv4Header::from_bytes(&packet)?;
    assert_eq!(out.source, Ipv4Addr::new(172, 24, 186, 242));
    assert_eq!(out.destination, Ipv4Addr::new(172, 24, 176, 1));
    Ok(())
}
#[derive(Debug)]
// The IPv4 pseudo header that participates in the UDP checksum; it is not part
// of the bytes on the wire.
struct UdpPseudoHeader {
    source_address: Ipv4Addr,
    destination_address: Ipv4Addr,
    protocol: u8,
    udp_length: u16,
}
#[derive(Debug)]
// The real 8-byte UDP header.
struct UdpHeader {
    source_port: u16,
    destination_port: u16,
    length: u16, // wtf two lengths (https://stackoverflow.com/a/26356487)
    checksum: u16,
}
/// Parses the 8-byte UDP header and derives the checksum pseudo header from the
/// already-parsed IPv4 header.
fn parse_udp_headers(ip_header: &Ipv4Header, bytes: &[u8]) -> Result<(UdpPseudoHeader, UdpHeader)> {
    ensure!(
        bytes.len() == 8,
        anyhow!("Invalid header length={}", bytes.len())
    );
    // The UDP header is simply four big-endian u16 fields in order.
    let src: [u8; 2] = bytes[..2].try_into()?;
    let dest: [u8; 2] = bytes[2..4].try_into()?;
    let length: [u8; 2] = bytes[4..6].try_into()?;
    let checksum: [u8; 2] = bytes[6..8].try_into()?;
    let udp_length = u16::from_be_bytes(length);
    Ok((
        UdpPseudoHeader {
            source_address: ip_header.source,
            destination_address: ip_header.destination,
            protocol: 0x11, // todo
            udp_length,
        },
        UdpHeader {
            source_port: u16::from_be_bytes(src),
            destination_port: u16::from_be_bytes(dest),
            length: udp_length,
            checksum: u16::from_be_bytes(checksum),
        },
    ))
}
/// One's-complement 16-bit addition (RFC 1071 style): add in 32 bits, then fold
/// the carry back into the low word.
fn ones_complement_sum(x: u16, y: u16) -> u16 {
    let sum = u32::from(x) + u32::from(y);
    // sum <= 0x1_FFFE, so (low word + carry bit) always fits in a u16.
    ((sum & 0xffff) + (sum >> 16)) as u16
}
#[test]
// Exercises the no-carry, all-ones, and carry-wraparound cases.
fn test_ones_complement_sum() {
    //   0001 0110   22
    // + 0000 0011    3
    // =========== ====
    //   0001 1001   25
    let x = 0b_0000_0000_0001_0110;
    let y = 0b_0000_0000_0000_0011;
    assert_eq!(ones_complement_sum(x, y), 0b_0000_0000_0001_1001);
    //   1111 1110   -1 (254)
    // + 0000 0001    1
    // =========== ====
    //   1111 1111   -0 (lol)
    let x = 0b_0000_0000_1111_1110;
    let y = 0b_0000_0000_0000_0001;
    assert_eq!(ones_complement_sum(x, y), 0b_0000_0000_1111_1111);
    //   1111 1110   -1 (254)
    // + 0000 0011    3
    // =========== ====
    // 1 0000 0001
    //  \________
    //           \
    // + 0000 0001
    // =========== ====
    //   0000 0010    2
    let x = 0b_1111_1111_1111_1110;
    let y = 0b_0000_0000_0000_0011;
    assert_eq!(ones_complement_sum(x, y), 0b_0000_0000_0000_0010);
}
/// Walks the byte stream packet by packet (28 header bytes = 20 IPv4 + 8 UDP,
/// then `len` payload bytes), then keeps only packets with valid checksums and
/// the expected source/destination/port.
fn parse_and_filter_packets(bytes: &[u8]) -> Result<Vec<UdpPacket>> {
    let mut idx = 0;
    let mut packets = Vec::new();
    while idx < bytes.len() {
        let data_start = idx + 28;
        // Fixed: the original sliced `bytes[idx..data_start]` unconditionally,
        // which panics when fewer than 28 bytes remain; bail out gracefully
        // instead, matching the truncated-payload handling below. (This also
        // replaces the unreachable `if idx > bytes.len()` guard the loop had.)
        if data_start > bytes.len() {
            eprintln!("Ran out of data while reading a 28-byte header. idx={:?}, bytes.len={:?}", idx, bytes.len());
            break;
        }
        let header_data: [u8; 28] = bytes[idx..data_start].try_into()?;
        let header = UdpPacket::parse_headers(header_data)?;
        let data_end = data_start + header.len() as usize;
        if data_end > bytes.len() {
            eprintln!("Ran out of data while processing header= {:#?}. idx={:?}, data_start={:?}, data_end={:?}, bytes.len={:?}", header, idx, data_start, data_end, bytes.len());
            eprintln!("header bytes: \n {:?}", header_data);
            break;
        }
        packets.push(header.set_data(&bytes[data_start..data_end]));
        idx = data_end;
    }
    // Keep only intact packets from 10.1.1.10 to 10.1.1.200:42069.
    Ok(packets
        .into_iter()
        .filter(|packet| {
            packet.valid_checksums()
                && packet.ip_header.source == Ipv4Addr::new(10, 1, 1, 10)
                && packet.ip_header.destination == Ipv4Addr::new(10, 1, 1, 200)
                && packet.udp_header.destination_port == 42069
        })
        .collect())
}
/// Entry point: ascii85-decodes the capture, keeps only the valid/filtered
/// packets, and concatenates their payloads in order.
pub fn run(bytes: &[u8]) -> Result<Vec<u8>> {
    let packets = parse_and_filter_packets(&decode(bytes)?)?;
    // flat_map replaces the original `.map(...).flatten()` chain
    // (clippy::map_flatten).
    Ok(packets.into_iter().flat_map(|packet| packet.data).collect())
}
|
use std::ops::{Add, Sub};
#[derive(Debug)]
/// Reasons a side-length triple fails to form a triangle.
pub enum Error {
    /// At least one side has zero length.
    SideTooShort,
    /// The sides violate the triangle inequality.
    TriangleInequality,
}
/// A triangle described by its three side lengths.
pub struct Triangle<T> {
    sides: [T; 3],
}
// I wanted to try satisfying the optional tests without using any crates or creating separate
// impls for each numeric type, so this is a bit wonky.
//
// Any type T which implements addition, subtraction, and comparison can be Triangle sides, even
// when T is not something that would traditionally be considered "numeric". This means you can do
// funky things like create Triangles whose sides are std::time::Duration (time triangles!).
impl<T: Copy + PartialOrd + Add<Output = T> + Sub<Output = T>> Triangle<T> {
    /// Validates the sides and builds a triangle.
    ///
    /// # Errors
    /// `SideTooShort` if any side equals zero (derived as `sides[0] - sides[0]`
    /// so no numeric literal is required); `TriangleInequality` if any side is
    /// strictly longer than the other two combined (degenerate triangles pass).
    pub fn build(sides: [T; 3]) -> Result<Triangle<T>, Error> {
        let zero: T = sides[0] - sides[0];
        if sides.contains(&zero) {
            Err(Error::SideTooShort)
        } else if sides[0] + sides[1] < sides[2] || sides[1] + sides[2] < sides[0] ||
            sides[2] + sides[0] < sides[1] {
            Err(Error::TriangleInequality)
        } else {
            // Field-init shorthand; the original wrote the redundant `sides: sides`.
            Ok(Triangle { sides })
        }
    }
    /// True when all three sides are equal.
    pub fn is_equilateral(&self) -> bool {
        self.sides[0] == self.sides[1] && self.sides[1] == self.sides[2]
    }
    /// True when exactly two sides are equal.
    pub fn is_isosceles(&self) -> bool {
        !self.is_equilateral() && !self.is_scalene()
    }
    /// True when no two sides are equal.
    pub fn is_scalene(&self) -> bool {
        self.sides[0] != self.sides[1] && self.sides[1] != self.sides[2] &&
            self.sides[2] != self.sides[0]
    }
}
|
extern crate libc;
use libc::c_char;
use std::ffi::{CString, CStr};
use std::env;
use std::time::Instant;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::sync::atomic::{AtomicBool, Ordering};
use std::io::{BufReader, BufRead, Error, ErrorKind};
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
//const BUFF_SIZE: usize = 1024; // we dont need this because channel uses vec
// Size of the output buffer handed to the C dnslookup function.
const BUFF_ENTRY_SIZE: usize = 1024;
// Bounds used when validating the command-line arguments.
const MAX_INFILES: usize = 10;
const MAX_REQUESTERS: usize = 5;
const MAX_RESOLVERS: usize = 10;
// FFI declaration of the C resolver. By the usage below: returns 0 on success
// and places a NUL-terminated IP string in `firstIPstr` (at most `maxSize` bytes).
extern
{
    fn dnslookup(hostname: *const libc::c_char, firstIPstr: *const libc::c_char, maxSize: i32) -> i32;
}
// safe rust bindings for the c dnslookup function
// Returns the first resolved IP string for `hostname`, or Err("UTIL_FAILURE")
// when the C lookup reports a non-zero status.
fn dns_lookup(hostname: &str) -> Result<String, &str>
{
    // Stack buffer the C side fills with the result.
    // NOTE(review): the pointer is declared *const yet dnslookup presumably
    // writes through it — confirm against the C prototype.
    let out_buf: [c_char; BUFF_ENTRY_SIZE] = [0; BUFF_ENTRY_SIZE];
    let buf_ptr = out_buf.as_ptr();
    // The CString is bound to a local so its pointer stays valid for the call.
    let hostname_str = CString::new(hostname).unwrap();
    let c_hostname_str = hostname_str.as_ptr();
    // SAFETY: buf_ptr points at BUFF_ENTRY_SIZE bytes and maxSize tells the C
    // code not to overrun them.
    let res = unsafe { dnslookup(c_hostname_str, buf_ptr, BUFF_ENTRY_SIZE as i32) };
    if res == 0
    {
        // SAFETY: on success the buffer is assumed to hold a valid,
        // NUL-terminated C string (buffer was zero-initialized).
        let c_str: &CStr = unsafe { CStr::from_ptr(buf_ptr) };
        let str_slice: &str = c_str.to_str().unwrap();
        Ok(str_slice.to_owned())
    }
    else
    {
        Err("UTIL_FAILURE")
    }
}
// Shared, lock-protected line reader; None means the file failed to open.
struct ConcBufReader{inner: Arc<Mutex<Option<BufReader<File>>>>}
// Shared, lock-protected log-file writer.
struct ConcFileWriter{inner: Arc<Mutex<File>>}
impl ConcBufReader
{
pub fn read_line(&self, buf: &mut String) -> std::io::Result<usize>
{
let mut a = self.inner.lock().unwrap();
match &mut *a
{
Some(ref mut buf_reader) => buf_reader.read_line(buf),
None => Err(Error::new(ErrorKind::ConnectionRefused, "File empty."))
}
}
}
impl ConcFileWriter
{
    /// Appends `data` followed by a newline to the shared log file.
    pub fn writeln(&self, data: &str) -> std::io::Result<()>
    {
        let file = self.inner.lock().unwrap();
        // `&File` implements `Write`, so we write through the guard's deref.
        writeln!(&*file, "{}", data)
    }
}
impl Clone for ConcFileWriter
{
fn clone(&self) -> Self
{
ConcFileWriter{inner: self.inner.clone()}
}
}
/// Claims the first input file whose "available" flag is still true by
/// atomically flipping it to false; returns its index, or None when every file
/// has already been claimed.
fn get_open_file(fd_list: &Vec<(ConcBufReader, AtomicBool)>) -> Option<usize>
{
    for (i, (_, available)) in fd_list.iter().enumerate()
    {
        // compare_exchange replaces compare_and_swap, which has been deprecated
        // since Rust 1.50; Ok(_) means we observed `true` and swapped in `false`.
        if available
            .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            return Some(i);
        }
    }
    None
}
// Worker: claims input files one at a time and pushes each hostname line into
// the channel for the resolver threads; logs how many files it drained.
fn requester_loop(fd_list: Arc<Vec<(ConcBufReader, AtomicBool)>>, tx: Sender<String>, log_fd: ConcFileWriter)
{
    // None means every input file has already been claimed -> exit.
    let mut working_file_op: Option<usize> = get_open_file(&fd_list);
    let mut files_served = 0;
    while let Some(working_file) = working_file_op
    {
        let mut buf = String::new();
        match fd_list[working_file].0.read_line(&mut buf)
        {
            // A non-empty line: forward the trimmed hostname; send errors ignored.
            Ok(len) if len > 0 => { let _ = tx.send(buf.trim().to_owned()); },
            // EOF (len == 0) or a read error: count this file and claim the next.
            _ => { working_file_op = get_open_file(&fd_list); files_served+=1; }
        }
        //println!("buf: {}", buf.trim());
        //std::thread::sleep_ms(100);
    }
    let res = format!("Thread Files served: {}", files_served);
    let _ = log_fd.writeln(&res);
    //println!("{}", res);
}
// Worker: pulls hostnames off the shared channel, resolves each one via the C
// dnslookup binding, and appends "hostname, ip" (ip empty on failure) to the log.
fn resolver_loop(rx: Arc<Mutex<Receiver<String>>>, log_fd: ConcFileWriter)
{
    loop
    {
        // The receiver is shared behind a mutex, so resolvers take turns on
        // recv(); the inner scope drops the guard before the (slow) DNS lookup.
        let data = {
            let inner_rx = rx.lock().unwrap();
            match inner_rx.recv()
            {
                Ok(data) => data,
                // recv fails once all senders are dropped -> shut down.
                _ => break
            }
        };
        let res = match dns_lookup(&data)
        {
            Ok(dns_res) => format!("{}, {}", &data, &dns_res),
            Err(_) => format!("{}, ", &data)
        };
        // write to file
        let _ = log_fd.writeln(&res);
        //println!("{}", res);
        //drop(res);
    }
}
// Wires everything together: opens the log files and input files, spawns
// `num_req` requester threads and `num_res` resolver threads connected by a
// channel, then joins them (requesters first, then resolvers after the channel
// is closed by dropping the last sender).
fn start_requester_resolver_loop<'a>(num_req: i32, num_res: i32, req_log_file: &str, res_log_file: &str, in_files: Vec<&'a str>)
{
    let (tx, rx) = channel::<String>(); // reader writer buffer
    // The single receiver is shared among all resolver threads.
    let rx = Arc::new(Mutex::new(rx));
    // I would sort of like to use tokio or futures but for this project I am opting more for threads and channels
    let mut req_childs = Vec::new();
    let mut res_childs = Vec::new();
    // NOTE(review): requester log appends, resolver log truncates — confirm
    // this asymmetry is intentional.
    let req_log_fd = OpenOptions::new().write(true).append(true).create(true).open(req_log_file).unwrap();
    let res_log_fd = OpenOptions::new().write(true).truncate(true).create(true).open(res_log_file).unwrap();
    let req_log_fd = ConcFileWriter{inner: Arc::new(Mutex::new(req_log_fd))};
    let res_log_fd = ConcFileWriter{inner: Arc::new(Mutex::new(res_log_fd))};
    // Each input file is paired with an "available" flag that requesters claim
    // atomically; a file that failed to open becomes a None reader.
    let fd_list = Arc::new(in_files.iter().map(|file|
    {
        let fd = OpenOptions::new().read(true).open(file).ok(); // we want a list of Options
        (ConcBufReader{inner: match fd
            {
                Some(fd) => Arc::new(Mutex::new(Some(BufReader::new(fd)))),
                None => Arc::new(Mutex::new(None)) // this means that the file is closed for reading
            }
        }, AtomicBool::new(true))
    }).collect::<Vec<(ConcBufReader, AtomicBool)>>()); // a little over on the arcs
    for _i in 0..num_req
    {
        let this_req_log_fd = req_log_fd.clone();
        let this_tx = tx.clone();
        let this_fd_list = fd_list.clone();
        req_childs.push(thread::spawn(move || requester_loop(this_fd_list, this_tx, this_req_log_fd)));
    }
    for _i in 0..num_res
    {
        let this_res_log_fd = res_log_fd.clone();
        let this_rx = rx.clone();
        res_childs.push(thread::spawn(move || resolver_loop(this_rx, this_res_log_fd)));
    }
    for child in req_childs
    {
        child.join().unwrap();
    }
    drop(tx); // The resolver exits when all tx clones are dropped
    for child in res_childs
    {
        child.join().unwrap();
    }
}
/// Entry point: parses CLI arguments, validates thread counts, and runs the
/// requester/resolver pipeline, reporting elapsed wall-clock time.
///
/// Usage: multi-lookup <#requesters> <#resolvers> <req_log> <res_log> <infile>...
fn main()
{
    let args: Vec<String> = env::args().collect();
    let num_requesters: i32;
    let num_resolvers: i32;
    let requesters_log_file: &str;
    let resolvers_log_file: &str;
    let mut in_files: Vec<&str> = Vec::new();
    // args[0] is the program name; 4 fixed args + at least 1 input file = 6.
    if args.len() > MAX_INFILES + 5
    {
        println!("Wrong number of inputs. Expected:");
        println!("multi-lookup <#requesters> <#resolvers> <#req_log> <#res_log> [<#infile>, ...]")
    }
    else if args.len() < 6
    {
        println!("Wrong number of inputs. Expected:");
        println!("multi-lookup <#requesters> <#resolvers> <#req_log> <#res_log> [<#infile>, ...]")
    }
    else
    {
        // NOTE(review): parse().unwrap() panics on non-numeric count args; a
        // friendlier error message might be preferable here.
        num_requesters = args[1].parse::<i32>().unwrap();
        num_resolvers = args[2].parse::<i32>().unwrap();
        requesters_log_file = &args[3];
        resolvers_log_file = &args[4];
        // Everything after the two log paths is an input file.
        for i in 0..(args.len()-5)
        {
            in_files.push(&args[i+5])
        }
        if num_requesters as usize > MAX_REQUESTERS
        {
            println!("Too many requesters, max: {}", MAX_REQUESTERS);
        }
        else if num_resolvers as usize > MAX_RESOLVERS
        {
            println!("Too many resolvers, max: {}", MAX_RESOLVERS);
        }
        else
        {
            let now = Instant::now();
            start_requester_resolver_loop(num_requesters, num_resolvers, requesters_log_file, resolvers_log_file, in_files);
            let elapsed = now.elapsed();
            println!("Took {} micro secs ({} sec) to finish", elapsed.as_micros(), elapsed.as_micros() as f32 / 1000000.0);
        }
    }
}
|
use amethyst::ecs::Entity;
use crate::config::MapConfig;
/// Logical tile grid for a level: dimensions, tile size, and the entity
/// handle for each tile, stored in row-major order (see `Map::tile`).
#[derive(Default)]
pub struct Map {
    width: usize,      // grid width, in tiles
    height: usize,     // grid height, in tiles
    tile_size: usize,  // edge length of one square tile -- unit (px?) not shown here, confirm in MapConfig
    tiles: Vec<Entity> // row-major: index = x + y * width
}
impl Map {
pub fn new(config: &MapConfig) -> Self {
Map {
width: config.width,
height: config.height,
tile_size: config.tile_size,
tiles: Vec::new()
}
}
pub fn width(&self) -> usize {
self.width
}
pub fn height(&self) -> usize {
self.height
}
pub fn tile_size(&self) -> usize {
self.tile_size
}
pub fn ratio(&self) -> f32 {
1.0 / self.tile_size as f32
}
pub fn size(&self) -> usize {
self.width * self.height
}
pub fn tile(&self, x: usize, y: usize) -> Entity {
let idx = x + y * self.width;
self.tiles[idx]
}
pub fn add_tile(&mut self, e: Entity) {
self.tiles.push(e);
}
pub fn in_bound(&self, x: f32, y: f32) -> bool {
x >= 0. && y >= 0. && x < self.width as f32 && y < self.height as f32
}
}
|
// Puzzle input: the initial Intcode program as comma-separated cells.
const INPUT: &str = "1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,10,1,19,1,6,19,23,2,23,6,27,1,5,27,31,1,31,9,35,2,10,35,39,1,5,39,43,2,43,10,47,1,47,6,51,2,51,6,55,2,55,13,59,2,6,59,63,1,63,5,67,1,6,67,71,2,71,9,75,1,6,75,79,2,13,79,83,1,9,83,87,1,87,13,91,2,91,10,95,1,6,95,99,1,99,13,103,1,13,103,107,2,107,10,111,1,9,111,115,1,115,10,119,1,5,119,123,1,6,123,127,1,10,127,131,1,2,131,135,1,135,10,0,99,2,14,0,0";
// Runs both halves of the puzzle back to back.
fn main() {
    part1();
    part2();
}
/// Part 1: run the program with memory[1] = 12 and memory[2] = 2 (the values
/// the puzzle statement prescribes) and print the value left at address 0.
fn part1() {
    let mut memory = Vec::new();
    parse_line(INPUT, &mut memory);
    memory[1] = 12;
    memory[2] = 2;
    run(&mut memory);
    println!("memory[0]={}", memory[0]);
}
/// Part 2: brute-force the noun/verb pair (each 0..100) that leaves 19690720
/// at memory[0], then print 100 * noun + verb.
fn part2() {
    for noun in 0..100 {
        for verb in 0..100 {
            // Each attempt needs a fresh copy of the program, since `run`
            // mutates memory in place.
            let mut memory = Vec::new();
            parse_line(INPUT, &mut memory);
            memory[1] = noun;
            memory[2] = verb;
            run(&mut memory);
            if memory[0] == 19690720 {
                println!("noun {}, verb {}", noun, verb);
                println!("100 * noun + verb = {}", (100 * noun + verb));
                // Fix: stop once the answer is found instead of running the
                // remaining (up to ~10,000) combinations for nothing.
                return;
            }
        }
    }
}
/// Executes an Intcode program in place until opcode 99 halts it.
///
/// Opcodes 1 (add) and 2 (multiply) read two positional operands and store
/// the result at a positional target; each such instruction is 4 cells long.
///
/// # Panics
/// Panics on an unknown opcode or any out-of-bounds address.
fn run(memory: &mut Vec<usize>) {
    let mut pc = 0;
    loop {
        let opcode = memory[pc];
        match opcode {
            // Opcodes 1 and 2 share all operand decoding; only the
            // arithmetic differs, so the arms are merged.
            1 | 2 => {
                let lhs = memory[memory[pc + 1]];
                let rhs = memory[memory[pc + 2]];
                let target = memory[pc + 3];
                memory[target] = if opcode == 1 { lhs + rhs } else { lhs * rhs };
                pc += 4;
            }
            99 => break,
            other => panic!("unexpected op code {} at pc {}", other, pc),
        }
    }
}
/// Parses a comma-separated list of unsigned integers and appends them to
/// `memory` (existing contents are preserved).
///
/// # Panics
/// Panics if any field is not a valid `usize`.
fn parse_line(s: &str, memory: &mut Vec<usize>) {
    // `extend` reserves from the iterator's size hint and avoids the
    // intermediate Vec a collect-then-append would allocate.
    memory.extend(s.split(',').map(|x| x.parse::<usize>().unwrap()));
}
#[cfg(test)]
mod tests {
    use super::*;
    // Parsing splits on commas and appends to the target vector.
    #[test]
    pub fn test_parse_line() {
        let mut memory = Vec::new();
        parse_line("1,2,3", &mut memory);
        assert_eq!(vec![1,2,3], memory);
    }
    // Minimal example: 1 + 1 is stored at address 3, then halt.
    #[test]
    pub fn test_sample0() {
        let mut memory = Vec::new();
        parse_line("1,0,0,3,99", &mut memory);
        run(&mut memory);
        assert_eq!(2, memory[3]);
    }
    // Larger worked example exercising both add and multiply.
    #[test]
    pub fn test_sample1() {
        let mut memory = Vec::new();
        parse_line("1,9,10,3,2,3,11,0,99,30,40,50", &mut memory);
        run(&mut memory);
        assert_eq!(70, memory[3]);
        assert_eq!(3500, memory[0]);
    }
    // NOTE(review): empty placeholder with no assertions; the comment only
    // records a previously-rejected answer. Consider completing or removing.
    #[test]
    pub fn test_final() {
        // this is wrong: 8017076
    }
}
|
// Problem 20 - Factorial digit sum
//
// n! means n x (n - 1) x ... x 3 x 2 x 1
//
// For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800,
// and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
//
// Find the sum of the digits in the number 100!
// Prints the digit sum of 100! (Project Euler problem 20).
fn main() {
    println!("{}", solution());
}
/// Computes the digit sum of 100! with grade-school bignum arithmetic.
///
/// The factorial is kept as a vector of decimal digits, least significant
/// first; each step multiplies every digit by `n` and propagates the carry.
fn solution() -> u32 {
    let mut factorial = vec![1];
    for n in 2..=100 {
        let mut carry = 0;
        // Multiply in place instead of rebuilding the digit vector on every
        // pass (the original allocated a fresh Vec per multiplier).
        for digit in factorial.iter_mut() {
            let product = *digit * n + carry;
            *digit = product % 10;
            carry = product / 10;
        }
        // Flush any remaining carry into new high-order digits.
        while carry > 0 {
            factorial.push(carry % 10);
            carry /= 10;
        }
    }
    // The answer is simply the sum of all decimal digits.
    factorial.iter().sum()
}
|
// svd2rust-style generated accessors for the ISACTIVER0 register.
#[doc = "Reader of register ISACTIVER0"]
pub type R = crate::R<u32, super::ISACTIVER0>;
#[doc = "Writer for register ISACTIVER0"]
pub type W = crate::W<u32, super::ISACTIVER0>;
#[doc = "Register ISACTIVER0 `reset()`'s with value 0"]
impl crate::ResetValue for super::ISACTIVER0 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `ISACTIVER0`"]
pub type ISACTIVER0_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `ISACTIVER0`"]
pub struct ISACTIVER0_W<'a> {
    w: &'a mut W,
}
impl<'a> ISACTIVER0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // The field spans all 32 bits: `!0xffff_ffff` is 0, so this write
        // effectively replaces the whole register value.
        self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
        self.w
    }
}
impl R {
    // NOTE(review): the doc string says "interrupt clear-pending", but this
    // is the ISACTIVER (interrupt set-active) register -- the description
    // looks copy-pasted from ICPR; confirm against the SVD source.
    #[doc = "Bits 0:31 - interrupt clear-pending"]
    #[inline(always)]
    pub fn isactiver0(&self) -> ISACTIVER0_R {
        ISACTIVER0_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    // NOTE(review): same description mismatch as the reader above.
    #[doc = "Bits 0:31 - interrupt clear-pending"]
    #[inline(always)]
    pub fn isactiver0(&mut self) -> ISACTIVER0_W {
        ISACTIVER0_W { w: self }
    }
}
|
/// Parses the puzzle input: one integer module mass per line.
#[aoc_generator(day1)]
fn gen(input: &str) -> Vec<i32> {
    let mut masses = Vec::new();
    for line in input.lines() {
        masses.push(line.parse::<i32>().unwrap());
    }
    masses
}
/// Part 1: total naive fuel requirement, `mass / 3 - 2` summed over modules.
#[aoc(day1, part1)]
fn part1(input: &Vec<i32>) -> i32 {
    input.iter().fold(0, |total, &mass| total + (mass / 3 - 2))
}
#[aoc(day1, part2)]
fn part2(input: &Vec<i32>) -> i32 {
input.iter()
.map(|n| {
let mut t = 0;
let mut n = *n;
while n > 0 {
n = n / 3 - 2;
if n > 0 { t += n };
}
t
})
.sum()
} |
// Problem 42 - Coded triangle numbers
//
// The n-th term of the sequence of triangle numbers is given by, t(n) = ½n(n+1);
// so the first ten triangle numbers are:
//
// 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
//
// By converting each letter in a word to a number corresponding to its
// alphabetical position and adding these values we form a word value. For
// example, the word value for SKY is 19 + 11 + 25 = 55 = t(10). If the word value
// is a triangle number then we shall call the word a triangle word.
//
// Using "../resources/p042_words.txt", a 16K text file containing nearly
// two-thousand common English words, how many are triangle words?
use std::collections::HashSet;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
// Prints the count of triangle words (Project Euler problem 42).
fn main() {
    println!("{}", solution());
}
/// Counts the triangle words in p042_words.txt.
///
/// A word's value is the sum of its letters' 1-based alphabet positions; the
/// word is a triangle word when that value equals some t(n) = n(n+1)/2.
fn solution() -> usize {
    let file = match File::open("../resources/p042_words.txt") {
        Ok(f) => BufReader::new(f),
        Err(_) => panic!("Error opening file"),
    };
    // The file is comma-separated, double-quoted words.
    let words = file.split(b',')
        .map(Result::unwrap)
        .map(unquote)
        .collect::<Vec<_>>();
    // Precompute every triangle number a word value could reach. The largest
    // possible value is maxlen letters that are all 'Z' (26 each), so the
    // bound must be inclusive (`<=`); the original `<` missed that edge.
    let maxlen = words.iter().map(String::len).max().unwrap();
    let triangles = (1..).map(triangle)
        .take_while(|&t| t <= maxlen * 26)
        .collect::<HashSet<usize>>();
    words.iter()
        .map(numerify)
        // `n` is already `&usize` here; the original passed `&n` (a
        // `&&usize`), which fails `HashSet::<usize>::contains`'s
        // `usize: Borrow<Q>` bound and does not compile.
        .filter(|n| triangles.contains(n))
        .count()
}
/// Decodes raw bytes as UTF-8 and strips any surrounding double quotes.
fn unquote(s: Vec<u8>) -> String {
    let text = String::from_utf8(s).unwrap();
    text.trim_matches('"').to_owned()
}
/// The n-th triangle number, t(n) = n(n + 1) / 2.
fn triangle(n: usize) -> usize {
    (n + 1) * n / 2
}
/// Word value: sum of the 1-based alphabet positions of the (uppercase
/// ASCII) letters in `s`.
fn numerify(s: &String) -> usize {
    s.chars().map(|c| c as usize - 'A' as usize + 1).sum()
}
|
use rlimit::{setrlimit, Resource};
use cgroups_rs::{MaxValue, CgroupPid};
use cgroups_rs::hierarchies::V2;
use cgroups_rs::cgroup_builder::CgroupBuilder;
use nix::unistd::Pid;
use std::convert::TryInto;
use std::fs::{canonicalize, remove_dir};
use crate::errors::Errcode;
const KMEM_LIMIT: i64 = 1024 * 1024 * 1024;
const MEM_LIMIT: i64 = KMEM_LIMIT;
const MAX_PID: MaxValue = MaxValue::Value(64);
const NOFILE_RLIMIT: u64 = 64;
/// Places the process `pid` under a v2 cgroup named after `hostname` and
/// applies resource limits: 256 CPU shares, 1 GiB kernel/user memory, at
/// most 64 processes, blkio weight 50; also caps the calling process at 64
/// open file descriptors via rlimit.
///
/// Returns `ResourcesError(0)` if the task cannot be added to the cgroup and
/// `ResourcesError(1)` if the rlimit call fails.
pub fn restrict_resources(hostname: &String, pid: Pid) -> Result<(), Errcode> {
    // Fixed typo in the log message ("Restrictiong" -> "Restricting").
    log::debug!("Restricting resources for hostname {}", hostname);
    let cgs = CgroupBuilder::new(hostname)
        .cpu().shares(256).done()
        .memory().kernel_memory_limit(KMEM_LIMIT).memory_hard_limit(MEM_LIMIT).done()
        .pid().maximum_number_of_processes(MAX_PID).done()
        .blkio().weight(50).done()
        .build(Box::new(V2::new()));
    // Raw pid is non-negative, so the conversion to u64 cannot fail.
    let pid: u64 = pid.as_raw().try_into().unwrap();
    if cgs.add_task(CgroupPid::from(pid)).is_err() {
        return Err(Errcode::ResourcesError(0));
    }
    if setrlimit(Resource::NOFILE, NOFILE_RLIMIT, NOFILE_RLIMIT).is_err() {
        return Err(Errcode::ResourcesError(1));
    }
    Ok(())
}
pub fn clean_cgroups(hostname: &String) -> Result<(), Errcode> {
log::debug!("Cleaning cgroups");
match canonicalize(format!("/sys/fs/cgroup/{}/", hostname)) {
Ok(d) => {
if let Err(_) = remove_dir(d) {
return Err(Errcode::ResourcesError(2));
}
},
Err(e) => {
log::error!("Error while canonicalize path: {}", e);
return Err(Errcode::ResourcesError(3));
},
}
Ok(())
} |
/*
搜索旋转排序数组
假设按照升序排序的数组在预先未知的某个点上进行了旋转。
( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。
搜索一个给定的目标值,如果数组中存在这个目标值,则返回它的索引,否则返回 -1 。
你可以假设数组中不存在重复的元素。
你的算法时间复杂度必须是 O(log n) 级别。
示例 1:
输入: nums = [4,5,6,7,0,1,2], target = 0
输出: 4
示例 2:
输入: nums = [4,5,6,7,0,1,2], target = 3
输出: -1
*/
use crate::array::Solution;
impl Solution {
    /// Searches a rotated ascending array (no duplicates) for `target`,
    /// returning its index or -1 (LeetCode 33). Runs in O(log n).
    pub fn search(nums: Vec<i32>, target: i32) -> i32 {
        if nums.is_empty() { return -1; }
        // Both endpoints are checked up front so the recursive helper can
        // operate on the OPEN interval (0, r), excluding both ends.
        if nums[0] == target { return 0; }
        let r = nums.len() - 1;
        if nums[r] == target { return r as i32; }
        // assert!(nums.first().unwrap() >= nums.last().unwrap());
        search(&nums, 0, r, target)
    }
}
/// Searches the rotated array for `t` over the OPEN interval `a(l..r)` --
/// the caller has already ruled out both `a[l]` and `a[r]`.
fn search(a: &[i32], l: usize, r: usize, t: i32) -> i32 {
    if r - l <= 1 { return -1; } // base case: open interval is empty
    let m = (l + r) >> 1;
    if a[m] == t { return m as i32; } // hit
    // Exactly one of the halves (l, m) / (m, r) is fully sorted: plain
    // binary search on the sorted side if `t` lies strictly inside it,
    // otherwise recurse into the other (still rotated) half.
    if a[l] < a[m] {
        // (l, m) is sorted ascending
        if a[l] < t && t < a[m] { bin_search(a, l, m, t) } // O(log n)
        else { search(a, m, r, t) } // T(n/2)
    } else {
        // (m, r) is sorted ascending
        if a[m] < t && t < a[r] { bin_search(a, m, r, t) } // O(log n)
        else { search(a, l, m, t) } // T(n/2)
    }
}
/// Binary search for `t` within the open interval `a(l..r)` — both
/// endpoints excluded; returns the index or -1.
fn bin_search(a: &[i32], l: usize, r: usize, t: i32) -> i32 {
    if r - l <= 1 {
        return -1; // empty open interval
    }
    let m = l + (r - l) / 2;
    if t == a[m] {
        m as i32
    } else if t < a[m] {
        bin_search(a, l, m, t)
    } else {
        bin_search(a, m, r, t)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Table-driven cases: ((nums, target), expected index or -1), covering
    // hits in both halves, two-element rotations, and a miss.
    #[test]
    fn test() {
        let cases = vec![
            ((vec![4, 5, 6, 7, 0, 1, 2], 0), 4),
            ((vec![4, 5, 6, 7, 0, 1, 2], 5), 1),
            ((vec![4, 1], 0), -1),
            ((vec![4, 1], 1), 1),
            ((vec![4, 5, 6, 7, 0, 1, 2], 3), -1),
        ];
        for (input, output) in cases {
            assert_eq!(Solution::search(input.0, input.1), output);
        }
    }
}
|
//! This library provides extension methods for the `Default` trait.
//!
//! ## Example
//! case1:
//! ```
//! # use default_ext::DefaultExt;
//! assert!(false.is_default());
//! ```
//!
//! case2:
//! ```ignore
//! #[derive(serde::Serialize, serde::Deserialize)]
//! struct Object {
//! #[serde(
//! default,
//! skip_serializing_if = "default_ext::DefaultExt::is_default",
//! )]
//! is_test: bool,
//! }
//! ```
/// Extension trait adding a `is_default` check to comparable `Default` types.
pub trait DefaultExt {
    /// Returns `true` when `self` equals `Self::default()`.
    fn is_default(&self) -> bool;
}
/// Blanket implementation: every `Default + PartialEq` type gets `is_default`.
impl<T> DefaultExt for T
where
    T: Default + PartialEq,
{
    fn is_default(&self) -> bool {
        // Compare against a freshly constructed default value.
        let default_value = Self::default();
        *self == default_value
    }
}
|
#[doc = "Register `BDTR` reader"]
pub type R = crate::R<BDTR_SPEC>;
#[doc = "Register `BDTR` writer"]
pub type W = crate::W<BDTR_SPEC>;
#[doc = "Field `DTG` reader - Dead-time generator setup"]
pub type DTG_R = crate::FieldReader;
#[doc = "Field `DTG` writer - Dead-time generator setup"]
pub type DTG_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 8, O>;
#[doc = "Field `LOCK` reader - Lock configuration"]
pub type LOCK_R = crate::FieldReader<LOCK_A>;
#[doc = "Lock configuration\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum LOCK_A {
#[doc = "0: No bit is write protected"]
Off = 0,
#[doc = "1: Any bits except MOE, OSSR, OSSI and LOCK in TIMx_BDTR register, OISx and OISxN bits in TIMx_CR2 register can no longer be written"]
Level1 = 1,
#[doc = "2: LOCK Level 1 + CC Polarity bits (CCxP/CCxNP bits in TIMx_CCER register, as long as the related channel is configured in output through the CCxS bits) as well as OSSR and OSSI bits can no longer be written"]
Level2 = 2,
#[doc = "3: LOCK Level 2 + CC Control bits (OCxM and OCxPE bits in TIMx_CCMRx registers, as long as the related channel is configured in output through the CCxS bits) can no longer be written"]
Level3 = 3,
}
impl From<LOCK_A> for u8 {
#[inline(always)]
fn from(variant: LOCK_A) -> Self {
variant as _
}
}
impl crate::FieldSpec for LOCK_A {
type Ux = u8;
}
impl LOCK_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> LOCK_A {
match self.bits {
0 => LOCK_A::Off,
1 => LOCK_A::Level1,
2 => LOCK_A::Level2,
3 => LOCK_A::Level3,
_ => unreachable!(),
}
}
#[doc = "No bit is write protected"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == LOCK_A::Off
}
#[doc = "Any bits except MOE, OSSR, OSSI and LOCK in TIMx_BDTR register, OISx and OISxN bits in TIMx_CR2 register can no longer be written"]
#[inline(always)]
pub fn is_level1(&self) -> bool {
*self == LOCK_A::Level1
}
#[doc = "LOCK Level 1 + CC Polarity bits (CCxP/CCxNP bits in TIMx_CCER register, as long as the related channel is configured in output through the CCxS bits) as well as OSSR and OSSI bits can no longer be written"]
#[inline(always)]
pub fn is_level2(&self) -> bool {
*self == LOCK_A::Level2
}
#[doc = "LOCK Level 2 + CC Control bits (OCxM and OCxPE bits in TIMx_CCMRx registers, as long as the related channel is configured in output through the CCxS bits) can no longer be written"]
#[inline(always)]
pub fn is_level3(&self) -> bool {
*self == LOCK_A::Level3
}
}
#[doc = "Field `LOCK` writer - Lock configuration"]
pub type LOCK_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, LOCK_A>;
impl<'a, REG, const O: u8> LOCK_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
REG::Ux: From<u8>,
{
#[doc = "No bit is write protected"]
#[inline(always)]
pub fn off(self) -> &'a mut crate::W<REG> {
self.variant(LOCK_A::Off)
}
#[doc = "Any bits except MOE, OSSR, OSSI and LOCK in TIMx_BDTR register, OISx and OISxN bits in TIMx_CR2 register can no longer be written"]
#[inline(always)]
pub fn level1(self) -> &'a mut crate::W<REG> {
self.variant(LOCK_A::Level1)
}
#[doc = "LOCK Level 1 + CC Polarity bits (CCxP/CCxNP bits in TIMx_CCER register, as long as the related channel is configured in output through the CCxS bits) as well as OSSR and OSSI bits can no longer be written"]
#[inline(always)]
pub fn level2(self) -> &'a mut crate::W<REG> {
self.variant(LOCK_A::Level2)
}
#[doc = "LOCK Level 2 + CC Control bits (OCxM and OCxPE bits in TIMx_CCMRx registers, as long as the related channel is configured in output through the CCxS bits) can no longer be written"]
#[inline(always)]
pub fn level3(self) -> &'a mut crate::W<REG> {
self.variant(LOCK_A::Level3)
}
}
#[doc = "Field `OSSI` reader - Off-state selection for Idle mode"]
pub type OSSI_R = crate::BitReader<OSSI_A>;
#[doc = "Off-state selection for Idle mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OSSI_A {
#[doc = "0: When inactive, OC/OCN outputs are disabled"]
Disabled = 0,
#[doc = "1: When inactive, OC/OCN outputs are forced to idle level"]
IdleLevel = 1,
}
impl From<OSSI_A> for bool {
#[inline(always)]
fn from(variant: OSSI_A) -> Self {
variant as u8 != 0
}
}
impl OSSI_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OSSI_A {
match self.bits {
false => OSSI_A::Disabled,
true => OSSI_A::IdleLevel,
}
}
#[doc = "When inactive, OC/OCN outputs are disabled"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == OSSI_A::Disabled
}
#[doc = "When inactive, OC/OCN outputs are forced to idle level"]
#[inline(always)]
pub fn is_idle_level(&self) -> bool {
*self == OSSI_A::IdleLevel
}
}
#[doc = "Field `OSSI` writer - Off-state selection for Idle mode"]
pub type OSSI_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, OSSI_A>;
impl<'a, REG, const O: u8> OSSI_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "When inactive, OC/OCN outputs are disabled"]
#[inline(always)]
pub fn disabled(self) -> &'a mut crate::W<REG> {
self.variant(OSSI_A::Disabled)
}
#[doc = "When inactive, OC/OCN outputs are forced to idle level"]
#[inline(always)]
pub fn idle_level(self) -> &'a mut crate::W<REG> {
self.variant(OSSI_A::IdleLevel)
}
}
#[doc = "Field `OSSR` reader - Off-state selection for Run mode"]
pub type OSSR_R = crate::BitReader<OSSR_A>;
#[doc = "Off-state selection for Run mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OSSR_A {
#[doc = "0: When inactive, OC/OCN outputs are disabled"]
Disabled = 0,
#[doc = "1: When inactive, OC/OCN outputs are enabled with their inactive level"]
IdleLevel = 1,
}
impl From<OSSR_A> for bool {
#[inline(always)]
fn from(variant: OSSR_A) -> Self {
variant as u8 != 0
}
}
impl OSSR_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OSSR_A {
match self.bits {
false => OSSR_A::Disabled,
true => OSSR_A::IdleLevel,
}
}
#[doc = "When inactive, OC/OCN outputs are disabled"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == OSSR_A::Disabled
}
#[doc = "When inactive, OC/OCN outputs are enabled with their inactive level"]
#[inline(always)]
pub fn is_idle_level(&self) -> bool {
*self == OSSR_A::IdleLevel
}
}
#[doc = "Field `OSSR` writer - Off-state selection for Run mode"]
pub type OSSR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, OSSR_A>;
impl<'a, REG, const O: u8> OSSR_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "When inactive, OC/OCN outputs are disabled"]
#[inline(always)]
pub fn disabled(self) -> &'a mut crate::W<REG> {
self.variant(OSSR_A::Disabled)
}
#[doc = "When inactive, OC/OCN outputs are enabled with their inactive level"]
#[inline(always)]
pub fn idle_level(self) -> &'a mut crate::W<REG> {
self.variant(OSSR_A::IdleLevel)
}
}
#[doc = "Field `BKE` reader - Break enable"]
pub type BKE_R = crate::BitReader<BKE_A>;
#[doc = "Break enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BKE_A {
    #[doc = "0: Break function x disabled"]
    Disabled = 0,
    // Doc fixed: the generated text described value 1 as "disabled" too,
    // but this variant enables the break function.
    #[doc = "1: Break function x enabled"]
    Enabled = 1,
}
impl From<BKE_A> for bool {
    #[inline(always)]
    fn from(variant: BKE_A) -> Self {
        variant as u8 != 0
    }
}
impl BKE_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BKE_A {
        match self.bits {
            false => BKE_A::Disabled,
            true => BKE_A::Enabled,
        }
    }
    #[doc = "Break function x disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == BKE_A::Disabled
    }
    // Doc fixed: "disabled" -> "enabled" (copy-paste error in generated docs).
    #[doc = "Break function x enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == BKE_A::Enabled
    }
}
#[doc = "Field `BKE` writer - Break enable"]
pub type BKE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BKE_A>;
impl<'a, REG, const O: u8> BKE_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Break function x disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(BKE_A::Disabled)
    }
    // Doc fixed: "disabled" -> "enabled" (copy-paste error in generated docs).
    #[doc = "Break function x enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(BKE_A::Enabled)
    }
}
#[doc = "Field `BKP` reader - Break polarity"]
pub type BKP_R = crate::BitReader<BKP_A>;
#[doc = "Break polarity\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BKP_A {
#[doc = "0: Break input BRKx is active low"]
ActiveLow = 0,
#[doc = "1: Break input BRKx is active high"]
ActiveHigh = 1,
}
impl From<BKP_A> for bool {
#[inline(always)]
fn from(variant: BKP_A) -> Self {
variant as u8 != 0
}
}
impl BKP_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BKP_A {
match self.bits {
false => BKP_A::ActiveLow,
true => BKP_A::ActiveHigh,
}
}
#[doc = "Break input BRKx is active low"]
#[inline(always)]
pub fn is_active_low(&self) -> bool {
*self == BKP_A::ActiveLow
}
#[doc = "Break input BRKx is active high"]
#[inline(always)]
pub fn is_active_high(&self) -> bool {
*self == BKP_A::ActiveHigh
}
}
#[doc = "Field `BKP` writer - Break polarity"]
pub type BKP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BKP_A>;
impl<'a, REG, const O: u8> BKP_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "Break input BRKx is active low"]
#[inline(always)]
pub fn active_low(self) -> &'a mut crate::W<REG> {
self.variant(BKP_A::ActiveLow)
}
#[doc = "Break input BRKx is active high"]
#[inline(always)]
pub fn active_high(self) -> &'a mut crate::W<REG> {
self.variant(BKP_A::ActiveHigh)
}
}
#[doc = "Field `AOE` reader - Automatic output enable"]
pub type AOE_R = crate::BitReader<AOE_A>;
#[doc = "Automatic output enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AOE_A {
#[doc = "0: MOE can be set only by software"]
Manual = 0,
#[doc = "1: MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active)"]
Automatic = 1,
}
impl From<AOE_A> for bool {
#[inline(always)]
fn from(variant: AOE_A) -> Self {
variant as u8 != 0
}
}
impl AOE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> AOE_A {
match self.bits {
false => AOE_A::Manual,
true => AOE_A::Automatic,
}
}
#[doc = "MOE can be set only by software"]
#[inline(always)]
pub fn is_manual(&self) -> bool {
*self == AOE_A::Manual
}
#[doc = "MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active)"]
#[inline(always)]
pub fn is_automatic(&self) -> bool {
*self == AOE_A::Automatic
}
}
#[doc = "Field `AOE` writer - Automatic output enable"]
pub type AOE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, AOE_A>;
impl<'a, REG, const O: u8> AOE_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "MOE can be set only by software"]
#[inline(always)]
pub fn manual(self) -> &'a mut crate::W<REG> {
self.variant(AOE_A::Manual)
}
#[doc = "MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active)"]
#[inline(always)]
pub fn automatic(self) -> &'a mut crate::W<REG> {
self.variant(AOE_A::Automatic)
}
}
#[doc = "Field `MOE` reader - Main output enable"]
pub type MOE_R = crate::BitReader<MOE_A>;
#[doc = "Main output enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MOE_A {
#[doc = "0: OC/OCN are disabled or forced idle depending on OSSI"]
DisabledIdle = 0,
#[doc = "1: OC/OCN are enabled if CCxE/CCxNE are set"]
Enabled = 1,
}
impl From<MOE_A> for bool {
#[inline(always)]
fn from(variant: MOE_A) -> Self {
variant as u8 != 0
}
}
impl MOE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> MOE_A {
match self.bits {
false => MOE_A::DisabledIdle,
true => MOE_A::Enabled,
}
}
#[doc = "OC/OCN are disabled or forced idle depending on OSSI"]
#[inline(always)]
pub fn is_disabled_idle(&self) -> bool {
*self == MOE_A::DisabledIdle
}
#[doc = "OC/OCN are enabled if CCxE/CCxNE are set"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == MOE_A::Enabled
}
}
#[doc = "Field `MOE` writer - Main output enable"]
pub type MOE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, MOE_A>;
impl<'a, REG, const O: u8> MOE_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "OC/OCN are disabled or forced idle depending on OSSI"]
#[inline(always)]
pub fn disabled_idle(self) -> &'a mut crate::W<REG> {
self.variant(MOE_A::DisabledIdle)
}
#[doc = "OC/OCN are enabled if CCxE/CCxNE are set"]
#[inline(always)]
pub fn enabled(self) -> &'a mut crate::W<REG> {
self.variant(MOE_A::Enabled)
}
}
#[doc = "Field `BKF` reader - Break filter"]
pub type BKF_R = crate::FieldReader;
#[doc = "Field `BKF` writer - Break filter"]
pub type BKF_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `BK2F` reader - Break 2 filter"]
pub type BK2F_R = crate::FieldReader;
#[doc = "Field `BK2F` writer - Break 2 filter"]
pub type BK2F_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `BK2E` reader - Break 2 enable"]
pub use BKE_R as BK2E_R;
#[doc = "Field `BK2E` writer - Break 2 enable"]
pub use BKE_W as BK2E_W;
#[doc = "Field `BK2P` reader - Break 2 polarity"]
pub use BKP_R as BK2P_R;
#[doc = "Field `BK2P` writer - Break 2 polarity"]
pub use BKP_W as BK2P_W;
impl R {
#[doc = "Bits 0:7 - Dead-time generator setup"]
#[inline(always)]
pub fn dtg(&self) -> DTG_R {
DTG_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bits 8:9 - Lock configuration"]
#[inline(always)]
pub fn lock(&self) -> LOCK_R {
LOCK_R::new(((self.bits >> 8) & 3) as u8)
}
#[doc = "Bit 10 - Off-state selection for Idle mode"]
#[inline(always)]
pub fn ossi(&self) -> OSSI_R {
OSSI_R::new(((self.bits >> 10) & 1) != 0)
}
#[doc = "Bit 11 - Off-state selection for Run mode"]
#[inline(always)]
pub fn ossr(&self) -> OSSR_R {
OSSR_R::new(((self.bits >> 11) & 1) != 0)
}
#[doc = "Bit 12 - Break enable"]
#[inline(always)]
pub fn bke(&self) -> BKE_R {
BKE_R::new(((self.bits >> 12) & 1) != 0)
}
#[doc = "Bit 13 - Break polarity"]
#[inline(always)]
pub fn bkp(&self) -> BKP_R {
BKP_R::new(((self.bits >> 13) & 1) != 0)
}
#[doc = "Bit 14 - Automatic output enable"]
#[inline(always)]
pub fn aoe(&self) -> AOE_R {
AOE_R::new(((self.bits >> 14) & 1) != 0)
}
#[doc = "Bit 15 - Main output enable"]
#[inline(always)]
pub fn moe(&self) -> MOE_R {
MOE_R::new(((self.bits >> 15) & 1) != 0)
}
#[doc = "Bits 16:19 - Break filter"]
#[inline(always)]
pub fn bkf(&self) -> BKF_R {
BKF_R::new(((self.bits >> 16) & 0x0f) as u8)
}
#[doc = "Bits 20:23 - Break 2 filter"]
#[inline(always)]
pub fn bk2f(&self) -> BK2F_R {
BK2F_R::new(((self.bits >> 20) & 0x0f) as u8)
}
#[doc = "Bit 24 - Break 2 enable"]
#[inline(always)]
pub fn bk2e(&self) -> BK2E_R {
BK2E_R::new(((self.bits >> 24) & 1) != 0)
}
#[doc = "Bit 25 - Break 2 polarity"]
#[inline(always)]
pub fn bk2p(&self) -> BK2P_R {
BK2P_R::new(((self.bits >> 25) & 1) != 0)
}
}
impl W {
#[doc = "Bits 0:7 - Dead-time generator setup"]
#[inline(always)]
#[must_use]
pub fn dtg(&mut self) -> DTG_W<BDTR_SPEC, 0> {
DTG_W::new(self)
}
#[doc = "Bits 8:9 - Lock configuration"]
#[inline(always)]
#[must_use]
pub fn lock(&mut self) -> LOCK_W<BDTR_SPEC, 8> {
LOCK_W::new(self)
}
#[doc = "Bit 10 - Off-state selection for Idle mode"]
#[inline(always)]
#[must_use]
pub fn ossi(&mut self) -> OSSI_W<BDTR_SPEC, 10> {
OSSI_W::new(self)
}
#[doc = "Bit 11 - Off-state selection for Run mode"]
#[inline(always)]
#[must_use]
pub fn ossr(&mut self) -> OSSR_W<BDTR_SPEC, 11> {
OSSR_W::new(self)
}
#[doc = "Bit 12 - Break enable"]
#[inline(always)]
#[must_use]
pub fn bke(&mut self) -> BKE_W<BDTR_SPEC, 12> {
BKE_W::new(self)
}
#[doc = "Bit 13 - Break polarity"]
#[inline(always)]
#[must_use]
pub fn bkp(&mut self) -> BKP_W<BDTR_SPEC, 13> {
BKP_W::new(self)
}
#[doc = "Bit 14 - Automatic output enable"]
#[inline(always)]
#[must_use]
pub fn aoe(&mut self) -> AOE_W<BDTR_SPEC, 14> {
AOE_W::new(self)
}
#[doc = "Bit 15 - Main output enable"]
#[inline(always)]
#[must_use]
pub fn moe(&mut self) -> MOE_W<BDTR_SPEC, 15> {
MOE_W::new(self)
}
#[doc = "Bits 16:19 - Break filter"]
#[inline(always)]
#[must_use]
pub fn bkf(&mut self) -> BKF_W<BDTR_SPEC, 16> {
BKF_W::new(self)
}
#[doc = "Bits 20:23 - Break 2 filter"]
#[inline(always)]
#[must_use]
pub fn bk2f(&mut self) -> BK2F_W<BDTR_SPEC, 20> {
BK2F_W::new(self)
}
#[doc = "Bit 24 - Break 2 enable"]
#[inline(always)]
#[must_use]
pub fn bk2e(&mut self) -> BK2E_W<BDTR_SPEC, 24> {
BK2E_W::new(self)
}
#[doc = "Bit 25 - Break 2 polarity"]
#[inline(always)]
#[must_use]
pub fn bk2p(&mut self) -> BK2P_W<BDTR_SPEC, 25> {
BK2P_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "break and dead-time register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`bdtr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`bdtr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct BDTR_SPEC;
impl crate::RegisterSpec for BDTR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`bdtr::R`](R) reader structure"]
impl crate::Readable for BDTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`bdtr::W`](W) writer structure"]
impl crate::Writable for BDTR_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets BDTR to value 0"]
impl crate::Resettable for BDTR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use {ContextRef, ValueRef};
// FFI shim via the `cpp!` macro: embeds the C++ includes plus a wrapper that
// creates an LLVM BasicBlock in the given context and returns it to Rust.
// NOTE(review): `BasicBlock::Create` yields an `llvm::BasicBlock*` but the
// wrapper is typed as `llvm::Value*` -- confirm the implicit upcast is
// intended by the ffi_helpers glue.
cpp! {
#include "ffi_helpers.h"
#include "llvm/IR/Module.h"
pub fn LLVMRustBasicBlockCreate(context: ContextRef as "llvm::LLVMContext*")
-> ValueRef as "llvm::Value*" {
return llvm::BasicBlock::Create(*context);
}
}
|
mod debugger;
mod load_cell;
mod load_cell_data;
mod load_header;
mod load_input;
mod load_script;
mod load_script_hash;
mod load_tx;
mod load_witness;
mod utils;
pub use self::debugger::Debugger;
pub use self::load_cell::LoadCell;
pub use self::load_cell_data::LoadCellData;
pub use self::load_header::LoadHeader;
pub use self::load_input::LoadInput;
pub use self::load_script::LoadScript;
pub use self::load_script_hash::LoadScriptHash;
pub use self::load_tx::LoadTx;
pub use self::load_witness::LoadWitness;
use ckb_vm::Error;
/// Syscall return code written to A0: the requested item was loaded.
pub const SUCCESS: u8 = 0;
// INDEX_OUT_OF_BOUND is returned when requesting the 4th output in a transaction
// with only 3 outputs; while ITEM_MISSING is returned when requesting (for example)
// the type field on an output without type script, or requesting the cell data
// for a dep OutPoint which only references a block header.
pub const INDEX_OUT_OF_BOUND: u8 = 1;
pub const ITEM_MISSING: u8 = 2;
pub const SLICE_OUT_OF_BOUND: u8 = 3;
// Syscall numbers (register A7), one per syscall implementation re-exported
// from the submodules above.
pub const LOAD_TRANSACTION_SYSCALL_NUMBER: u64 = 2051;
pub const LOAD_SCRIPT_SYSCALL_NUMBER: u64 = 2052;
pub const LOAD_TX_HASH_SYSCALL_NUMBER: u64 = 2061;
pub const LOAD_SCRIPT_HASH_SYSCALL_NUMBER: u64 = 2062;
pub const LOAD_CELL_SYSCALL_NUMBER: u64 = 2071;
pub const LOAD_HEADER_SYSCALL_NUMBER: u64 = 2072;
pub const LOAD_INPUT_SYSCALL_NUMBER: u64 = 2073;
pub const LOAD_WITNESS_SYSCALL_NUMBER: u64 = 2074;
pub const LOAD_CELL_BY_FIELD_SYSCALL_NUMBER: u64 = 2081;
pub const LOAD_HEADER_BY_FIELD_SYSCALL_NUMBER: u64 = 2082;
pub const LOAD_INPUT_BY_FIELD_SYSCALL_NUMBER: u64 = 2083;
pub const LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER: u64 = 2091;
pub const LOAD_CELL_DATA_SYSCALL_NUMBER: u64 = 2092;
pub const DEBUG_PRINT_SYSCALL_NUMBER: u64 = 2177;
/// Field selector for `LOAD_CELL_BY_FIELD`: which piece of a cell to load.
/// The explicit discriminant is the raw value scripts pass in register A5.
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
enum CellField {
    Capacity = 0,
    DataHash = 1,
    Lock = 2,
    LockHash = 3,
    Type = 4,
    TypeHash = 5,
    OccupiedCapacity = 6,
}
impl CellField {
fn parse_from_u64(i: u64) -> Result<CellField, Error> {
match i {
0 => Ok(CellField::Capacity),
1 => Ok(CellField::DataHash),
2 => Ok(CellField::Lock),
3 => Ok(CellField::LockHash),
4 => Ok(CellField::Type),
5 => Ok(CellField::TypeHash),
6 => Ok(CellField::OccupiedCapacity),
_ => Err(Error::ParseError),
}
}
}
// While all fields here share the same prefix for now, later
// we might add other fields from the header which won't have
// this prefix.
/// Field selector for `LOAD_HEADER_BY_FIELD`; raw value passed in A5.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
enum HeaderField {
    EpochNumber = 0,
    EpochStartBlockNumber = 1,
    EpochLength = 2,
}
impl HeaderField {
fn parse_from_u64(i: u64) -> Result<HeaderField, Error> {
match i {
0 => Ok(HeaderField::EpochNumber),
1 => Ok(HeaderField::EpochStartBlockNumber),
2 => Ok(HeaderField::EpochLength),
_ => Err(Error::ParseError),
}
}
}
/// Field selector for `LOAD_INPUT_BY_FIELD`; raw value passed in A5.
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
enum InputField {
    OutPoint = 0,
    Since = 1,
}
impl InputField {
fn parse_from_u64(i: u64) -> Result<InputField, Error> {
match i {
0 => Ok(InputField::OutPoint),
1 => Ok(InputField::Since),
_ => Err(Error::ParseError),
}
}
}
/// Which collection of the transaction a load syscall reads from.
/// Raw wire values are 1..=4 (see `From<SourceEntry> for u64`).
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
enum SourceEntry {
    Input,
    Output,
    // Cell dep
    CellDep,
    // Header dep
    HeaderDep,
}
impl From<SourceEntry> for u64 {
fn from(s: SourceEntry) -> u64 {
match s {
SourceEntry::Input => 1,
SourceEntry::Output => 2,
SourceEntry::CellDep => 3,
SourceEntry::HeaderDep => 4,
}
}
}
impl SourceEntry {
fn parse_from_u64(i: u64) -> Result<SourceEntry, Error> {
match i {
1 => Ok(SourceEntry::Input),
2 => Ok(SourceEntry::Output),
3 => Ok(SourceEntry::CellDep),
4 => Ok(SourceEntry::HeaderDep),
_ => Err(Error::ParseError),
}
}
}
// Layout of the 64-bit `source` syscall argument: the top byte carries the
// scope flag (0x01 = script-group scope), the low seven bytes carry the
// `SourceEntry` value.
const SOURCE_GROUP_FLAG: u64 = 0x0100_0000_0000_0000;
const SOURCE_GROUP_MASK: u64 = 0xFF00_0000_0000_0000;
const SOURCE_ENTRY_MASK: u64 = 0x00FF_FFFF_FFFF_FFFF;
/// Scope of a data source: the whole transaction, or only the cells that
/// belong to the currently-executing script group.
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
enum Source {
    Transaction(SourceEntry),
    Group(SourceEntry),
}
impl From<Source> for u64 {
    /// Encode the source: the entry value occupies the low bytes, and the
    /// group flag is OR-ed into the top byte for `Source::Group`.
    fn from(source: Source) -> u64 {
        match source {
            Source::Transaction(entry) => u64::from(entry),
            Source::Group(entry) => u64::from(entry) | SOURCE_GROUP_FLAG,
        }
    }
}
impl Source {
    /// Decode a raw `source` argument: the low seven bytes select the
    /// entry, the top byte decides transaction vs. script-group scope.
    fn parse_from_u64(i: u64) -> Result<Source, Error> {
        let entry = SourceEntry::parse_from_u64(i & SOURCE_ENTRY_MASK)?;
        let is_group = i & SOURCE_GROUP_MASK == SOURCE_GROUP_FLAG;
        Ok(if is_group {
            Source::Group(entry)
        } else {
            Source::Transaction(entry)
        })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::DataLoader;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use ckb_db::RocksDB;
use ckb_hash::blake2b_256;
use ckb_store::{data_loader_wrapper::DataLoaderWrapper, ChainDB, COLUMNS};
use ckb_types::{
bytes::Bytes,
core::{
cell::CellMeta, BlockExt, Capacity, EpochExt, HeaderBuilder, HeaderView,
ScriptHashType, TransactionBuilder,
},
packed::{Byte32, CellOutput, OutPoint, Script, ScriptBuilder},
prelude::*,
utilities::DIFF_TWO,
H256, U256,
};
use ckb_vm::machine::DefaultCoreMachine;
use ckb_vm::{
memory::{FLAG_EXECUTABLE, FLAG_FREEZED, FLAG_WRITABLE},
registers::{A0, A1, A2, A3, A4, A5, A7},
CoreMachine, Error as VMError, Memory, SparseMemory, Syscalls, WXorXMemory, RISCV_PAGESIZE,
};
use proptest::{collection::size_range, prelude::*};
use std::collections::HashMap;
/// Fresh `ChainDB` backed by a throw-away temporary RocksDB instance.
fn new_store() -> ChainDB {
    let db = RocksDB::open_tmp(COLUMNS);
    ChainDB::new(db, Default::default())
}
/// Build a minimal `CellMeta` carrying `data`, with the given capacity in
/// bytes, a default out-point, and no transaction info.
fn build_cell_meta(capacity_bytes: usize, data: Bytes) -> CellMeta {
    let capacity = Capacity::bytes(capacity_bytes).expect("capacity bytes overflow");
    let data_hash = CellOutput::calc_data_hash(&data);
    let cell_output = CellOutput::new_builder().capacity(capacity.pack()).build();
    CellMeta {
        out_point: OutPoint::default(),
        transaction_info: None,
        cell_output,
        data_bytes: data.len() as u64,
        mem_cell_data: Some((data, data_hash)),
    }
}
/// Requesting input index 1 when only one input is resolved must report
/// INDEX_OUT_OF_BOUND in A0 while the ecall itself still returns Ok.
fn _test_load_cell_not_exist(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 1); //index (out of bounds: only one input below)
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); //source: 1 input
    machine.set_register(A7, LOAD_CELL_SYSCALL_NUMBER); // syscall number
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(data.len() as u64))
        .is_ok());
    let output_cell_data = Bytes::from(data);
    let output = build_cell_meta(100, output_cell_data);
    let input_cell_data: Bytes = data.iter().rev().cloned().collect();
    let input_cell = build_cell_meta(100, input_cell_data);
    let outputs = vec![output];
    let resolved_inputs = vec![input_cell];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    // The error is signalled via A0, not via ecall's Result.
    prop_assert_eq!(machine.registers()[A0], u64::from(INDEX_OUT_OF_BOUND));
    Ok(())
}
proptest! {
    #[test]
    fn test_load_cell_not_exist(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_cell_not_exist(data)?;
    }
}
/// Happy path: load the complete serialized `CellOutput` of the input,
/// then of the output, checking the reported length and every byte.
fn _test_load_cell_all(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); //source: 1 input
    machine.set_register(A7, LOAD_CELL_SYSCALL_NUMBER); // syscall number
    let output_cell_data = Bytes::from(data);
    let output = build_cell_meta(100, output_cell_data);
    // Input data is the reverse of the output data so the two loads differ.
    let input_cell_data: Bytes = data.iter().rev().cloned().collect();
    let input_cell = build_cell_meta(100, input_cell_data);
    let outputs = vec![output.clone()];
    let resolved_inputs = vec![input_cell.clone()];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    let input_correct_data = input_cell.cell_output.as_slice();
    let output_correct_data = output.cell_output.as_slice();
    // test input
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(input_correct_data.len() as u64))
        .is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(input_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + input_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(input_correct_data[i]))
        );
    }
    // clean memory
    prop_assert!(machine.memory_mut().store_byte(0, 1100, 0).is_ok());
    // test output: same buffer, source switched to outputs; the size slot is
    // deliberately over-sized by 10 to show the syscall rewrites it.
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Output))); //source: 2 output
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(output_correct_data.len() as u64 + 10))
        .is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(output_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + output_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(output_correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_cell_all(ref tx in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_cell_all(tx)?;
    }
}
/// With the in-memory size slot set to 0, the syscall still succeeds and
/// writes back the full serialized length of the input cell.
fn _test_load_cell_length(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); //source: 1 input
    machine.set_register(A7, LOAD_CELL_SYSCALL_NUMBER); // syscall number
    let output_cell_data = Bytes::from(data);
    let output = build_cell_meta(100, output_cell_data);
    let input_cell_data: Bytes = data.iter().rev().cloned().collect();
    let input_cell = build_cell_meta(100, input_cell_data);
    let outputs = vec![output];
    let resolved_inputs = vec![input_cell.clone()];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    let input_correct_data = input_cell.cell_output.as_slice();
    // Size slot starts at 0: no data is requested, only the length.
    prop_assert!(machine.memory_mut().store64(&size_addr, &0).is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(input_correct_data.len() as u64)
    );
    Ok(())
}
proptest! {
    #[test]
    fn test_load_cell_length(ref tx in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_cell_length(tx)?;
    }
}
/// Loading with a byte `offset` must copy the tail of the serialized cell
/// (`serialized[offset..]`); offsets past the end yield an empty range,
/// covered by the `saturating_sub` in the verification loop.
fn _test_load_cell_partial(data: &[u8], offset: u64) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, offset); // offset
    machine.set_register(A3, 0); // index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); // source: 1 input
    machine.set_register(A7, LOAD_CELL_SYSCALL_NUMBER); // syscall number
    let output_cell_data = Bytes::from(data);
    let output = build_cell_meta(100, output_cell_data);
    let input_cell_data: Bytes = data.iter().rev().cloned().collect();
    let input_cell = build_cell_meta(100, input_cell_data);
    let outputs = vec![output];
    let resolved_inputs = vec![input_cell.clone()];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    let input_correct_data = input_cell.cell_output.as_slice();
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(input_correct_data.len() as u64))
        .is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    for (i, addr) in
        (addr..addr + (input_correct_data.len() as u64).saturating_sub(offset)).enumerate()
    {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(input_correct_data[i + offset as usize]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_cell_partial(ref data in any_with::<Vec<u8>>(size_range(1000).lift()), offset in 0u64..2000) {
        _test_load_cell_partial(data, offset)?;
    }
}
/// `LOAD_CELL_BY_FIELD` with `CellField::Capacity` must write exactly 8
/// bytes: the capacity in little-endian, with the size slot updated to 8.
fn _test_load_cell_capacity(capacity: Capacity) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); //source: 1 input
    machine.set_register(A5, CellField::Capacity as u64); //field: 0 capacity
    machine.set_register(A7, LOAD_CELL_BY_FIELD_SYSCALL_NUMBER); // syscall number
    let data = Bytes::new();
    let data_hash = CellOutput::calc_data_hash(&data);
    let input_cell = CellMeta {
        out_point: OutPoint::default(),
        transaction_info: None,
        cell_output: CellOutput::new_builder().capacity(capacity.pack()).build(),
        data_bytes: 0,
        mem_cell_data: Some((data, data_hash)),
    };
    let outputs = vec![];
    let resolved_inputs = vec![input_cell];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    // Size slot over-provisioned at 16; the syscall shrinks it to 8.
    prop_assert!(machine.memory_mut().store64(&size_addr, &16).is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(machine.memory_mut().load64(&size_addr), Ok(8));
    let mut buffer = vec![];
    buffer.write_u64::<LittleEndian>(capacity.as_u64()).unwrap();
    for (i, addr) in (addr..addr + buffer.len() as u64).enumerate() {
        prop_assert_eq!(machine.memory_mut().load8(&addr), Ok(u64::from(buffer[i])));
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_cell_capacity(capacity in any::<u64>()) {
        _test_load_cell_capacity(Capacity::shannons(capacity))?;
    }
}
/// Requesting the type script of an output that has none must report
/// ITEM_MISSING and leave both the size slot and the buffer untouched.
#[test]
fn test_load_missing_contract() {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Output))); //source: 2 output
    machine.set_register(A5, CellField::Type as u64); //field: 4 type
    machine.set_register(A7, LOAD_CELL_BY_FIELD_SYSCALL_NUMBER); // syscall number
    // build_cell_meta attaches no type script, so the field is absent.
    let output_cell = build_cell_meta(100, Bytes::new());
    let outputs = vec![output_cell];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    assert!(machine.memory_mut().store64(&size_addr, &100).is_ok());
    assert!(load_cell.ecall(&mut machine).is_ok());
    assert_eq!(machine.registers()[A0], u64::from(ITEM_MISSING));
    // Size slot keeps its original value and no bytes were written.
    assert_eq!(machine.memory_mut().load64(&size_addr), Ok(100));
    for addr in addr..addr + 100 {
        assert_eq!(machine.memory_mut().load8(&addr), Ok(0));
    }
}
/// Test stub `DataLoader`: serves headers and epochs from in-memory maps;
/// cell data and block extensions are never available.
struct MockDataLoader {
    headers: HashMap<Byte32, HeaderView>,
    epochs: HashMap<Byte32, EpochExt>,
}
impl DataLoader for MockDataLoader {
    /// This stub never provides cell data.
    fn load_cell_data(&self, _cell_meta: &CellMeta) -> Option<(Bytes, Byte32)> {
        None
    }
    /// This stub never provides block extensions.
    fn get_block_ext(&self, _hash: &Byte32) -> Option<BlockExt> {
        None
    }
    fn get_header(&self, hash: &Byte32) -> Option<HeaderView> {
        self.headers.get(hash).cloned()
    }
    fn get_block_epoch(&self, hash: &Byte32) -> Option<EpochExt> {
        self.epochs.get(hash).cloned()
    }
}
/// Load the full serialized header from the transaction's header deps,
/// served through `MockDataLoader`.
fn _test_load_header(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::HeaderDep))); //source: 4 header
    machine.set_register(A7, LOAD_HEADER_SYSCALL_NUMBER); // syscall number
    // Derive the transactions root from `data` so each case gets a
    // distinct header (and therefore a distinct hash).
    let data_hash = blake2b_256(&data).pack();
    let header = HeaderBuilder::default()
        .transactions_root(data_hash)
        .build();
    let header_correct_bytes = header.data();
    let header_correct_data = header_correct_bytes.as_slice();
    let mut headers = HashMap::default();
    headers.insert(header.hash(), header.clone());
    let data_loader = MockDataLoader {
        headers,
        epochs: HashMap::default(),
    };
    let header_deps = vec![header.hash()];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let mut load_header = LoadHeader::new(
        &data_loader,
        header_deps.pack(),
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
    );
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(header_correct_data.len() as u64 + 20))
        .is_ok());
    prop_assert!(load_header.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(header_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + header_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(header_correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_header(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_header(data)?;
    }
}
/// `LOAD_HEADER_BY_FIELD` with the default field (A5 left at 0 =
/// EpochNumber) must return the epoch number as 8 little-endian bytes.
fn _test_load_epoch_number(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::HeaderDep))); //source: 4 header
    machine.set_register(A7, LOAD_HEADER_BY_FIELD_SYSCALL_NUMBER); // syscall number
    let data_hash: H256 = blake2b_256(&data).into();
    let header = HeaderBuilder::default()
        .transactions_root(data_hash.pack())
        .build();
    // NOTE(review): `data[0]` assumes a non-empty input; the generator's
    // size_range(1000) appears to guarantee this — confirm if changed.
    let epoch = EpochExt::new_builder()
        .number(u64::from(data[0]))
        .base_block_reward(Capacity::bytes(100).unwrap())
        .remainder_reward(Capacity::bytes(100).unwrap())
        .previous_epoch_hash_rate(U256::one())
        .last_block_hash_in_previous_epoch(Byte32::default())
        .start_number(1234)
        .length(1000)
        .compact_target(DIFF_TWO)
        .build();
    // Expected payload: the epoch number serialized little-endian.
    let mut correct_data = [0u8; 8];
    LittleEndian::write_u64(&mut correct_data, epoch.number());
    let mut headers = HashMap::default();
    headers.insert(header.hash(), header.clone());
    let mut epochs = HashMap::default();
    epochs.insert(header.hash(), epoch);
    let data_loader = MockDataLoader { headers, epochs };
    let header_deps = vec![header.hash()];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let mut load_header = LoadHeader::new(
        &data_loader,
        header_deps.pack(),
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
    );
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(correct_data.len() as u64 + 20))
        .is_ok());
    prop_assert!(load_header.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_epoch_number(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_epoch_number(data)?;
    }
}
/// `LOAD_TX_HASH` must write the 32-byte transaction hash and shrink the
/// over-provisioned size slot to 32.
fn _test_load_tx_hash(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A7, LOAD_TX_HASH_SYSCALL_NUMBER); // syscall number
    // Fold `data` into the tx so each case produces a distinct hash.
    let transaction_view = TransactionBuilder::default()
        .output_data(data.pack())
        .build();
    let hash = transaction_view.hash();
    let hash_len = 32u64;
    let mut load_tx = LoadTx::new(&transaction_view);
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(hash_len + 20))
        .is_ok());
    prop_assert!(load_tx.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(machine.memory_mut().load64(&size_addr), Ok(hash_len));
    for (i, addr) in (addr..addr + hash_len as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(hash.as_slice()[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_tx_hash(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_tx_hash(data)?;
    }
}
/// `LOAD_TRANSACTION` must write the complete serialized transaction and
/// report its exact byte length.
fn _test_load_tx(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A7, LOAD_TRANSACTION_SYSCALL_NUMBER); // syscall number
    let transaction_view = TransactionBuilder::default()
        .output_data(data.pack())
        .build();
    let tx = transaction_view.data();
    let tx_len = transaction_view.data().as_slice().len() as u64;
    let mut load_tx = LoadTx::new(&transaction_view);
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(tx_len + 20))
        .is_ok());
    prop_assert!(load_tx.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(machine.memory_mut().load64(&size_addr), Ok(tx_len));
    for (i, addr) in (addr..addr + tx_len as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(tx.as_slice()[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_tx(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_tx(data)?;
    }
}
/// `LOAD_SCRIPT_HASH` writes the current script hash; a second call with
/// the size slot at 0 still reports the full length without copying.
fn _test_load_current_script_hash(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A7, LOAD_SCRIPT_HASH_SYSCALL_NUMBER); // syscall number
    let script = Script::new_builder()
        .args(Bytes::from(data).pack())
        .hash_type(ScriptHashType::Data.into())
        .build();
    let hash = script.calc_script_hash();
    // `data` is deliberately shadowed: from here on it is the hash bytes.
    let data = hash.raw_data();
    let mut load_script_hash = LoadScriptHash::new(hash);
    prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok());
    prop_assert!(load_script_hash.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(data.len() as u64)
    );
    for (i, addr) in (addr..addr + data.len() as u64).enumerate() {
        prop_assert_eq!(machine.memory_mut().load8(&addr), Ok(u64::from(data[i])));
    }
    // Length-only query: size slot 0, result length still reported.
    machine.set_register(A0, addr); // addr
    prop_assert!(machine.memory_mut().store64(&size_addr, &0).is_ok());
    prop_assert!(load_script_hash.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(data.len() as u64)
    );
    Ok(())
}
proptest! {
    #[test]
    fn test_load_current_script_hash(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_current_script_hash(data)?;
    }
}
/// `LOAD_CELL_BY_FIELD` with `CellField::LockHash` must return the hash of
/// the lock script attached to the input cell.
fn _test_load_input_lock_script_hash(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::Input))); //source: 1 input
    machine.set_register(A5, CellField::LockHash as u64); //field: 2 lock hash
    machine.set_register(A7, LOAD_CELL_BY_FIELD_SYSCALL_NUMBER); // syscall number
    let script = Script::new_builder()
        .args(Bytes::from(data).pack())
        .hash_type(ScriptHashType::Data.into())
        .build();
    let h = script.calc_script_hash();
    let hash = h.as_bytes();
    // Rebuild the input cell's output with the lock script attached.
    let mut input_cell = build_cell_meta(1000, Bytes::new());
    let output_with_lock = input_cell
        .cell_output
        .clone()
        .as_builder()
        .lock(script)
        .build();
    input_cell.cell_output = output_with_lock;
    let outputs = vec![];
    let resolved_inputs = vec![input_cell];
    let resolved_cell_deps = vec![];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_cell = LoadCell::new(
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    prop_assert!(machine.memory_mut().store64(&size_addr, &64).is_ok());
    prop_assert!(load_cell.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(hash.len() as u64)
    );
    for (i, addr) in (addr..addr + hash.len() as u64).enumerate() {
        prop_assert_eq!(machine.memory_mut().load8(&addr), Ok(u64::from(hash[i])));
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_input_lock_script_hash(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_input_lock_script_hash(data)?;
    }
}
/// `LOAD_WITNESS` at transaction scope: witness index 0 is returned for
/// both input and output sources (exercised via the two proptests below).
fn _test_load_witness(data: &[u8], source: SourceEntry) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(source))); //source
    machine.set_register(A7, LOAD_WITNESS_SYSCALL_NUMBER); // syscall number
    let witness = Bytes::from(data).pack();
    let witness_correct_data = witness.raw_data();
    let witnesses = vec![witness];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_witness = LoadWitness::new(witnesses.pack(), &group_inputs, &group_outputs);
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(witness_correct_data.len() as u64 + 20))
        .is_ok());
    prop_assert!(load_witness.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(witness_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + witness_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(witness_correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_witness_by_input(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_witness(data, SourceEntry::Input)?;
    }
    #[test]
    fn test_load_witness_by_output(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_witness(data, SourceEntry::Output)?;
    }
}
/// `LOAD_WITNESS` at script-group scope (`Source::Group`): group index 0
/// maps to witness index 1 via `group_inputs` / `group_outputs`, so the
/// syscall must return `witness`, not `dummy_witness`.
fn _test_load_group_witness(data: &[u8], source: SourceEntry) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Group(source))); //source
    machine.set_register(A7, LOAD_WITNESS_SYSCALL_NUMBER); // syscall number
    let witness = Bytes::from(data).pack();
    let witness_correct_data = witness.raw_data();
    let dummy_witness = Bytes::default().pack();
    let witnesses = vec![dummy_witness, witness];
    let group_inputs = vec![1];
    let group_outputs = vec![1];
    let mut load_witness = LoadWitness::new(witnesses.pack(), &group_inputs, &group_outputs);
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(witness_correct_data.len() as u64 + 20))
        .is_ok());
    prop_assert!(load_witness.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(witness_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + witness_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(witness_correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_group_witness_by_input(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_group_witness(data, SourceEntry::Input)?;
    }
    // FIX: this case was missing its `#[test]` attribute. `proptest!`
    // passes attributes through to the generated fn, so without it the
    // function was never registered with the test harness and the
    // group-output path went silently untested.
    #[test]
    fn test_load_group_witness_by_output(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_group_witness(data, SourceEntry::Output)?;
    }
}
/// `LOAD_SCRIPT` must write the full serialized current script and report
/// its exact byte length.
fn _test_load_script(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, SparseMemory<u64>>::default();
    let size_addr: u64 = 0;
    let addr: u64 = 100;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size_addr
    machine.set_register(A2, 0); // offset
    machine.set_register(A7, LOAD_SCRIPT_SYSCALL_NUMBER); // syscall number
    let script = ScriptBuilder::default()
        .args(Bytes::from(data).pack())
        .build();
    let script_correct_data = script.as_slice();
    let mut load_script = LoadScript::new(script.clone());
    prop_assert!(machine
        .memory_mut()
        .store64(&size_addr, &(script_correct_data.len() as u64 + 20))
        .is_ok());
    prop_assert!(load_script.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    prop_assert_eq!(
        machine.memory_mut().load64(&size_addr),
        Ok(script_correct_data.len() as u64)
    );
    for (i, addr) in (addr..addr + script_correct_data.len() as u64).enumerate() {
        prop_assert_eq!(
            machine.memory_mut().load8(&addr),
            Ok(u64::from(script_correct_data[i]))
        );
    }
    Ok(())
}
proptest! {
    #[test]
    fn test_load_script(ref data in any_with::<Vec<u8>>(size_range(1000).lift())) {
        _test_load_script(data)?;
    }
}
/// `LOAD_CELL_DATA_AS_CODE` maps a dep cell's data into an executable
/// page: the page flags become EXECUTABLE|FREEZED, the data is copied,
/// and the remainder of the requested region is zero-filled (the page is
/// pre-filled with 1s to prove the zero-fill happens).
fn _test_load_cell_data_as_code(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    let addr = 4096;
    let addr_size = 4096;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, addr_size); // size
    machine.set_register(A2, 0); // content offset
    machine.set_register(A3, data.len() as u64); // content size
    machine.set_register(A4, 0); //index
    machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER); // syscall number
    let dep_cell_data = Bytes::from(data);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    // Pre-fill the target page with 1s so the trailing zero-fill is observable.
    prop_assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok());
    prop_assert!(load_code.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    let flags = FLAG_EXECUTABLE | FLAG_FREEZED;
    prop_assert_eq!(
        machine
            .memory_mut()
            .fetch_flag(addr / RISCV_PAGESIZE as u64),
        Ok(flags)
    );
    for (i, addr) in (addr..addr + data.len() as u64).enumerate() {
        prop_assert_eq!(machine.memory_mut().load8(&addr), Ok(u64::from(data[i])));
    }
    if (data.len() as u64) < addr_size {
        for i in (data.len() as u64)..addr_size {
            prop_assert_eq!(machine.memory_mut().load8(&(addr + i)), Ok(0));
        }
    }
    Ok(())
}
/// `LOAD_CELL_DATA` copies a dep cell's data as plain (writable) data: the
/// page flags must be FLAG_WRITABLE, unlike the as-code variant above.
fn _test_load_cell_data(data: &[u8]) -> Result<(), TestCaseError> {
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    let size_addr: u64 = 100;
    let addr = 4096;
    let addr_size = 4096;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, size_addr); // size
    machine.set_register(A2, 0); // offset
    machine.set_register(A3, 0); //index
    machine.set_register(A4, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_SYSCALL_NUMBER); // syscall number
    prop_assert!(machine.memory_mut().store64(&size_addr, &addr_size).is_ok());
    let dep_cell_data = Bytes::from(data);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_deps,
        &group_inputs,
        &group_outputs,
    );
    prop_assert!(load_code.ecall(&mut machine).is_ok());
    prop_assert_eq!(machine.registers()[A0], u64::from(SUCCESS));
    let flags = FLAG_WRITABLE;
    prop_assert_eq!(
        machine
            .memory_mut()
            .fetch_flag(addr / RISCV_PAGESIZE as u64),
        Ok(flags)
    );
    for (i, addr) in (addr..addr + data.len() as u64).enumerate() {
        prop_assert_eq!(machine.memory_mut().load8(&addr), Ok(u64::from(data[i])));
    }
    if (data.len() as u64) < addr_size {
        for i in (data.len() as u64)..addr_size {
            prop_assert_eq!(machine.memory_mut().load8(&(addr + i)), Ok(0));
        }
    }
    Ok(())
}
proptest! {
    #![proptest_config(ProptestConfig {
        cases: 10, .. ProptestConfig::default()
    })]
    // Property wrappers: feed random byte payloads (sized by
    // `size_range(4096)`) into the load-as-code and load-as-data helpers.
    #[test]
    fn test_load_code(ref data in any_with::<Vec<u8>>(size_range(4096).lift())) {
        _test_load_cell_data_as_code(data)?;
    }
    #[test]
    fn test_load_data(ref data in any_with::<Vec<u8>>(size_range(4096).lift())) {
        _test_load_cell_data(data)?;
    }
}
/// A content size of `u64::max_value() - 1` combined with a non-zero offset
/// must make the load-as-code syscall fail with `OutOfBound` instead of
/// wrapping around.
#[test]
fn test_load_overflowed_cell_data_as_code() {
    let data = vec![0, 1, 2, 3, 4, 5];
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    let addr = 4096;
    let addr_size = 4096;
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, addr_size); // size
    machine.set_register(A2, 3); // content offset
    machine.set_register(A3, u64::max_value() - 1); // content size
    machine.set_register(A4, 0); //index
    machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER); // syscall number
    let dep_cell_data = Bytes::from(data);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok());
    let result = load_code.ecall(&mut machine);
    assert_eq!(result.unwrap_err(), VMError::OutOfBound);
}
fn _test_load_cell_data_on_freezed_memory(
as_code: bool,
data: &[u8],
) -> Result<(), TestCaseError> {
let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
let addr = 8192;
let addr_size = 4096;
prop_assert!(machine
.memory_mut()
.init_pages(addr, addr_size, FLAG_EXECUTABLE | FLAG_FREEZED, None, 0)
.is_ok());
machine.set_register(A0, addr); // addr
machine.set_register(A1, addr_size); // size
machine.set_register(A2, 0); // content offset
machine.set_register(A3, data.len() as u64); // content size
machine.set_register(A4, 0); //index
machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
let syscall = if as_code {
LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER
} else {
LOAD_CELL_DATA_SYSCALL_NUMBER
};
machine.set_register(A7, syscall); // syscall number
let dep_cell_data = Bytes::from(data);
let dep_cell = build_cell_meta(10000, dep_cell_data);
let store = new_store();
let data_loader = DataLoaderWrapper::new(&store);
let outputs = vec![];
let resolved_inputs = vec![];
let resolved_cell_deps = vec![dep_cell];
let group_inputs = vec![];
let group_outputs = vec![];
let mut load_code = LoadCellData::new(
&data_loader,
&outputs,
&resolved_inputs,
&resolved_cell_deps,
&group_inputs,
&group_outputs,
);
prop_assert!(load_code.ecall(&mut machine).is_err());
for i in addr..addr + addr_size {
assert_eq!(machine.memory_mut().load8(&i), Ok(0));
}
Ok(())
}
proptest! {
    #![proptest_config(ProptestConfig {
        cases: 10, .. ProptestConfig::default()
    })]
    // Property wrappers over the frozen-memory helper: both the as-code and
    // the as-data variant must refuse to write into frozen pages.
    #[test]
    fn test_load_code_on_freezed_memory(ref data in any_with::<Vec<u8>>(size_range(4096).lift())) {
        _test_load_cell_data_on_freezed_memory(true, data)?;
    }
    #[test]
    fn test_load_data_on_freezed_memory(ref data in any_with::<Vec<u8>>(size_range(4096).lift())) {
        _test_load_cell_data_on_freezed_memory(false, data)?;
    }
}
/// Loading cell data as code at a non-page-aligned address must fail and
/// leave the pre-written memory contents (all 1s) untouched.
#[test]
fn test_load_code_unaligned_error() {
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    // Deliberately one byte past a page boundary.
    let addr = 4097;
    let addr_size = 4096;
    let data = [2; 32];
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, addr_size); // size
    machine.set_register(A2, 0); // content offset
    machine.set_register(A3, data.len() as u64); // content size
    machine.set_register(A4, 0); //index
    machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER); // syscall number
    let dep_cell_data = Bytes::from(&data[..]);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    // Pre-fill the target region with 1s so we can verify it is untouched.
    assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok());
    assert!(load_code.ecall(&mut machine).is_err());
    for i in addr..addr + addr_size {
        assert_eq!(machine.memory_mut().load8(&i), Ok(1));
    }
}
/// A content size larger than the dep cell's actual data length must make
/// the syscall return SLICE_OUT_OF_BOUND (the ecall itself succeeds) and
/// leave the pre-written memory contents (all 1s) untouched.
#[test]
fn test_load_code_slice_out_of_bound_error() {
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    let addr = 4096;
    let addr_size = 4096;
    let data = [2; 32];
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, addr_size); // size
    machine.set_register(A2, 0); // content offset
    // Three bytes more than the dep cell actually holds.
    machine.set_register(A3, data.len() as u64 + 3); // content size
    machine.set_register(A4, 0); //index
    machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER); // syscall number
    let dep_cell_data = Bytes::from(&data[..]);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    // Pre-fill the target region with 1s so we can verify it is untouched.
    assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok());
    assert!(load_code.ecall(&mut machine).is_ok());
    assert_eq!(machine.registers()[A0], u64::from(SLICE_OUT_OF_BOUND));
    for i in addr..addr + addr_size {
        assert_eq!(machine.memory_mut().load8(&i), Ok(1));
    }
}
/// An 8000-byte content cannot fit the 4096-byte buffer: the syscall
/// reports SLICE_OUT_OF_BOUND in A0 and leaves the pre-written memory
/// contents (all 1s) untouched.
#[test]
fn test_load_code_not_enough_space_error() {
    let mut machine = DefaultCoreMachine::<u64, WXorXMemory<u64, SparseMemory<u64>>>::default();
    let addr = 4096;
    let addr_size = 4096;
    // Idiomatic replacement for `let mut data = vec![]; data.resize(8000, 2);`.
    let data = vec![2u8; 8000];
    machine.set_register(A0, addr); // addr
    machine.set_register(A1, addr_size); // size
    machine.set_register(A2, 0); // content offset
    machine.set_register(A3, data.len() as u64); // content size
    machine.set_register(A4, 0); //index
    machine.set_register(A5, u64::from(Source::Transaction(SourceEntry::CellDep))); //source
    machine.set_register(A7, LOAD_CELL_DATA_AS_CODE_SYSCALL_NUMBER); // syscall number
    let dep_cell_data = Bytes::from(&data[..]);
    let dep_cell = build_cell_meta(10000, dep_cell_data);
    let store = new_store();
    let data_loader = DataLoaderWrapper::new(&store);
    let outputs = vec![];
    let resolved_inputs = vec![];
    let resolved_cell_deps = vec![dep_cell];
    let group_inputs = vec![];
    let group_outputs = vec![];
    let mut load_code = LoadCellData::new(
        &data_loader,
        &outputs,
        &resolved_inputs,
        &resolved_cell_deps,
        &group_inputs,
        &group_outputs,
    );
    // Pre-fill the target region with 1s so we can verify it is untouched.
    assert!(machine.memory_mut().store_byte(addr, addr_size, 1).is_ok());
    assert!(load_code.ecall(&mut machine).is_ok());
    assert_eq!(machine.registers()[A0], u64::from(SLICE_OUT_OF_BOUND));
    for i in addr..addr + addr_size {
        assert_eq!(machine.memory_mut().load8(&i), Ok(1));
    }
}
}
|
use futures::{Async, Future, Poll};
use std::marker::PhantomData;
use std::net::SocketAddr;
use tokio_io::{AsyncRead, AsyncWrite};
/// A service that can establish a connection to a target of type `A`,
/// yielding a bidirectional (async read + write) stream.
pub trait ConnectService<A> {
    /// The established connection.
    type Response: AsyncRead + AsyncWrite;
    /// Error produced when the connection attempt fails.
    type Error;
    /// Future resolving to the connection.
    type Future: Future<Item = Self::Response, Error = Self::Error>;
    /// Starts a connection attempt to `target`.
    fn connect(&mut self, target: A) -> Self::Future;
}
/// Represents a type that can resolve a `SocketAddr` from some
/// type `Target`.
pub trait Resolve<Target> {
    /// Error produced when resolution fails.
    type Error;
    /// Future resolving to the looked-up socket address.
    type Future: Future<Item = SocketAddr, Error = Self::Error>;
    /// Starts an asynchronous lookup of `target`.
    fn lookup(&mut self, target: Target) -> Self::Future;
}
/// Connects to a `Target` by first resolving it to a `SocketAddr` with `R`,
/// then opening the connection with `C`.
pub struct Connector<C, R, Target>
where
    C: ConnectService<SocketAddr> + Clone,
    R: Resolve<Target>,
{
    // Underlying connector; cloned into each returned future.
    connect: C,
    // Resolver turning a `Target` into a socket address.
    resolver: R,
    // Marks `Target` as logically used without storing one.
    _pd: PhantomData<Target>,
}
impl<C, R, Target> Connector<C, R, Target>
where
    C: ConnectService<SocketAddr> + Clone,
    R: Resolve<Target>,
{
    /// Creates a connector that resolves targets with `resolver` and opens
    /// connections with `connect`.
    pub fn new(connect: C, resolver: R) -> Self {
        let _pd = PhantomData;
        Connector {
            connect,
            resolver,
            _pd,
        }
    }
}
impl<C, R, Target> ConnectService<Target> for Connector<C, R, Target>
where
    C: ConnectService<SocketAddr> + Clone,
    R: Resolve<Target>,
{
    type Response = C::Response;
    type Error = ConnectorError<C, R, Target>;
    type Future = ConnectFuture<C, R, Target>;
    /// Starts the resolve-then-connect pipeline for `target`. The inner
    /// connector is cloned so the returned future owns its own handle.
    fn connect(&mut self, target: Target) -> Self::Future {
        ConnectFuture {
            state: State::Resolving(self.resolver.lookup(target)),
            connector: self.connect.clone(),
        }
    }
}
/// Error of the combined pipeline: either phase can fail.
pub enum ConnectorError<C, R, Target>
where
    C: ConnectService<SocketAddr>,
    R: Resolve<Target>,
{
    /// The resolver failed to produce a socket address.
    Resolve(R::Error),
    /// The connector failed to establish the connection.
    Connect(C::Error),
}
/// Future returned by `Connector::connect`: resolves the target, then
/// connects to the resulting address.
pub struct ConnectFuture<C, R, Target>
where
    C: ConnectService<SocketAddr>,
    R: Resolve<Target>,
{
    // Current phase (resolving or connecting).
    state: State<C, R, Target>,
    // Connector used once resolution completes.
    connector: C,
}
/// Progress of a `ConnectFuture`: first resolving, then connecting.
enum State<C, R, Target>
where
    C: ConnectService<SocketAddr>,
    R: Resolve<Target>,
{
    // Waiting for the resolver to produce a `SocketAddr`.
    Resolving(R::Future),
    // Waiting for the connector to establish the connection.
    Connecting(C::Future),
}
impl<C, R, Target> Future for ConnectFuture<C, R, Target>
where
    C: ConnectService<SocketAddr>,
    R: Resolve<Target>,
{
    type Item = C::Response;
    type Error = ConnectorError<C, R, Target>;
    /// Drives the two-phase connect: wait for the resolver, then hand the
    /// resolved address to the inner connector.
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        loop {
            match self.state {
                State::Resolving(ref mut fut) => {
                    let address = match fut.poll() {
                        Ok(Async::Ready(addr)) => addr,
                        Ok(Async::NotReady) => return Ok(Async::NotReady),
                        Err(e) => return Err(ConnectorError::Resolve(e)),
                    };
                    // Switch to the connecting phase; the loop polls the new
                    // future immediately so no wakeup is lost.
                    self.state = State::Connecting(self.connector.connect(address));
                }
                State::Connecting(ref mut fut) => {
                    // Idiom: pass the variant constructor directly instead of
                    // the redundant closure `|e| ConnectorError::Connect(e)`.
                    return fut.poll().map_err(ConnectorError::Connect);
                }
            }
        }
    }
}
|
/// Returns all primes `<= upper_bound` in ascending order, by repeated
/// trial-division filtering.
///
/// Fix: the original advanced the trial divisor all the way to
/// `upper_bound`, making the sieve accidentally quadratic. Every composite
/// `x <= upper_bound` has a factor `<= sqrt(upper_bound)`, so stopping the
/// divisor at the square root yields the same result.
pub fn primes_up_to(upper_bound: u64) -> Vec<u64> {
    let mut primes: Vec<u64> = (2..=upper_bound).collect();
    let mut divisor: u64 = 2;
    // `divisor * divisor` cannot overflow in practice: a bound large enough
    // to overflow here could never be collected into memory above.
    while divisor * divisor <= upper_bound {
        // Keep multiples of `divisor` only if they are the divisor itself.
        primes.retain(|&x| x == divisor || x % divisor != 0);
        divisor += 1;
    }
    primes
}
|
pub mod all;
pub mod andor;
pub mod exact;
pub mod factory;
pub mod fuzzy;
pub mod regexp;
mod util;
|
pub mod mylib;
use mylib::{Block, Energe};
trait Machine {
fn new() -> Self {}
fn run(&self) -> Energe {}
fn gen_block() -> Block {
let matchine = Self::new();
let energe = matchine.run();
Block::from_energe(&energe)
}
}
const ID: &'static str = "ID";
/// Concrete `Machine` carrying a borrowed identifier.
#[derive(Clone, Debug)]
pub struct MyMachine<'a> {
    // Identifier of this machine; borrowed for lifetime 'a.
    id: &'a str
}
impl<'a> Machine for MyMachine<'a> {
fn new() -> {
MyMachine { id: ID }
}
fn run(&self) -> Block {
Block::make(&self.id)
}
}
|
use crate::features::syntax::MiscFeature;
use crate::parse::visitor::tests::assert_misc_feature;
/// A rest parameter (`...args`) in a plain function expression is detected
/// as the RestArguments feature.
#[test]
fn rest_in_func_expr() {
    assert_misc_feature(
        "const f = function(a, ...args) {
console.log(a, args);
}",
        MiscFeature::RestArguments,
    );
}
/// A rest parameter in a generator function expression is detected as the
/// RestArguments feature.
#[test]
fn rest_in_generator_func_expr() {
    assert_misc_feature(
        "const f = function*(a, ...args) {
console.log(a, args);
}",
        MiscFeature::RestArguments,
    );
}
/// A rest parameter in an async function expression is detected as the
/// RestArguments feature.
#[test]
fn rest_in_async_func_expr() {
    assert_misc_feature(
        "const f = async function(a, ...args) {
console.log(a, args);
}",
        MiscFeature::RestArguments,
    );
}
/// A rest parameter in an arrow function is detected as the RestArguments
/// feature.
#[test]
fn rest_in_arrow_func() {
    assert_misc_feature(
        "const f = (...args) => {
console.log(args);
};",
        MiscFeature::RestArguments,
    );
}
/// A rest parameter in an object-literal shorthand method is detected as
/// the RestArguments feature.
#[test]
fn rest_in_method() {
    assert_misc_feature(
        "const obj = {
func(...args) {
console.log(args);
}
};",
        MiscFeature::RestArguments,
    );
}
|
extern crate clap;
extern crate regex;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
mod structs;
use std::fs;
use std::io::{Read, Write};
use std::process;
use std::collections::BTreeMap;
// First line of each generated C++ file (header / implementation).
#[cfg_attr(rustfmt, rustfmt_skip)]
static HEADER_H : &'static str = "\
//===-- InstBuilder.h - SPIR-V instruction builder --------------*- C++ -*-===//\n";
#[cfg_attr(rustfmt, rustfmt_skip)]
static HEADER_CPP : &'static str = "\
//===-- InstBuilder.cpp - SPIR-V instruction builder ------------*- C++ -*-===//\n";
// LLVM-style copyright banner emitted after the header line.
#[cfg_attr(rustfmt, rustfmt_skip)]
static COPYRIGHT : &'static str = "\
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//";
// Warning emitted into every generated file.
#[cfg_attr(rustfmt, rustfmt_skip)]
static AUTOGEN_COMMENT : &'static str = "\
// AUTOMATICALLY GENERATED from the SPIR-V JSON grammar:
// spirv.core.grammar.json.
// DO NOT MODIFY!";
/// Writes the file `header`, the copyright banner, and the autogen warning
/// to `file`, with a blank line after each banner.
pub fn write_copyright_autogen_comment(file: &mut fs::File, header: &str) {
    let chunks: [&str; 5] = [header, COPYRIGHT, "\n\n", AUTOGEN_COMMENT, "\n\n"];
    for chunk in &chunks {
        file.write_all(chunk.as_bytes()).unwrap();
    }
}
/// Generates a formatted C++ file: creates `$path`, writes `$header` plus the
/// copyright/autogen banners, dumps `$content`, then runs clang-format
/// in-place over the result.
///
/// NOTE: this macro deliberately shadows `std::write!` within this module.
macro_rules! write {
    ($content: expr, $path: expr, $header: expr) => {
        {
            let mut f = fs::File::create($path).expect(&format!("cannot open file: {}", $path));
            write_copyright_autogen_comment(&mut f, $header);
            f.write_all(&$content.into_bytes()).unwrap();
            // clang-format normalizes whatever whitespace the templates emit.
            let mut cmd = process::Command::new("clang-format")
                .arg("-i").arg("-style=LLVM").arg($path)
                .spawn().expect("failed to execute clang-format");
            let ec = cmd.wait().expect("failed to wait on clang-format");
            assert!(ec.success());
        }
    }
}
/// Converts the given `symbol` to use snake case style.
///
/// A `_` is inserted between each lowercase/uppercase letter pair, existing
/// `-` characters become `_`, and the result is lowercased — matching the
/// original regex-based implementation, but without compiling a regex on
/// every call. Boundary detection is ASCII, like the original `[a-z][A-Z]`
/// pattern; the grammar symbols this tool processes are all ASCII.
pub fn snake_casify(symbol: &str) -> String {
    let mut result = String::with_capacity(symbol.len() + 4);
    let mut prev_is_lower = false;
    for c in symbol.chars() {
        if prev_is_lower && c.is_ascii_uppercase() {
            result.push('_');
        }
        prev_is_lower = c.is_ascii_lowercase();
        if c == '-' {
            result.push('_');
        } else {
            result.extend(c.to_lowercase());
        }
    }
    result
}
/// Maps a SPIR-V operand `kind` (plus its grammar `category`) to the C++
/// type used for it in the generated builder signatures.
fn get_cpp_type(kind: &str, category: &str) -> String {
    match kind {
        k if k.starts_with("Id") || k == "LiteralInteger" || k == "LiteralExtInstInteger" => {
            "uint32_t".to_string()
        }
        "LiteralSpecConstantOpInteger" => "spv::Op".to_string(),
        "LiteralContextDependentNumber" => {
            panic!("this kind is not expected to be handled here")
        }
        "LiteralString" => "std::string".to_string(),
        k if k.starts_with("Pair") => "std::pair<uint32_t, uint32_t>".to_string(),
        _ if category == "BitEnum" => format!("spv::{}Mask", kind),
        _ => format!("spv::{}", kind),
    }
}
/// Returns a suitable name for the given parameter.
///
/// Unnamed parameters are named after their kind (with special cases for
/// result type/id); the reserved word 'Default' becomes `default_target`;
/// otherwise the grammar name is sanitized and snake-cased.
fn get_param_name(param: &structs::Operand) -> String {
    if param.name.is_empty() {
        match param.kind.as_str() {
            "IdResultType" => "result_type".to_string(),
            "IdResult" => "result_id".to_string(),
            _ => snake_casify(&param.kind),
        }
    } else if param.name == "'Default'" {
        "default_target".to_string()
    } else {
        // Spaces become underscores, then non-word characters are dropped.
        // This replaces the original per-call `Regex::new(r"\W")` compile;
        // grammar names are ASCII, so an ASCII filter is equivalent.
        let cleaned: String = param.name
            .replace(" ", "_")
            .chars()
            .filter(|c| c.is_ascii_alphanumeric() || *c == '_')
            .collect();
        snake_casify(&cleaned)
    }
}
/// Returns the C++ parameter list for the given operands: one declaration
/// per operand, wrapped in `llvm::Optional` for "?" quantifiers and
/// `llvm::ArrayRef` for repeated ones.
fn get_param_list(params: &[structs::Operand], kinds: &[structs::OperandKind]) -> String {
    let decls: Vec<String> = params
        .iter()
        .map(|param| {
            let name = get_param_name(param);
            let ty = get_cpp_type(&param.kind, &get_category(param, kinds));
            match param.quantifier.as_str() {
                "" => format!("{} {}", ty, name),
                "?" => format!("llvm::Optional<{}> {}", ty, name),
                _ => format!("llvm::ArrayRef<{}> {}", ty, name),
            }
        })
        .collect();
    decls.join(", ")
}
/// Returns a suitable function name for the given `symbol`: the symbol with
/// its first character lowercased (e.g. "OpName" -> "opName").
fn get_function_name(symbol: &str) -> String {
    match symbol.chars().next() {
        None => String::new(),
        Some(first) => format!("{}{}", first.to_lowercase(), &symbol[first.len_utf8()..]),
    }
}
/// Builds the C++ signature of the builder method for one instruction,
/// optionally qualified with `InstBuilder::` for out-of-class definitions.
fn get_encode_inst_signature(inst: &structs::Instruction,
                             kinds: &[structs::OperandKind],
                             full_qualified: bool)
                             -> String {
    let qualifier = if full_qualified { "InstBuilder::" } else { "" };
    let method = get_function_name(&inst.opname);
    let params = get_param_list(&inst.operands, kinds);
    format!("InstBuilder& {}{}({})", qualifier, method, params)
}
/// Builds the C++ signature of the private encode helper for one operand
/// kind; bit enums take the `...Mask` type.
fn get_encode_operand_signature(kind: &structs::OperandKind, full_qualified: bool) -> String {
    let qual = if full_qualified { "InstBuilder::" } else { "" };
    let mask = if kind.category == "BitEnum" { "Mask" } else { "" };
    format!("void {qual}encode{kind}(spv::{kind}{mask} value)",
            qual = qual,
            kind = kind.kind,
            mask = mask)
}
/// Returns the C++ signatures of the fix-up methods for every operand kind
/// listed in `candidates`.
fn get_fixup_operand_signature(candidates: &[String],
                               kinds: &[structs::OperandKind])
                               -> Vec<String> {
    kinds.iter()
        // Idiom: `contains` instead of `iter().find(..).is_some()`.
        .filter(|kind| candidates.contains(&kind.kind))
        .map(|kind| {
            format!("InstBuilder& {name}({ty})",
                    name = get_function_name(&kind.kind),
                    ty = get_cpp_type(&kind.kind, &kind.category))
        })
        .collect()
}
/// Emits the C++ statement(s) that append one operand `name` of the given
/// `kind` to the instruction words under construction.
fn encode_param(name: &str, kind: &str, context: &Context) -> String {
    let category = &context.get_operand_kind(kind).category;
    match category.as_str() {
        "Id" => format!("TheInst.emplace_back({});", name),
        "Literal" => {
            if kind == "LiteralString" {
                format!("encodeString({});", name)
            } else if kind == "LiteralContextDependentNumber" {
                panic!("this kind is not expected to be handled here")
            } else if kind == "LiteralSpecConstantOpInteger" {
                format!("TheInst.emplace_back(static_cast<uint32_t>({}));", name)
            } else {
                format!("TheInst.emplace_back({});", name)
            }
        }
        "Composite" => format!(
            "TheInst.emplace_back({name}.first); TheInst.emplace_back({name}.second);",
            name = name),
        // Enums whose enumerants may carry extra parameters go through a
        // dedicated encode helper that also queues expectations.
        _ if context.has_additional_params(kind) => format!("encode{}({});", kind, name),
        _ => {
            assert!(category == "BitEnum" || category == "ValueEnum");
            format!("TheInst.emplace_back(static_cast<uint32_t>({}));", name)
        }
    }
}
/// Generates the out-of-class definitions of the fix-up methods for every
/// operand kind listed in `candidates`.
fn get_fixup_operand_impl(candidates: &[String], context: &Context) -> Vec<String> {
    context.grammar
        .operand_kinds
        .iter()
        // Idiom: `contains` instead of `iter().find(..).is_some()`.
        .filter(|kind| candidates.contains(&kind.kind))
        .map(|kind| {
            let encode = encode_param("value", &kind.kind, context);
            format!("InstBuilder& InstBuilder::{name}({ty} value) {{
if (Expectation.front() != OperandKind::{kind}) {{
TheStatus = Status::Expect{kind};
return *this;
}}
Expectation.pop_front();
{encode}
return *this;
}}",
                    kind = kind.kind,
                    name = get_function_name(&kind.kind),
                    encode = encode,
                    ty = get_cpp_type(&kind.kind, &kind.category))
        })
        .collect()
}
/// Generates the complete contents of the InstBuilder.h header: one builder
/// method per instruction, encode helpers for enums with extra parameters,
/// fix-up methods, and per-kind error codes.
fn gen_inst_builder_h(context: &Context) -> String {
    let grammar = context.grammar;
    // OpConstant/OpSpecConstant are handled by hand-written methods below.
    let inst_methods: Vec<String> = grammar.instructions
        .iter()
        .filter(|inst| inst.opname != "OpConstant" && inst.opname != "OpSpecConstant")
        .map(|inst| get_encode_inst_signature(inst, &grammar.operand_kinds, false))
        .collect();
    let operand_methods: Vec<String> = grammar.operand_kinds
        .iter()
        .filter(|kind| context.has_additional_params(&kind.kind))
        .map(|kind| get_encode_operand_signature(kind, false))
        .collect();
    let param_kinds = get_additional_param_kinds(&grammar.operand_kinds);
    let fixup_methods = get_fixup_operand_signature(&param_kinds, &grammar.operand_kinds);
    // Error codes continue after the four fixed ones (-1..-4), so the first
    // generated code is -5.
    let mut index = 4;
    let error_codes: Vec<String> = param_kinds.iter()
        .map(|kind| {
            index += 1;
            format!("Expect{} = -{}", kind, index)
        })
        .collect();
    format!("#ifndef LLVM_CLANG_SPIRV_INSTBUILDER_H
#define LLVM_CLANG_SPIRV_INSTBUILDER_H
#include <deque>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include \"spirv/unified1/spirv.hpp11\"
#include \"llvm/ADT/ArrayRef.h\"
#include \"llvm/ADT/Optional.h\"
namespace clang {{
namespace spirv {{
/// \\brief SPIR-V word consumer.
using WordConsumer = std::function<void(std::vector<uint32_t> &&)>;
/// \\brief A low-level SPIR-V instruction builder that generates SPIR-V words
/// directly. All generated SPIR-V words will be fed into the WordConsumer
/// passed in the constructor.
///
/// The methods of this builder reflects the layouts of the corresponding
/// SPIR-V instructions. For example, to construct an \"OpMemoryModel Logical
/// Simple\" instruction, just call InstBuilder::opMemoryModel(
/// spv::AddressingModel::Logical, spv::MemoryModel::Simple).
///
/// For SPIR-V instructions that may take additional parameters depending on
/// the value of some previous parameters, additional methods are provided to
/// \"fix up\" the instruction under building. For example, to construct an
/// \"OpDecorate <target-id> ArrayStride 0\" instruction, just call InstBuilder::
/// opDecorate(<target-id>, spv::Decoration::ArrayStride).literalInteger(0).
///
/// .x() is required to finalize the building and feed the result to the
/// consumer. On failure, if additional parameters are needed, the first missing
/// one will be reported by .x() via InstBuilder::Status.
class InstBuilder {{
public:
/// Status of instruction building.
enum class Status: int32_t {{
Success = 0,
NullConsumer = -1,
NestedInst = -2,
ZeroResultType = -3,
ZeroResultId = -4,
{error_codes}
}};
explicit InstBuilder(WordConsumer);
// Disable copy constructor/assignment.
InstBuilder(const InstBuilder &) = delete;
InstBuilder &operator=(const InstBuilder &) = delete;
// Allow move constructor/assignment.
InstBuilder(InstBuilder &&that) = default;
InstBuilder &operator=(InstBuilder &&that) = default;
void setConsumer(WordConsumer);
const WordConsumer &getConsumer() const;
/// \\brief Finalizes the building and feeds the generated SPIR-V words
/// to the consumer.
Status x();
/// \\brief Finalizes the building and returns the generated SPIR-V words.
/// Returns an empty vector if errors happened during the construction.
std::vector<uint32_t> take();
/// \\brief Clears the current instruction under building.
void clear();
// Instruction building methods.
{inst_methods};
// All-in-one methods for creating unary and binary operations.
InstBuilder &unaryOp(spv::Op op, uint32_t result_type, uint32_t result_id,
uint32_t operand);
InstBuilder &binaryOp(spv::Op op, uint32_t result_type, uint32_t result_id,
uint32_t lhs, uint32_t rhs);
// Methods for building constants.
InstBuilder &opConstant(uint32_t result_type, uint32_t result_id,
uint32_t value);
// All-in-one method for creating different types of OpImageSample*.
InstBuilder &
opImageSample(uint32_t result_type, uint32_t result_id,
uint32_t sampled_image, uint32_t coordinate, uint32_t dref,
llvm::Optional<spv::ImageOperandsMask> image_operands,
bool is_explicit, bool is_sparse);
// All-in-one method for creating different types of
// OpImageRead*/OpImageFetch*.
InstBuilder &
opImageFetchRead(uint32_t result_type, uint32_t result_id, uint32_t image,
uint32_t coordinate,
llvm::Optional<spv::ImageOperandsMask> image_operands,
bool is_fetch, bool is_sparse);
// Methods for supplying additional parameters.
{fixup_methods};
private:
enum class OperandKind {{
{kinds}
}};
{operand_methods};
void encodeString(std::string value);
WordConsumer TheConsumer;
std::vector<uint32_t> TheInst; ///< The instruction under construction.
std::deque<OperandKind> Expectation; ///< Expected additional parameters.
Status TheStatus; ///< Current building status.
}};
}} // end namespace spirv
}} // end namespace clang
#endif\n",
            kinds = param_kinds.join(",\n"),
            error_codes = error_codes.join(",\n"),
            inst_methods = inst_methods.join(";\n"),
            fixup_methods = fixup_methods.join(";\n"),
            operand_methods = operand_methods.join(";\n"))
}
/// Emits the C++ preamble checks for a builder method: reject nested
/// building, a zero result type, and a zero result id (each only when the
/// instruction actually declares that operand).
fn check_opcode_result_type_id(inst: &structs::Instruction) -> String {
    let mut result = "if (!TheInst.empty()) { TheStatus = Status::NestedInst; return *this; }\n"
        .to_string();
    if !inst.operands.is_empty() {
        let mut index = 0;
        if inst.operands[index].kind == "IdResultType" {
            result += "if (result_type == 0) \
                       { TheStatus = Status::ZeroResultType; return *this; }\n";
            index += 1;
        }
        // Fix: guard the index — an instruction whose only operand is
        // IdResultType would otherwise panic with an out-of-bounds access.
        if index < inst.operands.len() && inst.operands[index].kind == "IdResult" {
            result += "if (result_id == 0) { TheStatus = Status::ZeroResultId; return *this; }\n";
        }
    }
    result
}
/// Emits C++ code pushing one argument onto `TheInst`, honoring the operand
/// quantifier: "" = exactly one, "?" = optional, anything else = repeated.
fn push_argument(name: &str, kind: &str, quantifier: &str, context: &Context) -> String {
    match quantifier {
        "" => encode_param(name, kind, context),
        "?" => format!(
            "if ({name}.hasValue()) {{ const auto& val = {name}.getValue(); {sub} }}",
            name = name,
            sub = push_argument("val", kind, "", context)),
        // Repeated ids are raw words and can be bulk-inserted.
        _ if kind.starts_with("Id") => format!(
            "TheInst.insert(TheInst.end(), {name}.begin(), {name}.end());",
            name = name),
        _ => format!(
            "for (const auto& param : {name}) {{ {sub}; }}",
            name = name,
            sub = push_argument("param", kind, "", context)),
    }
}
/// Looks up the grammar category of the given parameter's operand kind.
///
/// Panics if the kind is absent from the grammar (a malformed grammar file);
/// the original panic message "should found" was ungrammatical.
fn get_category(param: &structs::Operand, kinds: &[structs::OperandKind]) -> String {
    kinds.iter()
        .find(|x| x.kind == param.kind)
        .expect("operand kind not found in grammar")
        .category
        .to_string()
}
/// Emits the C++ statements pushing every operand of an instruction, one
/// per line. (Joining an empty list yields "", so no special-casing of
/// empty `params` is needed.)
fn push_arguments(params: &[structs::Operand], context: &Context) -> String {
    let pieces: Vec<String> = params
        .iter()
        .map(|p| push_argument(&get_param_name(p), &p.kind, &p.quantifier, context))
        .collect();
    pieces.join("\n")
}
/// Generates the out-of-class definition of the builder method for one
/// instruction: preamble checks, opcode word, then each operand.
fn get_build_inst_impl(inst: &structs::Instruction, context: &Context) -> String {
    let signature = get_encode_inst_signature(inst, &context.grammar.operand_kinds, true);
    let result_type_id = check_opcode_result_type_id(inst);
    let operands = push_arguments(&inst.operands, context);
    format!("{signature} {{
{result_type_id}
TheInst.reserve({count});
TheInst.emplace_back(static_cast<uint32_t>(spv::Op::{opname}));
{operands}
return *this;
}}",
            signature = signature,
            count = inst.operands.len() + 1,
            opname = inst.opname,
            result_type_id = result_type_id,
            operands = operands)
}
/// Generates the encode helper for a ValueEnum/BitEnum operand kind, or
/// `None` when no enumerant of the kind takes additional parameters (and
/// thus the generic static_cast path suffices).
fn get_build_operand_impl(kind: &structs::OperandKind) -> Option<String> {
    if kind.category == "ValueEnum" {
        // One switch case per enumerant that carries extra parameters; each
        // case queues the expected operand kinds before emitting the value.
        let cases: Vec<String> = kind.enumerants
            .iter()
            .filter_map(|e| if e.parameters.len() == 0 {
                None
            } else {
                let params: Vec<String> = e.parameters
                    .iter()
                    .map(|p| format!("Expectation.emplace_back(OperandKind::{})", p.kind))
                    .collect();
                Some(format!("  case spv::{kind}::{symbol}: {{ {expect}; }} break;",
                             kind = kind.kind,
                             symbol = e.symbol,
                             expect = params.join("; ")))
            })
            .collect();
        if cases.len() == 0 {
            // No enumerant need additional arguments, therefore no need for
            // special encoding method for this operand kind.
            None
        } else {
            Some(format!("{signature} {{
switch (value) {{
{cases}
default: break;
}}
TheInst.emplace_back(static_cast<uint32_t>(value));
}}",
                         signature = get_encode_operand_signature(kind, true),
                         cases = cases.join("\n")))
        }
    } else if kind.category == "BitEnum" {
        // Bit enums can combine several flags, so each parameterized bit is
        // tested independently with bitEnumContains.
        let cases: Vec<String> = kind.enumerants
            .iter()
            .filter_map(|e| if e.parameters.len() == 0 {
                None
            } else {
                let params: Vec<String> = e.parameters
                    .iter()
                    .map(|p| format!("Expectation.emplace_back(OperandKind::{})", p.kind))
                    .collect();
                Some(format!("if (bitEnumContains(value, spv::{kind}Mask::{symbol})) \
                             {{ {expect}; }}",
                             kind = kind.kind,
                             symbol = e.symbol,
                             expect = params.join("; ")))
            })
            .collect();
        if cases.len() == 0 {
            // No enumerant need additional arguments, therefore no need for
            // special encoding method for this operand kind.
            None
        } else {
            Some(format!("{signature} {{
{cases}
TheInst.emplace_back(static_cast<uint32_t>(value));
}}",
                         signature = get_encode_operand_signature(kind, true),
                         cases = cases.join("\n")))
        }
    } else {
        panic!("only ValueEnum and BitEnum are handled here");
    }
}
/// Returns all operand kinds used in additional parameters for BitEnum and
/// ValueEnum enumerants, sorted and deduplicated.
fn get_additional_param_kinds(kinds: &[structs::OperandKind]) -> Vec<String> {
    let mut result: Vec<String> = Vec::new();
    for kind in kinds {
        for enumerant in &kind.enumerants {
            result.extend(enumerant.parameters.iter().map(|p| p.kind.clone()));
        }
    }
    result.sort();
    result.dedup();
    result
}
/// Returns a list of bitEnumContains() functions for all BitEnums that can
/// potentially require additional parameters.
fn get_contain_function(kind: &structs::OperandKind, context: &Context) -> Option<String> {
    if kind.category != "BitEnum" || !context.has_additional_params(&kind.kind) {
        return None;
    }
    Some(format!("inline bool bitEnumContains(spv::{kind}Mask bits, spv::{kind}Mask bit) {{ \
                 return (uint32_t(bits) & uint32_t(bit)) != 0; }}",
                 kind = kind.kind))
}
/// Generates the complete contents of InstBuilderAuto.cpp: the method
/// definitions, the enum encode helpers, the fix-up methods, and a
/// static_assert pinning the grammar version the file was generated from.
fn gen_inst_builder_cpp(context: &Context) -> String {
    let grammar = context.grammar;
    // OpConstant/OpSpecConstant are handled by hand-written methods.
    let inst_impls: Vec<String> = grammar.instructions
        .iter()
        .filter(|inst| inst.opname != "OpConstant" && inst.opname != "OpSpecConstant")
        .map(|inst| get_build_inst_impl(inst, context))
        .collect();
    let operand_impls: Vec<String> = grammar.operand_kinds
        .iter()
        .filter(|kind| kind.category == "ValueEnum" || kind.category == "BitEnum")
        .filter_map(|kind| get_build_operand_impl(kind))
        .collect();
    let contain_impls: Vec<String> = grammar.operand_kinds
        .iter()
        .filter(|kind| kind.category == "BitEnum")
        .filter_map(|kind| get_contain_function(kind, context))
        .collect();
    let param_kinds = get_additional_param_kinds(&grammar.operand_kinds);
    let fixup_impls = get_fixup_operand_impl(&param_kinds, context);
    let errors: Vec<String> = param_kinds.iter()
        .map(|kind| format!("case OperandKind::{k}: return Status::Expect{k};", k = kind))
        .collect();
    // The version word layout (major << 16 | minor << 8) mirrors the SPIR-V
    // version word encoding used by spv::Version.
    format!("#include \"clang/SPIRV/InstBuilder.h\"
namespace clang {{
namespace spirv {{
static_assert(spv::Version == {version:#010x} && spv::Revision == {revision},
\"Needs to regenerate outdated InstBuilder\");
namespace {{
{contains}
}}
InstBuilder::InstBuilder(WordConsumer consumer)
: TheConsumer(consumer), TheStatus(Status::Success) {{}}
void InstBuilder::setConsumer(WordConsumer consumer) {{ TheConsumer = consumer; }}
const WordConsumer &InstBuilder::getConsumer() const {{ return TheConsumer; }}
InstBuilder::Status
InstBuilder::x() {{
if (TheConsumer == nullptr) return Status::NullConsumer;
if (TheStatus != Status::Success) return TheStatus;
if (!Expectation.empty()) {{
switch (Expectation.front()) {{
{errors}
}}
}}
if (!TheInst.empty()) TheInst.front() |= uint32_t(TheInst.size()) << 16;
TheConsumer(std::move(TheInst));
TheInst.clear();
return TheStatus;
}}
void InstBuilder::clear() {{
TheInst.clear();
Expectation.clear();
TheStatus = Status::Success;
}}
{inst_methods}
{operand_methods}
{fixup_methods}
}} // end namespace spirv
}} // end namespace clang\n",
            errors = errors.join("\n"),
            contains = contain_impls.join("\n"),
            inst_methods = inst_impls.join("\n\n"),
            fixup_methods = fixup_impls.join("\n\n"),
            operand_methods = operand_impls.join("\n\n"),
            version = (grammar.major_version << 16) | (grammar.minor_version << 8),
            revision = grammar.revision)
}
/// Lookup context built once from the parsed grammar.
struct Context<'a> {
    /// The SPIR-V grammar
    grammar: &'a structs::Grammar,
    /// Mapping from SPIR-V operand kind names to their grammar
    operand_kinds: BTreeMap<&'a str, &'a structs::OperandKind>,
    /// Mapping from SPIR-V operand kind names to whether may have additional parameters
    with_additional_params: BTreeMap<&'a str, bool>,
}
impl<'a> Context<'a> {
    /// Builds the lookup tables over the grammar: operand-kind name to
    /// grammar entry, and operand-kind name to "any enumerant of this kind
    /// carries extra parameters".
    pub fn new(grammar: &'a structs::Grammar) -> Context<'a> {
        let mut ret = Context {
            // Field-init shorthand instead of `grammar: grammar`.
            grammar,
            operand_kinds: BTreeMap::new(),
            with_additional_params: BTreeMap::new(),
        };
        for kind in &grammar.operand_kinds {
            let key = &kind.kind;
            ret.operand_kinds.insert(key, kind);
            let has_param = kind.enumerants.iter().any(|e| !e.parameters.is_empty());
            ret.with_additional_params.insert(key, has_param);
        }
        ret
    }
    /// Returns the grammar for the given operand kind.
    /// Panics if the kind is not present in the grammar.
    pub fn get_operand_kind(&self, kind: &str) -> &'a structs::OperandKind {
        *self.operand_kinds.get(&kind).expect("key not found")
    }
    /// Returns whether the operand kind may have additional parameters.
    /// Panics if the kind is not present in the grammar.
    pub fn has_additional_params(&self, kind: &str) -> bool {
        *self.with_additional_params.get(&kind).expect("key not found")
    }
}
/// Entry point: parse the grammar JSON named on the command line and emit
/// the generated header and implementation files.
fn main() {
    let matches = clap::App::new("SPIR-V builder generator")
        .arg(clap::Arg::with_name("grammar")
            .help("SPIR-V core grammar file")
            .required(true)
            .index(1))
        .get_matches();
    let path = matches.value_of("grammar").unwrap();
    let mut contents = String::new();
    fs::File::open(path).unwrap().read_to_string(&mut contents).unwrap();
    let grammar: structs::Grammar = serde_json::from_str(&contents).unwrap();
    let context = Context::new(&grammar);
    // Note: `write!` here is the module-local file-generation macro.
    write!(gen_inst_builder_h(&context), "InstBuilder.h", HEADER_H);
    write!(gen_inst_builder_cpp(&context), "InstBuilderAuto.cpp", HEADER_CPP);
}
|
/**
MSDN:
Windows Development (Windows) / Windows Application UI Development |=>
Windows and Messages / Window Classes / Window Class Reference |=>
Window Class Styles
**/
use super::super::prelude::WindowClassStyle;
// Friendly-named window class style flags; each mirrors a winuser.h CS_*
// constant with the same value (see the MSDN reference cited above).
pub static VerticalRedraw : WindowClassStyle = 0x0001; // CS_VREDRAW
pub static HorizontalRedraw : WindowClassStyle = 0x0002; // CS_HREDRAW
pub static DoubleClicks : WindowClassStyle = 0x0008; // CS_DBLCLKS
pub static WindowDC : WindowClassStyle = 0x0020; // CS_OWNDC
pub static ClassDC : WindowClassStyle = 0x0040; // CS_CLASSDC
pub static ParentDC : WindowClassStyle = 0x0080; // CS_PARENTDC
pub static NoClose : WindowClassStyle = 0x0200; // CS_NOCLOSE
pub static SaveBits : WindowClassStyle = 0x0800; // CS_SAVEBITS
pub static ByteAlignClient : WindowClassStyle = 0x1000; // CS_BYTEALIGNCLIENT
pub static ByteAlignWindow : WindowClassStyle = 0x2000; // CS_BYTEALIGNWINDOW
pub static GlobalClass : WindowClassStyle = 0x4000; // CS_GLOBALCLASS
pub static InputMethodEditor : WindowClassStyle = 0x00010000; // CS_IME
pub static DropShadow : WindowClassStyle = 0x00020000; |
use proconio::input;
/// Reads the problem input, counts the tatami placements, and prints the count.
fn main() {
    let mut tatami_solver = Solver::new();
    let answer = tatami_solver.solve();
    println!("{}", answer);
}
/// State for the tatami-placement counting search.
struct Solver {
    h: isize, // grid height
    w: isize, // grid width
    a: isize, // number of 1x2 tatami mats available
    b: isize, // number of 1x1 mats available
    ans: i64, // accumulated count of complete tilings
}
impl Solver {
    /// Reads `h w a b` from stdin via proconio's `input!` macro.
    fn new() -> Solver {
        input! {
            h: isize,
            w: isize,
            a: isize,
            b: isize,
        }
        Solver {
            h: h,
            w: w,
            a: a,
            b: b,
            ans: 0,
        }
    }
    /// Counts all ways to tile the h*w grid with `a` 1x2 mats and `b` 1x1 mats.
    fn solve(&mut self) -> i64 {
        self.ans = 0;
        self.put(0, 0, self.a, self.b);
        self.ans
    }
    /// Backtracking step: `tatamis` is an occupancy bitmask over cells in
    /// row-major order (so the grid must fit in 32 cells), `idx` is the next
    /// cell to fill, `a`/`b` are the mats remaining.
    fn put(&mut self, tatamis: u32, idx: isize, a: isize, b: isize) {
        // All cells processed: one complete tiling found.
        if idx == self.h * self.w {
            self.ans += 1;
            return;
        }
        // Cell already covered by an earlier 1x2 mat: advance.
        if (tatamis >> idx) & 1 == 1 {
            self.put(tatamis, idx + 1, a, b);
            return;
        }
        if a > 0 {
            let tatami_a1 = 1 << idx;
            // Horizontal placement (only if not at the right edge).
            let tatami_a2_idx = idx + 1;
            if (idx % self.w != self.w - 1) && (tatamis >> tatami_a2_idx) & 1 == 0 {
                let tatami_a2 = 1 << tatami_a2_idx;
                self.put(tatamis | tatami_a1 | tatami_a2, idx + 1, a - 1, b);
            }
            // Vertical placement (only if not on the bottom row).
            let tatami_a2_idx = idx + self.w;
            if (idx / self.w < self.h - 1) && (tatamis >> tatami_a2_idx) & 1 == 0 {
                let tatami_a2 = 1 << tatami_a2_idx;
                self.put(tatamis | tatami_a1 | tatami_a2, idx + 1, a - 1, b);
            }
        }
        // Cover this single cell with a 1x1 mat.
        if b > 0 {
            let tatami_b = 1 << idx;
            self.put(tatamis | tatami_b, idx + 1, a, b - 1);
        }
    }
}
|
use std::net::{Ipv4Addr, SocketAddrV4, UdpSocket};
use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt};
use std::thread;
use std::io;
use std::io::Cursor;
mod attributes;
#[allow(dead_code)]
/// STUN message classes; mapped to wire-format type bits in `Header::new`.
enum MessageType {
    Request,
    Indication,
    SuccessResponse,
    ErrorResponse,
}
#[allow(dead_code)]
/// A STUN message: fixed 20-byte header plus a list of TLV attributes.
struct Message<'a> {
    pub header: Header<'a>,
    pub attributes: Vec<Attribute>,
}
impl Message<'_> {
    /// Builds a new message of the given class with no attributes.
    ///
    /// NOTE(review): `transaction_id` is never initialized, and a borrow of
    /// this local array is stored in the returned `Message` — both are
    /// rejected by the compiler (use of possibly-uninitialized value and a
    /// dangling reference). The ID must be generated and owned by storage
    /// that outlives the message.
    fn new(k_message_type: MessageType) -> Message {
        // generate random transaction ID
        let transaction_id: [u8; 12];
        let header = Header::new(
            k_message_type,
            0,
            &transaction_id[..]
        );
        return Message {
            header: header,
            attributes: vec![],
        };
    }
    /// Appends an attribute (not yet implemented).
    /// NOTE(review): takes `&self`, so it cannot mutate `attributes`;
    /// should become `&mut self` when implemented.
    fn add(&self, attribute: Attribute) {
        // increase length
        // append attribute
    }
    /// Parses a raw packet into a `Message` (not yet implemented).
    /// NOTE(review): the body produces no value, so this does not compile.
    fn parse(buf: &[u8]) -> Result<Message, io::Error> {
        // parse header
        // parse message
    }
    /// Serializes the message to network byte order.
    /// NOTE(review): `message_type`/`message_length`/`magic_cookie`/
    /// `transaction_id` are fields of `self.header`, not of `Message`, so
    /// this does not compile as written; it presumably should delegate to
    /// `self.header.packetize()` and then append the attributes.
    fn packetize(&self) -> Result<Vec<u8>, io::Error> {
        let mut header = vec![];
        header.write_u16::<BigEndian>(self.message_type)?;
        header.write_u16::<BigEndian>(self.message_length)?;
        header.write_u32::<BigEndian>(self.magic_cookie)?;
        for &b in self.transaction_id {
            header.write_u8(b)?;
        }
        Ok(header)
    }
}
/// Fixed 20-byte STUN message header; `transaction_id` borrows the 12 ID
/// bytes from the original packet buffer.
struct Header<'a> {
    pub message_type: u16,
    pub message_length: u16,
    pub magic_cookie: u32,
    pub transaction_id: &'a [u8],
}
impl Header<'_> {
fn new(k_message_type: MessageType, message_length: u16, transaction_id: &[u8]) -> Header {
// only support binding request
let message_type: u16 = match k_message_type {
MessageType::Request => 0x0001,
MessageType::Indication => 0x0011,
MessageType::SuccessResponse => 0x0101,
MessageType::ErrorResponse => 0x0111,
};
return Header {
message_type: message_type,
message_length: message_length,
magic_cookie: 0x2112a442,
transaction_id: transaction_id,
};
}
fn parse(buf: &[u8]) -> Result<Header, io::Error> {
let mut rdr = Cursor::new(buf);
Ok(Header {
message_type: rdr.read_u16::<BigEndian>()?,
message_length: rdr.read_u16::<BigEndian>()?,
magic_cookie: 0x2112a442,
transaction_id: &buf[8..20],
})
}
fn packetize(&self) -> Result<Vec<u8>, io::Error> {
let mut header = vec![];
header.write_u16::<BigEndian>(self.message_type)?;
header.write_u16::<BigEndian>(self.message_length)?;
header.write_u32::<BigEndian>(self.magic_cookie)?;
for &b in self.transaction_id {
header.write_u8(b)?;
}
Ok(header)
}
}
#[allow(dead_code)]
/// One STUN TLV attribute: 16-bit type, 16-bit value length, raw value bytes.
struct Attribute {
    pub attribute_type: u16,
    pub attribute_length: u16,
    pub value: Vec<u8>,
}
// header validation
/// Returns `true` only for well-formed STUN Binding *requests*.
///
/// Fixes two defects in the original:
/// - `buf[1] & 0x10 == 1` could never be true (the masked value is 0 or
///   0x10), so the indication/response branches were unreachable and an
///   indication fell through to `return true`.
/// - No length guard; an undersized buffer would index out of bounds.
pub fn validate_header(buf: &[u8]) -> bool {
    // Need at least the fixed 20-byte STUN header.
    if buf.len() < 20 {
        return false;
    }
    // The first two bits of any STUN message are zero.
    if buf[0] & 0xc0 != 0 {
        return false;
    }
    // The magic cookie field must hold the fixed value 0x2112a442.
    if buf[4..8] != [0x21, 0x12, 0xa4, 0x42] {
        return false;
    }
    // Class bits of the 16-bit message type: C1 is 0x0100 (bit 0 of byte 0),
    // C0 is 0x0010 (bit 4 of byte 1). Class 0b00 = request, 0b01 = indication,
    // 0b10 = success response, 0b11 = error response.
    let c1 = buf[0] & 0x01 != 0;
    let c0 = buf[1] & 0x10 != 0;
    // Only requests are served; every other class is rejected.
    !c1 && !c0
}
fn generate_response_message() -> Result<Vec<u8>, io::Error> {
let mut message = vec![];
// 0x0020: XOR-MAPPED-ADDRESS
{
let header: u16 = 0x0020;
let length: u16 = 0x0008;
let family: u16 = 0x0001;
let x_port: u16 = 0xf28b;
let x_address: [u8; 4] = [0x8d, 0x06, 0xa4, 0x41];
message.write_u16::<BigEndian>(header)?;
message.write_u16::<BigEndian>(length)?;
message.write_u16::<BigEndian>(family)?;
message.write_u16::<BigEndian>(x_port)?;
for &b in &x_address {
message.write_u8(b)?;
}
}
// 0x0001: MAPPED-ADDRESS
{
let header: u16 = 0x0001;
let length: u16 = 0x0008;
let family: u16 = 0x0001;
let port: u16 = 0xd399;
let address: [u8; 4] = [0xac, 0x14, 0x00, 0x03];
message.write_u16::<BigEndian>(header)?;
message.write_u16::<BigEndian>(length)?;
message.write_u16::<BigEndian>(family)?;
message.write_u16::<BigEndian>(port)?;
for &b in &address {
message.write_u8(b)?;
}
}
// 0x802b: RESPONSE-ORIGIN
{
let header: u16 = 0x802b;
let length: u16 = 0x0008;
let family: u16 = 0x0001;
let port: u16 = 0x0d96;
let address: [u8; 4] = [0xac, 0x14, 0x00, 0x02];
message.write_u16::<BigEndian>(header)?;
message.write_u16::<BigEndian>(length)?;
message.write_u16::<BigEndian>(family)?;
message.write_u16::<BigEndian>(port)?;
for &b in &address {
message.write_u8(b)?;
}
}
// 0x802c: OTHER-ADDRESS
{
let header: u16 = 0x802c;
let length: u16 = 0x0008;
let family: u16 = 0x0001;
let x_port: u16 = 0x0d97;
let address: [u8; 4] = [0xac, 0x14, 0x00, 0x02];
message.write_u16::<BigEndian>(header)?;
message.write_u16::<BigEndian>(length)?;
message.write_u16::<BigEndian>(family)?;
message.write_u16::<BigEndian>(x_port)?;
for &b in &address {
message.write_u8(b)?;
}
}
// 0x8022: SOFTWARE
{
let header: u16 = 0x8022;
let length: u16 = 0x0016;
let body: [u8; 0x16] = [
0x43, 0x6f, 0x74, 0x75, 0x72,
0x6e, 0x2d, 0x34, 0x2e, 0x32,
0x2e, 0x31, 0x2e, 0x32, 0x20,
0x27, 0x4d, 0x6f, 0x6e, 0x7a,
0x61, 0x27
];
message.write_u16::<BigEndian>(header)?;
message.write_u16::<BigEndian>(length)?;
for &b in &body {
message.write_u8(b)?;
}
}
for _ in 0..(message.len() % 4) {
message.write_u8(0x00)?;
}
Ok(message)
}
/// Assembles a full Binding success response: a 20-byte header echoing the
/// request's transaction ID, followed by the fixed attribute section.
fn generate_response(req_header: &Header) -> Result<Vec<u8>, io::Error> {
    let body = generate_response_message()?;
    let header = Header::new(
        MessageType::SuccessResponse,
        body.len() as u16,
        req_header.transaction_id
    );
    let mut packet = header.packetize()?;
    packet.extend(body);
    Ok(packet)
}
/// Binds UDP port 3478 and serves STUN Binding requests forever, handling
/// each datagram on its own thread.
///
/// # Errors
/// Returns any error from binding or cloning the socket; receive errors
/// panic (this is a toy server loop).
pub fn recv() -> Result<(), io::Error> {
    // Listen on all interfaces, standard STUN port.
    let ip = Ipv4Addr::new(0, 0, 0, 0);
    let connection = SocketAddrV4::new(ip, 3478);
    let socket = UdpSocket::bind(connection)?;
    loop {
        let mut buf = [0; 1500];
        let s = socket.try_clone()?;
        match socket.recv_from(&mut buf) {
            Ok((amt, src)) => {
                // `buf` and the cloned socket move into the worker thread.
                thread::spawn(move || {
                    // Hex-dump the datagram, space-separated every two bytes.
                    for (i, b) in buf[0..amt].iter().enumerate() {
                        print!("{:02x}", b);
                        if i % 2 == 1 {
                            print!(" ");
                        }
                    }
                    let hdr = Header::parse(&buf[..]).unwrap();
                    println!();
                    println!("src: {}", src);
                    println!("type: {:x}", hdr.message_type);
                    println!("length: {:x}", hdr.message_length);
                    println!("cookie: {:x}", hdr.magic_cookie);
                    println!("trans_id: {:x?}", hdr.transaction_id);
                    // Validate before building the response; the old code
                    // generated a response even for packets it then dropped.
                    if validate_header(&buf) {
                        let send_buf = generate_response(&hdr).unwrap();
                        s.send_to(&send_buf, src).expect("failed to send response");
                    }
                });
            },
            Err(e) => {
                // `panic!(e)` with a non-string payload is deprecated and a
                // hard error in the 2021 edition; format the error instead.
                panic!("recv_from failed: {}", e);
            }
        }
    }
}
|
// svd2rust-generated reader/writer plumbing for the HASH_CSR49 register;
// the CS49 field spans the full 32 bits.
#[doc = "Register `HASH_CSR49` reader"]
pub type R = crate::R<HASH_CSR49_SPEC>;
#[doc = "Register `HASH_CSR49` writer"]
pub type W = crate::W<HASH_CSR49_SPEC>;
#[doc = "Field `CS49` reader - CS49"]
pub type CS49_R = crate::FieldReader<u32>;
#[doc = "Field `CS49` writer - CS49"]
pub type CS49_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 32, O, u32>;
impl R {
    #[doc = "Bits 0:31 - CS49"]
    #[inline(always)]
    // The field covers all 32 bits, so the reader wraps the raw value as-is.
    pub fn cs49(&self) -> CS49_R {
        CS49_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:31 - CS49"]
    #[inline(always)]
    #[must_use]
    // Field writer positioned at offset 0 (the field is the whole register).
    pub fn cs49(&mut self) -> CS49_W<HASH_CSR49_SPEC, 0> {
        CS49_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // SAFETY contract per svd2rust convention: the caller must supply a bit
    // pattern that is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "HASH context swap registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hash_csr49::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hash_csr49::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct HASH_CSR49_SPEC;
impl crate::RegisterSpec for HASH_CSR49_SPEC {
    // 32-bit underlying storage.
    type Ux = u32;
}
#[doc = "`read()` method returns [`hash_csr49::R`](R) reader structure"]
impl crate::Readable for HASH_CSR49_SPEC {}
#[doc = "`write(|w| ..)` method takes [`hash_csr49::W`](W) writer structure"]
impl crate::Writable for HASH_CSR49_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets HASH_CSR49 to value 0"]
impl crate::Resettable for HASH_CSR49_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#![allow(warnings)]
use std::net::{TcpListener};
use std::collections::HashMap;
use std::io::{BufReader,BufWriter};
use std::net::TcpStream;
use std::sync::{Arc,Condvar,Mutex};
use std::io::prelude::*;
use std::thread;
use std::sync::mpsc::channel;
use std::sync::mpsc::{sync_channel, SyncSender , Receiver};
use std::fs::File;
use std::fs::OpenOptions;
use std::collections::HashSet;
extern crate chan;
/// Chat-server entry point: accepts TCP connections on 127.0.0.1:8080 and
/// hands each client to `login` on its own thread.
fn main() {
    let listener = TcpListener::bind("127.0.0.1:8080").unwrap();
    // Shared state: persistent user accounts, chat rooms, and the map of
    // currently-connected users to their streams.
    let users = Arc::new(Mutex::new(User_info_map::new()));
    let group_chat = Arc::new(Mutex::new(Group_chat::new()));
    let online_users = Arc::new(Mutex::new(HashMap::new()));
    for incoming in listener.incoming() {
        let group_chat = group_chat.clone();
        let users = users.clone();
        let online_users = online_users.clone();
        match incoming {
            Ok(stream) => {
                thread::spawn(move || login(stream, group_chat, users, online_users));
            }
            Err(_e) => {}
        }
    }
}
/// Per-connection login loop: prompts for Y (existing account) or N (create
/// one), authenticates against `users`, and on success enters `user_loop`.
///
/// NOTE(review): write/read `Result`s are ignored throughout, and the
/// `vec.len() != 3` check appears to assume CRLF line endings ("Y\r\n") —
/// confirm clients on platforms sending bare "\n" can log in.
fn login(mut stream : TcpStream , group_chat : Arc<Mutex<Group_chat>> , users : Arc<Mutex<User_info_map>>,
online_users : Arc<Mutex<HashMap<String , TcpStream>>>) {
    let mut stream_loop = stream.try_clone().unwrap() ;
    let mut stream_loop2 = stream.try_clone().unwrap() ;
    let mut read_method = BufReader::new(stream) ;
    loop{
        let mut stream_loop2 = stream_loop.try_clone().unwrap() ;
        stream_loop.write("If existing user please enter Y or to create an account enter N ".as_bytes());
        let mut my_string = String :: new() ;
        read_method.read_line(&mut my_string);
        let mut vec : Vec<char> = Vec :: new() ;
        for x in my_string.clone().chars() {
            vec.push(x);
        }
        // Expect exactly one answer character plus a 2-byte line terminator.
        if vec.len() != 3 {
            continue;
        }
        if vec[0] == 'Y'{
            stream_loop2.write("Please enter your user name : ".as_bytes());
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string);
            let name = my_string.clone();
            stream_loop2.write("Please enter your password : ".as_bytes());
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string);
            let password = my_string.clone();
            // Lock scopes are kept tight so the user map is not held across I/O.
            let mut flag = false;
            {
                flag = users.lock().unwrap().contains_user(name.clone());
            }
            if flag {
                let mut password2 = String :: new() ;
                {
                    password2 = users.lock().unwrap().get_password(name.clone());
                }
                if password2 == password{
                    let group_chat2 = group_chat.clone();
                    // Successful login: hand the connection to the lobby loop.
                    user_loop(stream_loop2 , group_chat2 , name.clone(), online_users , users);
                    break ;
                }
                else{
                    stream_loop2.write("incorrect password \n".as_bytes());
                }
            }
            else{
                stream_loop2.write("no such user exists ! \n".as_bytes());
            }
        }
        else if vec[0] == 'N'{
            stream_loop2.write("Please enter your user name : ".as_bytes());
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string);
            let name = my_string.clone();
            stream_loop2.write("Please enter your password : ".as_bytes());
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string);
            let password = my_string.clone();
            // Registration: persist the account, then loop back to the prompt.
            users.lock().unwrap().Create_user(name.clone() , password.clone());
        }
        else{
            stream_loop2.write("Please give valid response \n".as_bytes());
        }
    }
}
/// Lobby loop for a logged-in user: registers the connection in
/// `online_users`, then repeatedly offers A(dd friend) / F(riend chat) /
/// C(reate room) / J(oin room), plus handling of incoming private-chat
/// invitations (signalled via the user's `private_chat` flag).
///
/// NOTE(review): several spots busy-wait on a flag inside `loop {}` while
/// repeatedly taking the users lock — this spins a CPU core; `vec[0]` panics
/// if the client sends an empty line; and write/read `Result`s are ignored.
fn user_loop (mut stream : TcpStream ,group_chat : Arc<Mutex<Group_chat>> , name : String,
online_users : Arc<Mutex<HashMap<String, TcpStream>>> , users : Arc<Mutex<User_info_map>> ){
    let mut stream = stream.try_clone().unwrap();
    let mut stream_user = stream.try_clone().unwrap();
    {
        // Mark this user online so friends can find the stream.
        online_users.lock().unwrap().insert(name.clone() ,stream_user);
    }
    loop{
        let mut stream_loop = stream.try_clone().unwrap();
        let mut stream_loop2 = stream.try_clone().unwrap();
        let mut stream_loop3 = stream_loop2.try_clone().unwrap();
        stream_loop.write("Enter F to chat with friend or A to add new friend\nEnter J for Join or C for Create chat rooms : ".as_bytes());
        let mut read_method = BufReader::new(stream_loop) ;
        let mut my_string = String :: new() ;
        read_method.read_line(&mut my_string) ;
        // Strip the 2-byte line terminator (assumes CRLF — TODO confirm).
        my_string.pop();
        my_string.pop();
        {
            users.lock().unwrap().set_busy_true(name.clone()) ;
        }
        let mut flag = false;
        {
            // True when another user has requested a private chat with us.
            flag = users.lock().unwrap().get_priavte_chat(name.clone());
        }
        if flag {
            // The line we just read is the answer to a pending invitation.
            if my_string == "Yes".to_string(){
                stream_loop2.write("you are now in private chat enter start to begin chat. \n".as_bytes());
                // Busy-wait until the inviter's side clears our busy flag.
                loop {
                    if users.lock().unwrap().get_busy(name.clone())==false {
                        break ;
                    }
                    continue ;
                }
            }
            else if my_string == "No".to_string(){
                stream_loop2.write("Decline chat request? (Y/N) \n".as_bytes());
                loop {
                    if users.lock().unwrap().get_busy(name.clone())==false {
                        break ;
                    }
                }
                continue ;
            }
            else {
                stream_loop2.write("Invalid response , Decline chat request? (Y/N), If Enter N chat will start\n".as_bytes());
                loop {
                    if users.lock().unwrap().get_busy(name.clone())==false {
                        break ;
                    }
                }
                continue ;
            }
        }
        else{
            // Normal menu handling: dispatch on the first character typed.
            let mut vec : Vec<char> = Vec :: new() ;
            for x in my_string.clone().chars() {
                vec.push(x);
            }
            if(vec[0] == 'A'){
                // Add a friend by user name (must be a registered account).
                stream_loop2.write("please enter your friends user name : ".as_bytes());
                let mut my_string = String :: new() ;
                read_method.read_line(&mut my_string) ;
                let mut flag = false;
                {
                    flag = users.lock().unwrap().contains_user(my_string.clone());
                }
                if flag{
                    users.lock().unwrap().add_friend(name.clone() , my_string.clone());
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                }
                else{
                    stream_loop2.write("no such user id\n".as_bytes());
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                }
            }
            else if (vec[0] == 'F'){
                // Private chat: show online friends, then either leave a
                // one-shot message (friend busy) or run the invite handshake.
                let vec = users.lock().unwrap().get_friend_list(name.clone());
                let mut temp : HashSet<String> = HashSet :: new() ;
                for x in 0..vec.len(){
                    let mut flag = false ;
                    {
                        flag = online_users.lock().unwrap().contains_key(&vec[x].clone());
                    }
                    if flag{
                        temp.insert(vec[x].clone());
                    }
                }
                if temp.len() == 0 {
                    stream_loop2.write("no friends online bohooooo\n".as_bytes());
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                }
                else{
                    stream_loop2.write("Here is your online friends list : \n".as_bytes());
                    for x in temp.iter() {
                        stream_loop2.write(x.clone().as_bytes());
                    }
                    stream_loop2.write("Enter a friends name : ".as_bytes());
                    let mut my_string = String :: new() ;
                    read_method.read_line(&mut my_string) ;
                    if temp.contains(&my_string.clone()) {
                        let mut temp : bool = false ;
                        {
                            temp = users.lock().unwrap().get_busy(my_string.clone());
                        }
                        if temp {
                            // Friend is busy: deliver an offline-style DM instead.
                            stream_loop2.write("User is busy , you can only send him/her a message : ".to_string().as_bytes());
                            let mut frd_stream = online_users.lock().unwrap().get_mut(&my_string).unwrap().try_clone().unwrap();
                            let mut my_string = String :: new() ;
                            read_method.read_line(&mut my_string) ;
                            let mut dm_sender_name = name.clone();
                            dm_sender_name.pop();
                            dm_sender_name.pop();
                            my_string = dm_sender_name + &" send you a messsage : ".to_string() + &my_string +
                            "(If you want to reply , you have to go back to looby) \n";
                            frd_stream.write(my_string.clone().as_bytes());
                            {
                                users.lock().unwrap().set_busy_false(name.clone()) ;
                            }
                            continue ;
                        }
                        else{
                            // Invite handshake: flag the friend, send the
                            // invitation on their stream, and wait for a reply.
                            {
                                users.lock().unwrap().set_private_chat_true(my_string.clone());
                            }
                            let mut stream1 : TcpStream ;
                            let mut stream2 : TcpStream ;
                            {
                                stream1 = online_users.lock().unwrap().get_mut(&my_string.clone()).unwrap().try_clone().unwrap()
                            }
                            {
                                stream2 = online_users.lock().unwrap().get_mut(&name.clone()).unwrap().try_clone().unwrap()
                            }
                            {
                                users.lock().unwrap().set_busy_true(my_string.clone());
                            }
                            let mut name1 = my_string.clone() ;
                            let mut name1_2 = my_string.clone();
                            let mut name2 = name.clone() ;
                            name1.pop();
                            name1.pop();
                            name2.pop();
                            name2.pop();
                            let mut stream1_1 = stream1.try_clone().unwrap();
                            let mut name_temp = name.clone();
                            name_temp.pop() ;
                            name_temp.pop() ;
                            let mut my_string = "\n".to_string()+ &name_temp + " would like to chat with you , Accept by entering Yes or else No\n" ;
                            stream1.write(my_string.as_bytes());
                            my_string = "waiting for other user to accept \n".to_string();
                            stream2.write(my_string.clone().as_bytes());
                            let mut my_string = String :: new() ;
                            let mut read = BufReader :: new(stream1_1);
                            read.read_line(&mut my_string);
                            my_string.pop();
                            my_string.pop();
                            // "start" comes from the friend's lobby after they
                            // answered Yes; "N" means "don't decline" — both start
                            // the chat.
                            if my_string == "start".to_string() || my_string == "N".to_string(){
                                stream1.write("Chat is live\n".to_string().as_bytes());
                                let quit_flag_indi = Quit_flag :: new() ;
                                let quit_flag_indi = Arc :: new(Mutex::new(quit_flag_indi));
                                let quit_flag_indi2 = quit_flag_indi.clone();
                                let quit_flag_indi_2 = Quit_flag :: new() ;
                                let quit_flag_indi_2 = Arc :: new(Mutex::new(quit_flag_indi_2));
                                let quit_flag_indi_2_2 = quit_flag_indi_2.clone();
                                stream2.write("Accepted , you can now start chatting\n".to_string().as_bytes());
                                user_chat_loop(stream1,stream2,name1.clone(),name2.clone() , quit_flag_indi , quit_flag_indi_2);
                                // Busy-wait until both chat threads have quit.
                                while quit_flag_indi2.lock().unwrap().get() == false || quit_flag_indi_2_2.lock().unwrap().get() == false{} ;
                                {
                                    users.lock().unwrap().set_private_chat_false(name1_2.clone());
                                }
                                {
                                    users.lock().unwrap().set_busy_false(name1_2.clone());
                                }
                                continue ;
                            }
                            else {
                                if my_string != "Y".to_string() {
                                    stream1.write("Invalid response , Request Declined\n".to_string().as_bytes());
                                }
                                {
                                    users.lock().unwrap().set_private_chat_false(name1_2.clone());
                                }
                                {
                                    users.lock().unwrap().set_busy_false(name1_2.clone());
                                }
                                stream2.write("Your request is Declined\n".as_bytes()) ;
                                {
                                    users.lock().unwrap().set_busy_false(name.clone()) ;
                                }
                                continue ;
                            }
                        }
                    }
                    stream_loop2.write("Wrong friend name ! \n".as_bytes());
                }
            }
            else if(vec[0] == 'C'){
                // Create a chat room, then immediately join it.
                {
                    stream_loop2.write("please enter chatroom name:".as_bytes());
                    let mut my_string = String :: new() ;
                    read_method.read_line(&mut my_string) ;
                    let group_chat1 = group_chat.clone();
                    create_chatroom(group_chat1 , my_string.clone());
                    let group_chat2 = group_chat.clone();
                    let mut quit_flag = Quit_flag::new() ;
                    let quit_flag = Arc :: new(Mutex::new(quit_flag));
                    let quit_flag2 = quit_flag.clone();
                    join_group_chat(stream_loop2 , my_string.clone() , group_chat2 , name.clone() , quit_flag2);
                    let chat_reminder : String = "Now you are in Chatroom ".to_string() + &my_string.clone() + &",type in 'QUIT' to quit to go back to lobby\n".to_string();
                    stream_loop3.write(chat_reminder.as_bytes());
                    // Busy-wait until the room threads signal QUIT.
                    while quit_flag.lock().unwrap().get() == false {}
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                    continue ;
                }
            }
            else if (vec[0] == 'J'){
                // Join an existing chat room from the listed names.
                let group_chat3 = group_chat.clone();
                let set = group_chat3.lock().unwrap().get_chatroom_list();
                if set.len() == 0 {
                    stream_loop2.write("There are no live chatrooms\n".as_bytes());
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                    continue;
                }
                for x in set.iter() {
                    stream_loop2.write(x.as_bytes());
                }
                stream_loop2.write("please enther chatroom name:".as_bytes());
                let mut my_string = String :: new() ;
                read_method.read_line(&mut my_string) ;
                if set.contains(&my_string.clone()) {
                    let group_chat = group_chat.clone();
                    let mut quit_flag = Quit_flag::new() ;
                    let quit_flag = Arc :: new(Mutex::new(quit_flag));
                    let quit_flag2 = quit_flag.clone();
                    join_group_chat(stream_loop2 , my_string.clone() , group_chat , name.clone() , quit_flag2);
                    let chat_reminder : String = "Now you are in Chatroom ".to_string() + &my_string.clone() + &",type in 'QUIT' to quit to go back to lobby\n".to_string();
                    stream_loop3.write(chat_reminder.as_bytes());
                    while quit_flag.lock().unwrap().get() == false{}
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                    continue ;
                }
                else {
                    stream_loop2.write("Wrong chatroom name! \n".as_bytes());
                    {
                        users.lock().unwrap().set_busy_false(name.clone()) ;
                    }
                    continue;
                }
            }
            else{
                stream_loop2.write("please enter valid response\n".as_bytes());
                {
                    users.lock().unwrap().set_busy_false(name.clone()) ;
                }
                continue;
            }
            // Fall-through for branches that did not `continue` (A, and the
            // wrong-friend-name path): clear the busy flag before re-prompting.
            {
                users.lock().unwrap().set_busy_false(name.clone()) ;
            }
        }
    }
}
/// Spawns two relay threads for a private chat: each reads lines from one
/// peer and forwards them (prefixed with the sender's name) to the other,
/// until either side types QUIT. `quit_flag` / `quit_flag2` let each thread
/// signal the other (and the waiting lobby loops) to shut down.
///
/// NOTE(review): returns immediately after spawning — callers synchronize on
/// the two quit flags, not on this function. Write `Result`s are ignored.
fn user_chat_loop (mut stream1 : TcpStream , mut stream2 : TcpStream , name1 : String , name2 : String ,
quit_flag : Arc<Mutex<Quit_flag>> , quit_flag2 : Arc<Mutex<Quit_flag>>){
    let mut stream1_2 = stream1.try_clone().unwrap() ;
    let mut stream1_3 = stream1.try_clone().unwrap() ;
    let mut stream1_4 = stream1.try_clone().unwrap() ;
    let mut stream2_2 = stream2.try_clone().unwrap() ;
    let mut stream2_3 = stream2.try_clone().unwrap() ;
    let mut stream2_4 = stream2.try_clone().unwrap() ;
    let quit_flag_thread1 = quit_flag.clone();
    let quit_flag_thread2 = quit_flag2.clone();
    let quit_flag_thread1_2 = quit_flag.clone();
    let quit_flag_thread2_2 = quit_flag2.clone();
    // Direction 1: user1 -> user2.
    thread :: spawn(move || {
        loop {
            // NOTE(review): a fresh BufReader per iteration may drop buffered
            // bytes that were read ahead — confirm this is acceptable here.
            let mut read_method = BufReader::new(&stream1_2) ;
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string) ;
            my_string.pop();
            my_string.pop();
            if my_string == "QUIT".to_string() {
                quit_flag_thread2.lock().unwrap().set() ;
                my_string = name1.clone() + " has quit plz enter any key to go back to lobby \n" ;
                stream2_3.write(my_string.clone().as_bytes());
                stream1_4.write("waiting for other user to quit\n".to_string().as_bytes());
                break ;
            }
            // Exit when the other direction signalled QUIT.
            let mut temp = false;
            {
                temp = quit_flag_thread1.lock().unwrap().get() ;
            }
            if temp {
                quit_flag_thread2.lock().unwrap().set();
                break ;
            }
            my_string = name1.clone() + " : "+ &my_string + "\n";
            stream2_3.write(my_string.clone().as_bytes()); // write to other users
        }
    });
    // Direction 2: user2 -> user1 (mirror of the thread above).
    thread :: spawn(move || {
        loop {
            let mut read_method = BufReader::new(&stream2_2) ;
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string) ;
            my_string.pop();
            my_string.pop();
            if my_string == "QUIT".to_string(){
                quit_flag_thread1_2.lock().unwrap().set() ;
                my_string = name2.clone() + " has quit plz enter any key to go back to lobby \n" ;
                stream1_3.write(my_string.clone().as_bytes());
                stream2_4.write("waiting for other user to quit\n".to_string().as_bytes());
                break ;
            }
            let mut temp = false;
            {
                temp = quit_flag_thread2_2.lock().unwrap().get() ;
            }
            if temp {
                quit_flag_thread1_2.lock().unwrap().set();
                break ;
            }
            my_string = name2.clone() + " : "+ &my_string + "\n";
            stream1_3.write(my_string.clone().as_bytes()); //Recieving from other user
        }
    });
}
/// Connects a user to the chat room named `name`: fetches the room's
/// broadcast channel ends, registers a per-member channel for incoming
/// lines, and starts the client's relay threads via `handle_client`.
fn join_group_chat (stream : TcpStream , name : String ,
group_chat :Arc<Mutex<Group_chat>> , user_name : String , quit_flag : Arc<Mutex<Quit_flag>>){
    let broadcast_tx = group_chat.lock().unwrap().get_sender(name.clone());
    // Fetched (and dropped at the end of this function) exactly as before;
    // the per-member channel below is what actually delivers lines.
    let _broadcast_rx = group_chat.lock().unwrap().get_receiver(name.clone());
    let (member_tx, member_rx) = chan::sync(100);
    group_chat.lock().unwrap().add_member(name.clone(), user_name.clone(), member_tx);
    handle_client(stream, broadcast_tx, member_rx, user_name, quit_flag, group_chat, name);
}
/// Runs one chat-room member's two relay threads: one delivers broadcast
/// lines from the room to the client's socket (skipping the member's own
/// messages by prefix comparison), the other reads the client's lines and
/// sends them to the room, handling QUIT.
///
/// NOTE(review): `rec_message_temp.truncate(name.len())` filters echoes by
/// name prefix — a member whose name is a prefix of another's would have
/// messages mis-filtered; confirm names are unique enough in practice.
fn handle_client(mut stream : TcpStream ,sender : chan :: Sender<String> ,
receiver : chan :: Receiver<String> , mut name : String ,
quit_flag : Arc<Mutex<Quit_flag>> , group_chat : Arc<Mutex<Group_chat>> , chat_name : String) {
    let mut clone_stream = stream.try_clone().unwrap() ;
    let mut clone_stream2 = stream.try_clone().unwrap() ;
    let sender = sender.clone() ;
    let receiver = receiver.clone() ;
    let name2 = name.clone();
    // Strip the 2-byte line terminator from the stored name for display.
    name.pop();
    name.pop();
    let mut name_2 = name.clone();
    let quit_flag_thread1 = quit_flag.clone();
    let quit_flag_thread2 = quit_flag.clone();
    let group_chat1 = group_chat.clone() ;
    thread:: spawn(move || { // Recieving from group chat
        loop{
            if quit_flag_thread1.lock().unwrap().get(){
                // Deregister so the broadcaster stops sending to this member.
                group_chat1.lock().unwrap().remove_member(chat_name.clone() , name2);
                break;
            }
            let rec_message = receiver.recv().unwrap();
            let mut rec_message_temp = rec_message.clone();
            rec_message_temp.truncate(name.len());
            // Skip lines this member sent (identified by the name prefix).
            if rec_message_temp != name.clone() {
                clone_stream.write(rec_message.as_bytes());
            }
        }
    });
    thread:: spawn(move || { //send to group chat
        loop {
            let mut read_method = BufReader::new(&clone_stream2) ;
            let mut my_string = String :: new() ;
            read_method.read_line(&mut my_string) ;
            my_string.pop();
            my_string.pop();
            if my_string == "QUIT".to_string() {
                quit_flag_thread2.lock().unwrap().set() ;
                my_string = name_2.clone() + " has left the group chat.\n";
                sender.send(my_string);
                break ;
            }
            my_string = name_2.clone() + " : "+ &my_string + "\n";
            sender.send(my_string);
        }
    });
}
/// Registers a new chat room and spawns its broadcaster thread, which
/// forwards every line arriving on the room's channel to each member's
/// per-member channel.
fn create_chatroom(group_chat: Arc<Mutex<Group_chat>>, name: String) {
    let (sender, receiver) = chan::sync(100000);
    {
        group_chat.lock().unwrap().create_group(name.clone(), sender.clone(), receiver.clone());
    }
    thread::spawn(move || loop {
        // Block for the next line, then fan it out under the registry lock.
        let line: String = receiver.recv().unwrap();
        let member_senders = group_chat.lock().unwrap().get_sender_list(name.clone());
        for member_tx in member_senders {
            member_tx.send(line.clone());
        }
    });
}
/// One-shot quit latch shared between relay threads: starts lowered and can
/// only be raised (there is no way to reset it).
struct Quit_flag{
    flag : bool,
}
impl Quit_flag {
    /// Creates the latch in the "not quit" state.
    fn new()->Quit_flag{
        Quit_flag{
            flag : false,
        }
    }
    /// Raises the flag.
    fn set(&mut self){
        self.flag = true;
    }
    /// Returns whether the flag has been raised.
    /// (`&mut self` kept for caller compatibility; the redundant
    /// `if flag { true } else { false }` is simplified away.)
    fn get(&mut self)->bool{
        self.flag
    }
}
/// One registered account, persisted in User_info.txt and mirrored in memory.
struct User_info{
    name : String ,       // user name as typed at registration (includes line ending)
    password : String,    // plain-text password — NOTE(review): unhashed on disk
    friend_list : HashSet<String>,
    busy : bool ,         // true while the user is mid-menu/chat
    private_chat : bool , // true while a private-chat invitation is pending
}
/// In-memory registry of all accounts, keyed by user name.
struct User_info_map{
    map : HashMap<String , User_info>
}
impl User_info_map{
    /// Loads all accounts from User_info.txt. Records are separated by
    /// "////////" and fields within a record by single spaces:
    /// `name password [friend...]`.
    ///
    /// NOTE(review): panics if the file is missing, and `vec[0]`/`vec[1]`
    /// panic on any segment with fewer than two fields (e.g. a leading or
    /// empty segment) — confirm the on-disk format always avoids this.
    fn new()->User_info_map{
        let mut map : HashMap<String , User_info> = HashMap :: new() ;
        let mut f = File::open("User_info.txt").unwrap() ;
        let mut s = String::new();
        f.read_to_string(&mut s);
        let mut split = s.split("////////");
        for temp in split {
            let mut split2 = temp.split(" ");
            let vec: Vec<&str> = split2.collect() ;
            let mut set_frds : HashSet<String> = HashSet :: new() ;
            // Fields beyond name and password are friend names.
            if vec.len() > 2 {
                for x in 2..vec.len(){
                    set_frds.insert(vec[x].to_string().clone());
                }
            }
            let mut user = User_info{name:vec[0].to_string().clone() , password : vec[1].to_string().clone() ,
            friend_list:set_frds , busy : false , private_chat : false};
            map.insert(vec[0].to_string().clone() , user);
        }
        User_info_map{
            map : map
        }
    }
    /// Returns all registered user names.
    fn get_users(&mut self) -> Vec<String>{
        let mut vec : Vec<String> = Vec :: new() ;
        for key in self.map.keys(){
            vec.push(key.clone());
        }
        return vec ;
    }
    /// Whether an account with this exact name (line ending included) exists.
    fn contains_user(&mut self , name : String) -> bool{
        return self.map.contains_key(& name);
    }
    /// Returns the stored password. Panics if the user does not exist.
    fn get_password(&mut self , name : String) -> String {
        self.map.get(&name).unwrap().password.clone()
    }
    /// Appends a new record to User_info.txt and registers it in memory.
    fn Create_user(&mut self , name : String , password : String) {
        let mut options = OpenOptions::new();
        options.write(true).append(true);
        let file = match options.open("User_info.txt") {
            Ok(file) => file,
            Err(..) => panic!("wth"),
        };
        let mut writer = BufWriter::new(&file);
        writer.write("////////".to_string().as_bytes());
        writer.write(name.as_bytes());
        writer.write(" ".to_string().as_bytes());
        writer.write(password.as_bytes());
        let temp = User_info{name : name.clone() , password : password.clone() ,
        friend_list : HashSet::new() , busy : false, private_chat : false};
        self.map.insert(name , temp);
    }
    /// Persists the friendship by appending a full fresh record for `name`
    /// (old records for the same user remain in the file) and updates the
    /// in-memory friend list.
    fn add_friend(&mut self , name : String , friend : String){
        let mut options = OpenOptions::new();
        options.write(true).append(true);
        let file = match options.open("User_info.txt") {
            Ok(file) => file,
            Err(..) => panic!("wth"),
        };
        let mut writer = BufWriter::new(&file);
        writer.write("////////".to_string().as_bytes());
        writer.write(name.as_bytes());
        writer.write(" ".to_string().as_bytes());
        writer.write(self.map.get(&name).unwrap().password.clone().as_bytes());
        for x in self.map.get(&name).unwrap().friend_list.iter() {
            writer.write(" ".to_string().as_bytes());
            writer.write(x.clone().as_bytes());
        }
        writer.write(" ".to_string().as_bytes());
        writer.write(friend.as_bytes());
        self.map.get_mut(&name).unwrap().friend_list.insert(friend);
    }
    /// Returns a copy of the user's friend list.
    /// NOTE(review): `set` is declared but never used.
    fn get_friend_list(&mut self , name : String) ->Vec<String>{
        let mut set : HashSet<String> = HashSet::new();
        let mut vec : Vec<String> = Vec :: new() ;
        for x in self.map.get(&name).unwrap().friend_list.iter() {
            vec.push(x.clone());
        }
        return vec ;
    }
    /// Marks the user as busy (in a menu or chat).
    fn set_busy_true(&mut self , user_name : String){
        self.map.get_mut(&user_name).unwrap().busy = true;
    }
    /// Clears the busy flag.
    fn set_busy_false(&mut self , user_name : String){
        self.map.get_mut(&user_name).unwrap().busy = false;
    }
    /// Reads the busy flag. Panics if the user does not exist.
    fn get_busy(&mut self , user_name : String) -> bool{
        if(self.map.get(&user_name).unwrap().busy){
            return true;
        }
        else {
            return false;
        }
    }
    /// Flags a pending private-chat invitation for the user.
    fn set_private_chat_true(&mut self , user_name : String){
        self.map.get_mut(&user_name).unwrap().private_chat = true;
    }
    /// Clears the pending-invitation flag.
    fn set_private_chat_false(&mut self , user_name : String){
        self.map.get_mut(&user_name).unwrap().private_chat = false;
    }
    /// Reads the pending-invitation flag.
    /// NOTE(review): name is a typo of "private", but callers use this
    /// spelling, so it cannot be renamed without touching them.
    fn get_priavte_chat(&mut self , user_name : String) -> bool{
        if(self.map.get(&user_name).unwrap().private_chat){
            return true;
        }
        else {
            return false;
        }
    }
}
/// One chat room's channel plumbing: the room's broadcast sender/receiver
/// plus a per-member sender for delivering fanned-out lines.
struct channels {
    sender : chan ::Sender<String> ,
    receiver : chan :: Receiver<String>,
    list_sender : HashMap<String , chan :: Sender<String>> ,
}
impl channels {
    /// Registers (or replaces) a member's delivery channel.
    fn add_member(&mut self , name : String,sender: chan :: Sender<String> ) {
        self.list_sender.insert(name , sender);
    }
}
/// Registry of live chat rooms, keyed by room name.
struct Group_chat {
    map : HashMap<String , channels>
}
impl Group_chat {
fn new() -> Group_chat{
Group_chat{
map : HashMap :: new()
}
}
fn create_group(&mut self , name: String , sender : chan ::Sender<String> , receiver : chan :: Receiver<String>) {
let mut temp = channels{sender : sender , receiver : receiver,list_sender : HashMap:: new()} ;
self.map.insert(name , temp);
}
fn get_sender(&mut self , name: String) -> chan::Sender<String>{
match self.map.get(&name) {
Some(temp) => {
return temp.sender.clone()
}
None => {
panic!("no such group") ;
}
}
}
fn get_receiver(&mut self , name: String) -> chan::Receiver<String>{
match self.map.get(&name) {
Some(temp) => {
return temp.receiver.clone()
}
None => {
panic!("no such group") ;
}
}
}
fn add_member (&mut self , chat_name: String ,users_name : String ,sender : chan ::Sender<String> ){
self.map.get_mut(&chat_name).unwrap().add_member(users_name , sender);
}
fn get_sender_list(&mut self , name : String) -> Vec<chan :: Sender<String>>{
let mut vec = Vec :: new();
for key in self.map.get(&name).unwrap().list_sender.keys(){
vec.push(self.map.get(&name).unwrap().list_sender.get(key).unwrap().clone());
}
vec
}
fn get_chatroom_list(&mut self) -> HashSet<String>{
let mut set : HashSet<String> = HashSet :: new() ;
for key in self.map.keys(){
set.insert(key.clone());
}
return set ;
}
fn remove_member(&mut self , chat_room : String , name : String){
let temp : &mut channels = self.map.get_mut(&chat_room).unwrap();
temp.list_sender.remove(&name);
}
} |
use crate::message_prelude::*;
use graphene::color::Color;
use crate::input::InputPreprocessor;
use crate::{
document::DocumentMessageHandler,
tool::{tool_options::ToolOptions, DocumentToolData, ToolFsmState, ToolType},
};
use std::collections::VecDeque;
#[impl_message(Message, Tool)]
#[derive(PartialEq, Clone, Debug)]
/// Messages handled by the tool system.
///
/// Variants marked `#[child]` wrap a tool-specific sub-message enum; the
/// handler below maps each to its owning tool and forwards it there.
pub enum ToolMessage {
// Switch the active tool.
SelectTool(ToolType),
// Change the working primary/secondary colors.
SelectPrimaryColor(Color),
SelectSecondaryColor(Color),
SwapColors,
ResetColors,
// Replace the stored options for a given tool.
SetToolOptions(ToolType, ToolOptions),
#[child]
Fill(FillMessage),
#[child]
Rectangle(RectangleMessage),
#[child]
Ellipse(EllipseMessage),
#[child]
Select(SelectMessage),
#[child]
Line(LineMessage),
#[child]
Crop(CropMessage),
#[child]
Eyedropper(EyedropperMessage),
#[child]
Navigate(NavigateMessage),
#[child]
Path(PathMessage),
#[child]
Pen(PenMessage),
#[child]
Shape(ShapeMessage),
}
#[derive(Debug, Default)]
/// Top-level handler owning the tool finite-state-machine state and
/// dispatching [`ToolMessage`]s.
pub struct ToolMessageHandler {
// Active tool, per-tool handlers and document-wide tool data.
tool_state: ToolFsmState,
}
impl MessageHandler<ToolMessage, (&DocumentMessageHandler, &InputPreprocessor)> for ToolMessageHandler {
// Handles global tool messages (colors, tool selection, options) directly;
// every other message is mapped to the tool that owns it and forwarded to
// that tool's own handler.
fn process_action(&mut self, message: ToolMessage, data: (&DocumentMessageHandler, &InputPreprocessor), responses: &mut VecDeque<Message>) {
let (document, input) = data;
use ToolMessage::*;
match message {
SelectPrimaryColor(c) => {
self.tool_state.document_tool_data.primary_color = c;
update_working_colors(&self.tool_state.document_tool_data, responses);
}
SelectSecondaryColor(c) => {
self.tool_state.document_tool_data.secondary_color = c;
update_working_colors(&self.tool_state.document_tool_data, responses);
}
SelectTool(tool) => {
// Abort any in-progress interaction on both the newly selected tool
// and the previously active one, so neither is left mid-operation.
let mut reset = |tool| match tool {
ToolType::Ellipse => responses.push_back(EllipseMessage::Abort.into()),
ToolType::Rectangle => responses.push_back(RectangleMessage::Abort.into()),
ToolType::Shape => responses.push_back(ShapeMessage::Abort.into()),
ToolType::Line => responses.push_back(LineMessage::Abort.into()),
ToolType::Pen => responses.push_back(PenMessage::Abort.into()),
_ => (),
};
reset(tool);
reset(self.tool_state.tool_data.active_tool_type);
self.tool_state.tool_data.active_tool_type = tool;
// Notify the frontend which tool is now active.
responses.push_back(FrontendMessage::SetActiveTool { tool_name: tool.to_string() }.into())
}
SwapColors => {
let doc_data = &mut self.tool_state.document_tool_data;
std::mem::swap(&mut doc_data.primary_color, &mut doc_data.secondary_color);
update_working_colors(doc_data, responses);
}
ResetColors => {
let doc_data = &mut self.tool_state.document_tool_data;
doc_data.primary_color = Color::BLACK;
doc_data.secondary_color = Color::WHITE;
update_working_colors(doc_data, responses);
}
SetToolOptions(tool_type, tool_options) => {
self.tool_state.document_tool_data.tool_options.insert(tool_type, tool_options);
}
message => {
// Tool-specific (#[child]) message: determine the owning tool...
let tool_type = match message {
Fill(_) => ToolType::Fill,
Rectangle(_) => ToolType::Rectangle,
Ellipse(_) => ToolType::Ellipse,
Shape(_) => ToolType::Shape,
Line(_) => ToolType::Line,
Pen(_) => ToolType::Pen,
Select(_) => ToolType::Select,
Crop(_) => ToolType::Crop,
Eyedropper(_) => ToolType::Eyedropper,
Navigate(_) => ToolType::Navigate,
Path(_) => ToolType::Path,
// All remaining variants were consumed by the arms above.
_ => unreachable!(),
};
// ...and forward the message to that tool, if one is registered.
if let Some(tool) = self.tool_state.tool_data.tools.get_mut(&tool_type) {
tool.process_action(message, (document, &self.tool_state.document_tool_data, input), responses);
}
}
}
}
// Advertises the globally available actions plus those of the active tool.
fn actions(&self) -> ActionList {
let mut list = actions!(ToolMessageDiscriminant; ResetColors, SwapColors, SelectTool, SetToolOptions);
list.extend(self.tool_state.tool_data.active_tool().actions());
list
}
}
/// Pushes a frontend message carrying the current primary/secondary colors.
fn update_working_colors(doc_data: &DocumentToolData, responses: &mut VecDeque<Message>) {
    let message = FrontendMessage::UpdateWorkingColors {
        primary: doc_data.primary_color,
        secondary: doc_data.secondary_color,
    };
    responses.push_back(message.into());
}
|
use std::fmt;
use std::fmt::Write;
use crate::strum::IntoEnumIterator;
/// Error returned by the API layer.
#[derive(Debug, Clone, PartialEq)]
pub struct ApiError {
// Broad category of the failure.
pub kind: ApiErrorKind,
// Human-readable detail message.
pub msg: String,
}
/// Broad categories of API errors.
#[derive(Debug, Clone, PartialEq)]
pub enum ApiErrorKind {
InvalidFilter,
InvalidFilterValue,
InvalidStructure,
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Type:{:?}, Msg: {}", self.kind, self.msg)
}
}
impl std::error::Error for ApiError {}
#[derive(EnumIter, Debug, PartialEq)]
/// All valid Filter fields
///
/// Given by: https://coronavirus.data.gov.uk/developers-guide#params-filters
///
/// Last update: 2020/09/12
///
/// Field Descriptions:
///
/// <br> areaType - Area type as string
/// <br> areaName - Area name as string
/// <br> areaCode - Area Code as string
/// <br> date - Date as string [YYYY-MM-DD]
pub enum Filters {
areaType,
areaName,
areaCode,
date,
}
impl Filters {
    /// Exports all valid filter fields to a Vec of strings
    pub fn to_vec() -> Vec<String> {
        // Iterator + collect replaces the manual push loop.
        Filters::iter().map(|filter| format!("{:?}", filter)).collect()
    }
    /// Creates a string containing all valid filter fields, on separate lines
    ///
    /// NOTE(review): this is an associated function, unrelated to
    /// `Display::to_string`.
    pub fn to_string() -> String {
        let mut filter_str = String::new();
        for filter in Filters::iter() {
            // Append a trailing newline per field so the output matches the
            // documented "on separate lines" contract and the behavior of
            // `Structures::to_string` (previously the names were concatenated
            // with no separator at all).
            filter_str.push_str(&format!("{:?}\n", filter));
        }
        filter_str
    }
    /// Creates a string containing all filter field descriptions, on separate lines
    pub fn field_descriptions() -> String {
        String::from(
"\n areaType - Area type as string
\n areaName - Area name as string
\n areaCode - Area Code as string
\n date - Date as string [YYYY-MM-DD]",
        )
    }
}
#[derive(EnumIter, Debug)]
/// All valid AreaType values
///
/// Given by: https://coronavirus.data.gov.uk/developers-guide#params-structure
///
/// Last update: 2020/09/12
///
/// Field Descriptions:
///
/// <br> overview - Overview data for the United Kingdom
/// <br> nation - Nation data (England, Northern Ireland, Scotland, and Wales)
/// <br> region - Region data
/// <br> nhsRegion - NHS Region data
/// <br> utla - Upper-tier local authority data
/// <br> ltla - Lower-tier local authority data
pub enum AreaType {
overview,
nation,
region,
nhsRegion,
utla,
ltla,
}
impl AreaType {
    /// Exports all valid AreaType values to a Vec of strings
    pub fn to_vec() -> Vec<String> {
        // Iterator + collect replaces the manual push loop (and the
        // non-snake-case `areaType` loop variable).
        AreaType::iter().map(|area_type| format!("{:?}", area_type)).collect()
    }
    /// Creates a string containing all valid AreaType values, on separate lines
    ///
    /// NOTE(review): this is an associated function, unrelated to
    /// `Display::to_string`.
    pub fn to_string() -> String {
        let mut area_type_str = String::new();
        for area_type in AreaType::iter() {
            // Append a trailing newline per value so the output matches the
            // documented "on separate lines" contract and the behavior of
            // `Structures::to_string` (previously the values were concatenated
            // with no separator at all).
            area_type_str.push_str(&format!("{:?}\n", area_type));
        }
        area_type_str
    }
    /// Creates a string containing all structure field descriptions, on separate lines
    pub fn field_description() -> String {
        String::from(
"\n overview - Overview data for the United Kingdom
\n nation - Nation data (England, Northern Ireland, Scotland, and Wales)
\n region - Region data
\n nhsRegion - NHS Region data
\n utla - Upper-tier local authority data
\n ltla - Lower-tier local authority data",
        )
    }
}
#[derive(EnumIter, Debug)]
/// All valid structure fields
///
/// Given by: https://coronavirus.data.gov.uk/developers-guide#params-structure
///
/// Last update: 2020/09/12
///
/// Field Descriptions:
///
/// <br> areaType - Area type as string
/// <br> areaName - Area name as string
/// <br> areaCode - Area Code as string
/// <br> date - Date as string [YYYY-MM-DD]
/// <br> hash - Unique ID as string
/// <br>
/// <br> newCasesByPublishDate - New cases by publish date
/// <br> cumCasesByPublishDate - Cumulative cases by publish date
/// <br> cumCasesBySpecimenDateRate - Rate of cumulative cases by publish date per 100k resident population
/// <br> newCasesBySpecimenDate - New cases by specimen date
/// <br> cumCasesBySpecimenDateRate - Rate of cumulative cases by specimen date per 100k resident population
/// <br> cumCasesBySpecimenDate - Cumulative cases by specimen date
/// <br> maleCases - Male cases (by age)
/// <br> femaleCases - Female cases (by age)
/// <br>
/// <br> newPillarOneTestsByPublishDate - New pillar one tests by publish date
/// <br> cumPillarOneTestsByPublishDate - Cumulative pillar one tests by publish date
/// <br> newPillarTwoTestsByPublishDate - New pillar two tests by publish date
/// <br> cumPillarTwoTestsByPublishDate - Cumulative pillar two tests by publish date
/// <br> newPillarThreeTestsByPublishDate - New pillar three tests by publish date
/// <br> cumPillarThreeTestsByPublishDate - Cumulative pillar three tests by publish date
/// <br> newPillarFourTestsByPublishDate - New pillar four tests by publish date
/// <br> cumPillarFourTestsByPublishDate - Cumulative pillar four tests by publish date
/// <br>
/// <br> newAdmissions - New admissions
/// <br> cumAdmissions - Cumulative number of admissions
/// <br> cumAdmissionsByAge - Cumulative admissions by age
/// <br> cumTestsByPublishDate - Cumulative tests by publish date
/// <br> newTestsByPublishDate - New tests by publish date
/// <br> covidOccupiedMVBeds - COVID-19 occupied beds with mechanical ventilators
/// <br> hospitalCases - Hospital cases
/// <br> plannedCapacityByPublishDate - Planned capacity by publish date
/// <br>
/// <br> newDeaths28DaysByPublishDate - Deaths within 28 days of positive test
/// <br> cumDeaths28DaysByPublishDate - Cumulative deaths within 28 days of positive test
/// <br> cumDeaths28DaysByPublishDateRate - Rate of cumulative deaths within 28 days of positive test per 100k resident population
/// <br> newDeaths28DaysByDeathDate - Deaths within 28 days of positive test by death date
/// <br> cumDeaths28DaysByDeathDate - Cumulative deaths within 28 days of positive test by death date
/// <br> cumDeaths28DaysByDeathDateRate - Rate of cumulative deaths within 28 days of positive test by death date per 100k resident population
pub enum Structures {
areaType,
areaName,
areaCode,
date,
hash,
newCasesByPublishDate,
cumCasesByPublishDate,
cumCasesBySpecimenDateRate,
newCasesBySpecimenDate,
maleCases,
femaleCases,
newPillarOneTestsByPublishDate,
cumPillarOneTestsByPublishDate,
newPillarTwoTestsByPublishDate,
cumPillarTwoTestsByPublishDate,
newPillarThreeTestsByPublishDate,
cumPillarThreeTestsByPublishDate,
newPillarFourTestsByPublishDate,
cumPillarFourTestsByPublishDate,
newAdmissions,
cumAdmissions,
cumAdmissionsByAge,
newTestsByPublishDate,
cumTestsByPublishDate,
covidOccupiedMVBeds,
hospitalCases,
plannedCapacityByPublishDate,
newDeaths28DaysByPublishDate,
cumDeaths28DaysByPublishDate,
cumDeaths28DaysByPublishDateRate,
newDeaths28DaysByDeathDate,
cumDeaths28DaysByDeathDate,
cumDeaths28DaysByDeathDateRate,
}
impl Structures {
    /// Exports all valid structure fields to a Vec of strings
    pub fn to_vec() -> Vec<String> {
        // Iterator + collect replaces the manual push loop.
        Structures::iter().map(|structure| format!("{:?}", structure)).collect()
    }
    /// Creates a string containing all valid structure fields, on separate lines
    ///
    /// NOTE(review): this is an associated function, unrelated to
    /// `Display::to_string`.
    pub fn to_string() -> String {
        let mut structure_str = String::new();
        for structure in Structures::iter() {
            structure_str.push_str(&format!("{:?}\n", structure));
        }
        structure_str
    }
    /// Creates a string containing all structure field descriptions, on separate lines
    pub fn field_description() -> String {
        String::from("\n areaType - Area type as string
\n areaName - Area name as string
\n areaCode - Area Code as string
\n date - Date as string [ - YYYY-MM-DD - ]
\n hash - Unique ID as string
\n
\n newCasesByPublishDate - New cases by publish date
\n cumCasesByPublishDate - Cumulative cases by publish date
\n cumCasesBySpecimenDateRate - Rate of cumulative cases by publish date per 100k resident population
\n newCasesBySpecimenDate - New cases by specimen date
\n cumCasesBySpecimenDateRate - Rate of cumulative cases by specimen date per 100k resident population
\n cumCasesBySpecimenDate - Cumulative cases by specimen date
\n maleCases - Male cases (by age)
\n femaleCases - Female cases (by age)
\n
\n newPillarOneTestsByPublishDate - New pillar one tests by publish date
\n cumPillarOneTestsByPublishDate - Cumulative pillar one tests by publish date
\n newPillarTwoTestsByPublishDate - New pillar two tests by publish date
\n cumPillarTwoTestsByPublishDate - Cumulative pillar two tests by publish date
\n newPillarThreeTestsByPublishDate - New pillar three tests by publish date
\n cumPillarThreeTestsByPublishDate - Cumulative pillar three tests by publish date
\n newPillarFourTestsByPublishDate - New pillar four tests by publish date
\n cumPillarFourTestsByPublishDate - Cumulative pillar four tests by publish date
\n
\n newAdmissions - New admissions
\n cumAdmissions - Cumulative number of admissions
\n cumAdmissionsByAge - Cumulative admissions by age
\n cumTestsByPublishDate - Cumulative tests by publish date
\n newTestsByPublishDate - New tests by publish date
\n covidOccupiedMVBeds - COVID-19 occupied beds with mechanical ventilators
\n hospitalCases - Hospital cases
\n plannedCapacityByPublishDate - Planned capacity by publish date
\n
\n newDeaths28DaysByPublishDate - Deaths within 28 days of positive test
\n cumDeaths28DaysByPublishDate - Cumulative deaths within 28 days of positive test
\n cumDeaths28DaysByPublishDateRate - Rate of cumulative deaths within 28 days of positive test per 100k resident population
\n newDeaths28DaysByDeathDate - Deaths within 28 days of positive test by death date
\n cumDeaths28DaysByDeathDate - Cumulative deaths within 28 days of positive test by death date
\n cumDeaths28DaysByDeathDateRate - Rate of cumulative deaths within 28 days of positive test by death date per 100k resident population")
    }
}
|
/* This is part of mktcb - which is under the MIT License ********************/
// Traits ---------------------------------------------------------------------
use std::io::Write;
// ----------------------------------------------------------------------------
use std::path::PathBuf;
use std::process::Command;
use snafu::{ResultExt, ensure};
use crate::error::Result;
use crate::error;
use crate::config::Config;
use crate::download;
use crate::patch;
use crate::util;
use crate::toolchain::Toolchain;
use crate::interrupt::Interrupt;
/// Driver for fetching, patching and building a specific U-Boot version.
pub struct Uboot {
// Where tarballs and the version marker are stored.
download_dir: PathBuf,
// Unpacked U-Boot source tree.
source_dir: PathBuf,
// Out-of-tree build directory (passed to make as O=).
build_dir: PathBuf,
// Directory holding local patches for this U-Boot version.
patches_dir: PathBuf,
// U-Boot version string.
version: String,
// Marker file recording which version has been fetched.
version_file: PathBuf,
// Optional initial build configuration to copy into build_dir.
config: Option<PathBuf>,
// Tarball download URL.
url: url::Url,
// Used to defer interruption (SIGINT) around critical sections.
interrupt: Interrupt,
// Target architecture passed to make as ARCH=.
arch: String,
// Parallel make jobs (-j).
jobs: usize,
}
impl Uboot {
// Persist the fetched U-Boot version into the version marker file.
fn write_version(&self) -> Result<()> {
let mut file = std::fs::File::create(&self.version_file).context(
error::CreateFileError{path: self.version_file.clone()})?;
write!(file, "{}", self.version)
.context(error::FailedToWrite{path: self.version_file.clone()})?;
Ok(())
}
// Download and unpack the U-Boot tarball, copy the initial configuration,
// apply local patches, then record the version marker.
fn download(&self) -> Result<()> {
let mut http_handle = curl::easy::Easy::new();
download::to_unpacked_dir(
&mut http_handle, &self.url, &self.download_dir, &self.source_dir)?;
// Copy the initial configuration, if any
util::copy_config(&self.config, &self.build_dir)?;
// Apply patches on the working directory and then write the version.
// A sigint may not interrupt this...
// NOTE(review): no matching unlock is visible in this file — presumably
// the Interrupt lock is released on drop or elsewhere; confirm.
self.interrupt.lock();
patch::apply_patches_in(&self.patches_dir, &self.source_dir)?;
self.write_version()
}
/// Run `make -- <make_target>` out-of-tree with ARCH/CROSS_COMPILE set,
/// after ensuring the toolchain is available.
///
/// # Errors
/// Fails if make cannot be spawned or exits unsuccessfully.
pub fn make(&self, make_target: &str, toolchain: &Toolchain) -> Result<()> {
toolchain.fetch()?;
let status = Command::new("make")
.arg(format!("O={}", self.build_dir.to_str().unwrap()))
.arg(format!("ARCH={}", self.arch))
.arg(format!("CROSS_COMPILE={}", toolchain.cross_compile))
.arg("-C").arg(self.source_dir.clone())
.arg(format!("-j{}", self.jobs))
.arg("--")
.arg(make_target)
.status()
.context(error::ProgFailed{ proc: "make".to_string() })?;
ensure!(status.success(), error::MakeFailed{
target: make_target.to_string() });
Ok(())
}
/// Fetch the U-Boot sources if the version marker file is absent.
///
/// Refuses to proceed when the source directory exists without its
/// version file, which indicates a corrupted or partial earlier fetch.
pub fn fetch(&self) -> Result<()> {
if ! self.version_file.exists() {
ensure!(! self.source_dir.exists(), error::CorruptedSourceDir{
dir: self.source_dir.clone(),
version_file: self.version_file.clone(),
});
self.download()
} else {
Ok(())
}
}
}
/// Compose the path `<base_dir>/u-boot-<version>` for a given U-Boot version.
fn make_version_dir(base_dir: &PathBuf, version: &str) -> PathBuf {
    // Path::join replaces the clone-then-push dance in one expression.
    base_dir.join(format!("u-boot-{}", version))
}
/// Compose the path `<base_dir>/patches/uboot/<version>` holding the local
/// patches for a given U-Boot version.
fn make_patches_dir(base_dir: &PathBuf, version: &str) -> PathBuf {
    // Chained joins replace the clone-then-push sequence.
    base_dir.join("patches").join("uboot").join(version)
}
pub fn new(config: &Config, interrupt: Interrupt) -> Result<Uboot> {
let version = config.uboot.version.clone();
let url = format!("ftp://ftp.denx.de/pub/u-boot/u-boot-{}.tar.bz2", version);
// Compose the path to the version file
let mut v_file = config.download_dir.clone();
v_file.push(format!("u-boot-{}.version", version));
Ok(Uboot {
download_dir: config.download_dir.clone(),
source_dir: make_version_dir(&config.download_dir, &version),
build_dir: make_version_dir(&config.build_dir, &version),
patches_dir: make_patches_dir(&config.lib_dir, &version),
version_file: v_file,
url: url::Url::parse(&url).context(error::InvalidUbootURL{})?,
config: config.uboot.config.clone(),
version: version,
arch: config.toolchain.uboot_arch.clone(),
interrupt: interrupt,
jobs: config.jobs,
})
}
|
/// Something that can describe why it is special.
trait SpecialObject {
/// Returns a short description of this object's specialness.
fn i_am_special(&self) -> &str;
}
/// A simple identified value.
#[derive(Debug)]
struct Object {
    id: String,
    value: i32
}

impl Object {
    /// Construct an `Object` from its id and value.
    fn new(id: String, value: i32) -> Self {
        Self { id, value }
    }
}
impl SpecialObject for Object {
    /// An `Object` is special by way of its id.
    fn i_am_special(&self) -> &str {
        // &String coerces to &str in return position.
        &self.id
    }
}
fn main() {
let my_object = Object::new("what?".to_string(), 42);
println!("I am special: {}", my_object.i_am_special());
println!("{:?}", my_object);
} |
use common::result::Result;
/// Hashing strategy for user passwords.
///
/// Implementations must be thread-safe (`Sync + Send`).
pub trait PasswordHasher: Sync + Send {
/// Hashes `plain_password`, returning the encoded hash string.
fn hash(&self, plain_password: &str) -> Result<String>;
/// Checks `plain_password` against `hashed_password`; presumably returns
/// `true` on a match — confirm against implementations.
fn compare(&self, hashed_password: &str, plain_password: &str) -> bool;
}
|
//! Classification of bytes within JSON quote sequences.
//!
//! Provides the [`QuoteClassifiedBlock`] struct and [`QuoteClassifiedIterator`] trait
//! that allow effectively enriching JSON inputs with quote sequence information.
//!
//! The output of quote classification is an iterator of [`QuoteClassifiedBlock`]
//! which contain bitmasks whose lit bits signify characters that are within quotes
//! in the source document. These characters need to be ignored.
//!
//! Note that the actual quote characters are not guaranteed to be classified
//! as "within themselves" or otherwise. In particular the current implementation
//! marks _opening_ quotes with lit bits, but _closing_ quotes are always unmarked.
//! This behavior should not be presumed to be stable, though, and can change
//! without a major semver bump.
//!
//! # Examples
//! ```
//! use rsonpath::classification::quotes::{classify_quoted_sequences, QuoteClassifiedIterator};
//! use rsonpath::input::{Input, OwnedBytes};
//! use rsonpath::result::empty::EmptyRecorder;
//! use rsonpath::FallibleIterator;
//!
//! let json = r#"{"x": "string", "y": {"z": "\"escaped\""}}"#.to_owned();
//! // 011000111111100011000011000111111111111000
//! // The mask below appears reversed due to endianness.
//! let expd = 0b000111111111111000110000110001111111000110;
//! let input = OwnedBytes::try_from(json).unwrap();
//! let iter = input.iter_blocks::<_, 64>(&EmptyRecorder);
//! let mut quote_classifier = classify_quoted_sequences(iter);
//!
//! let block = quote_classifier.next().unwrap().unwrap();
//! assert_eq!(expd, block.within_quotes_mask);
//! ```
use crate::{
input::{error::InputError, InputBlock, InputBlockIterator},
FallibleIterator, MaskType, BLOCK_SIZE,
};
use cfg_if::cfg_if;
/// Input block with a bitmask signifying which characters are within quotes.
///
/// Characters within quotes in the input are guaranteed to have their corresponding
/// bit in `within_quotes_mask` set. The $0$-th bit of the mask corresponds to the
/// last character in `block`, the $1$-st bit to the second-to-last character, etc.
///
/// There is no guarantee on how the boundary quote characters are classified,
/// their bits might be lit or not lit depending on the implementation.
///
/// Type parameters: `B` is the underlying input block type, `M` the bitmask
/// integer type, and `N` the block size.
pub struct QuoteClassifiedBlock<B, M, const N: usize> {
/// The block that was classified.
pub block: B,
/// Mask marking characters within a quoted sequence.
pub within_quotes_mask: M,
}
/// Trait for quote classifier iterators, i.e. finite iterators
/// enriching blocks of input with quote bitmasks.
/// Iterator is allowed to hold a reference to the JSON document valid for `'i`.
pub trait QuoteClassifiedIterator<'i, I: InputBlockIterator<'i, N>, M, const N: usize>:
FallibleIterator<Item = QuoteClassifiedBlock<I::Block, M, N>, Error = InputError>
{
/// Get the total offset in bytes from the beginning of input.
fn get_offset(&self) -> usize;
/// Move the iterator `count` blocks forward.
/// Effectively skips `count * Twice<BlockAlignment>::size()` bytes.
// NOTE(review): `count` is `isize`, but whether negative (rewind) values are
// supported is not evident from this file — confirm with implementations.
fn offset(&mut self, count: isize);
/// Flip the bit representing whether the last block ended with a nonescaped quote.
///
/// This should be done only in very specific circumstances where the previous-block
/// state could have been damaged due to stopping and resuming the classification at a later point.
fn flip_quotes_bit(&mut self);
}
/// Higher-level classifier that can be consumed to retrieve the inner
/// [`Input::BlockIterator`](crate::input::Input::BlockIterator).
pub trait InnerIter<I> {
/// Consume `self` and return the wrapped [`Input::BlockIterator`](crate::input::Input::BlockIterator).
/// (e.g. so raw iteration can continue once classification is no longer needed).
fn into_inner(self) -> I;
}
impl<'i, B, M, const N: usize> QuoteClassifiedBlock<B, M, N>
where
B: InputBlock<'i, N>,
{
/// Returns the length of the classified block.
/// Delegates to the underlying input block.
#[must_use]
#[inline(always)]
pub fn len(&self) -> usize {
self.block.len()
}
/// Whether the classified block is empty.
#[must_use]
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.block.is_empty()
}
}
// SIMD backend implementations; availability is gated on target architecture.
// The concrete backend is chosen by the cfg_if selection that follows.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod avx2_32;
#[cfg(target_arch = "x86_64")]
mod avx2_64;
// Portable fallback with no SIMD requirements.
mod nosimd;
mod shared;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod ssse3_32;
#[cfg(target_arch = "x86_64")]
mod ssse3_64;
cfg_if! {
// Compile-time selection of the quote classifier backend. The `simd = "..."`
// cfg values are presumably set by a build script — confirm; when building
// docs or without the `simd` feature the portable sequential classifier is
// used.
if #[cfg(any(doc, not(feature = "simd")))] {
type ClassifierImpl<'i, I, const N: usize> = nosimd::SequentialQuoteClassifier<'i, I, N>;
}
else if #[cfg(all(simd = "avx2_64", target_arch = "x86_64"))] {
type ClassifierImpl<'i, I> = avx2_64::Avx2QuoteClassifier64<'i, I>;
}
else if #[cfg(all(simd = "avx2_32", any(target_arch = "x86_64", target_arch = "x86")))] {
type ClassifierImpl<'i, I> = avx2_32::Avx2QuoteClassifier32<'i, I>;
}
else if #[cfg(all(simd = "ssse3_64", target_arch = "x86_64"))] {
type ClassifierImpl<'i, I> = ssse3_64::Ssse3QuoteClassifier64<'i, I>;
}
else if #[cfg(all(simd = "ssse3_32", any(target_arch = "x86_64", target_arch = "x86")))] {
type ClassifierImpl<'i, I> = ssse3_32::Ssse3QuoteClassifier32<'i, I>;
}
else {
compile_error!("Target architecture is not supported by SIMD features of this crate. Disable the default `simd` feature.");
}
}
/// Walk through the JSON document represented by `bytes`
/// and classify quoted sequences.
///
/// Returns a [`QuoteClassifiedIterator`] over the input's blocks; the chosen
/// implementation depends on the compile-time SIMD configuration above.
#[must_use]
#[inline(always)]
pub fn classify_quoted_sequences<'i, I>(
iter: I,
) -> impl QuoteClassifiedIterator<'i, I, MaskType, BLOCK_SIZE> + InnerIter<I>
where
I: InputBlockIterator<'i, BLOCK_SIZE>,
{
ClassifierImpl::new(iter)
}
/// Resume quote classification, optionally with an already-fetched
/// `first_block`.
///
/// Returns the classifier and, when `first_block` was supplied, the
/// classification of that block; the exact semantics are backend-specific
/// (`ClassifierImpl::resume`).
pub(crate) fn resume_quote_classification<'i, I>(
iter: I,
first_block: Option<I::Block>,
) -> (
impl QuoteClassifiedIterator<'i, I, MaskType, BLOCK_SIZE> + InnerIter<I>,
Option<QuoteClassifiedBlock<I::Block, MaskType, BLOCK_SIZE>>,
)
where
I: InputBlockIterator<'i, BLOCK_SIZE>,
{
ClassifierImpl::resume(iter, first_block)
}
|
mod common;
use common::Sudoku6x6;
use dancing_links::{
sudoku::{self, Sudoku},
Solver,
};
use crate::common::{format_sudoku_possibilities, parse_sudoku_possibilities};
// Basing these exact counts off of https://en.wikipedia.org/wiki/Mathematics_of_Sudoku#Sudoku_with_rectangular_regions
#[test]
#[cfg_attr(miri, ignore)]
// A 4x4 Sudoku (box size 2) with no givens has exactly 288 solved grids,
// per the Wikipedia reference above.
fn enumerate_all_sudoku_solutions_small() {
let puzzle_4x4 = Sudoku::new(2, std::iter::empty());
let solver_4x4 = Solver::new(&puzzle_4x4);
assert_eq!(solver_4x4.count(), 288);
}
#[test]
#[ignore]
// This takes too long to run. The test below generates 10,000 solutions in 1
// minute, which would give 47 hours to complete this test.
//
// In release mode this should finish in around 1.33 hours.
// Exhaustively counts all 6x6 solutions; the expected total comes from the
// Wikipedia reference above.
fn enumerate_all_sudoku_solutions_large() {
let puzzle_6x6 = Sudoku6x6::empty();
let solver_6x6 = Solver::new(&puzzle_6x6);
assert_eq!(solver_6x6.count(), 28_200_960);
}
#[test]
#[ignore]
// This test takes 0.967 minutes to run, which is too long for a normal suite.
//
// In release mode this runs in 1.7 seconds, which is fine.
fn enumerate_many_sudoku_solutions() {
let puzzle_6x6 = Sudoku6x6::empty();
let solver_6x6 = Solver::new(&puzzle_6x6);
// Assert that the number of solutions is at least 10,000.
// (take(10_000) caps the count, so equality means >= 10,000 exist.)
assert_eq!(solver_6x6.take(10_000).count(), 10_000);
}
#[test]
#[cfg_attr(miri, ignore)]
// End-to-end check: a standard 9x9 puzzle with a unique solution solves to
// the expected grid.
fn single_sudoku_test() {
// NOTE(review): env_logger::init here assumes no other test in this run
// initializes the logger — confirm if more logging tests are added.
env_logger::init();
let sudoku_input =
"006008047000607200304009060003100005010020480740005009020930600081000034905006170";
let expected_solved_sudoku =
"296318547158647293374259861863194725519723486742865319427931658681572934935486172";
let (puzzle, filled_values) = parse_sudoku_possibilities(sudoku_input, 3);
let mut solver = Solver::new(&puzzle);
let solutions = solver.all_solutions();
// The puzzle is expected to have exactly one solution.
assert_eq!(solutions.len(), 1);
let solution = &solutions[0];
// Re-combine the given clues with the solver's filled cells for formatting.
let actual_solved_sudoku = format_sudoku_possibilities(
filled_values
.into_iter()
.map(|poss| sudoku::Possibility::from_latin(poss, 3))
.chain(solution.iter().map(|p| **p)),
3,
);
assert_eq!(actual_solved_sudoku, expected_solved_sudoku);
}
|
//! This module defines the function pointers for supported traits from the standard library.
//!
//! `CloneFromFn` and `DropFn` enable the use of `VecClone`.
//!
//! The remaining traits improve compatibility with the rest of the standard library.
use crate::bytes::*;
use dyn_derive::dyn_trait_method;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::mem::ManuallyDrop;
/// Dropping a value through its raw byte representation.
pub trait DropBytes {
/// Runs the value's destructor given its raw bytes.
///
/// # Safety
/// `bytes` must be the valid, correctly aligned byte representation of a
/// live value of the implementing type — TODO confirm the exact contract
/// against the `Bytes` helpers.
unsafe fn drop_bytes(bytes: &mut [u8]);
}
/// Cloning through raw byte representations.
///
/// NOTE(review): `#[dyn_trait_method]` (from `dyn_derive`) presumably also
/// generates the byte-level counterparts shown commented out below
/// (`clone_bytes`, `clone_from_bytes`), which the impls further down provide
/// — confirm against the macro's documentation.
pub trait CloneBytes: Clone {
#[dyn_trait_method]
fn clone(&self) -> Self;
//unsafe fn clone_bytes(src: &[u8]) -> Box<[u8]>;
#[dyn_trait_method]
fn clone_from(&mut self, src: &Self);
//unsafe fn clone_from_bytes(dst: &mut [u8], src: &[u8]);
/// Clone without dropping the destination bytes.
unsafe fn clone_into_raw_bytes(src: &[u8], dst: &mut [u8]);
}
/// Equality comparison through raw byte representations.
pub trait PartialEqBytes: PartialEq {
#[dyn_trait_method]
fn eq(&self, other: &Self) -> bool;
//unsafe fn eq_bytes(a: &[u8], b: &[u8]) -> bool;
}
/// Marker trait: byte-level total equality.
pub trait EqBytes: PartialEqBytes + Eq {}
/// Hashing through raw byte representations.
pub trait HashBytes: Hash {
#[dyn_trait_method]
fn hash<H: Hasher>(&self, state: &mut H);
//unsafe fn hash_bytes(bytes: &[u8], state: &mut dyn Hasher);
}
/// Debug formatting through raw byte representations.
pub trait DebugBytes: fmt::Debug {
#[dyn_trait_method]
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error>;
//unsafe fn fmt_bytes(bytes: &[u8], f: &mut fmt::Formatter) -> Result<(), fmt::Error>;
}
impl<T: 'static> DropBytes for T {
#[inline]
unsafe fn drop_bytes(bytes: &mut [u8]) {
// Reinterpret the bytes as a ManuallyDrop<T> and run T's destructor.
// SAFETY (caller contract): `bytes` must hold a valid, not-yet-dropped T
// — TODO confirm against Bytes::from_bytes_mut's requirements.
let md: &mut ManuallyDrop<T> = Bytes::from_bytes_mut(bytes);
ManuallyDrop::drop(md);
}
}
impl<T: Clone + 'static> CloneBytes for T {
// NOTE(review): clone_bytes/clone_from_bytes satisfy trait items that are
// presumably generated by #[dyn_trait_method]; they appear commented out in
// the trait declaration above.
#[inline]
unsafe fn clone_bytes(src: &[u8]) -> Box<[u8]> {
// Clone the T read out of `src` into a fresh boxed byte buffer.
let typed_src: &T = Bytes::from_bytes(src);
Bytes::box_into_box_bytes(Box::new(typed_src.clone()))
}
#[inline]
unsafe fn clone_from_bytes(dst: &mut [u8], src: &[u8]) {
// Clone-from reuses dst's existing allocation where T supports it.
let typed_src: &T = Bytes::from_bytes(src);
let typed_dst: &mut T = Bytes::from_bytes_mut(dst);
typed_dst.clone_from(typed_src);
}
#[inline]
unsafe fn clone_into_raw_bytes(src: &[u8], dst: &mut [u8]) {
let typed_src: &T = Bytes::from_bytes(src);
let cloned = T::clone(typed_src);
let cloned_bytes = Bytes::as_bytes(&cloned);
dst.copy_from_slice(cloned_bytes);
// Leak the local clone: its bytes now live in `dst`, so its destructor
// must not run here (ownership was transferred bytewise).
let _ = ManuallyDrop::new(cloned);
}
}
impl<T: PartialEq + 'static> PartialEqBytes for T {
// Compares two values through their raw byte representations.
#[inline]
unsafe fn eq_bytes(a: &[u8], b: &[u8]) -> bool {
let (a, b): (&T, &T) = (Bytes::from_bytes(a), Bytes::from_bytes(b));
a.eq(b)
}
}
// Blanket marker impl: any Eq type with byte-level PartialEq is EqBytes.
impl<T: PartialEqBytes + Eq> EqBytes for T {}
impl<T: Hash + 'static> HashBytes for T {
#[inline]
unsafe fn hash_bytes(bytes: &[u8], mut state: &mut dyn Hasher) {
let typed_data: &T = Bytes::from_bytes(bytes);
// `&mut state` (i.e. &mut &mut dyn Hasher) lets the generic Hash::hash
// be driven through the trait object.
typed_data.hash(&mut state)
}
}
impl<T: fmt::Debug + 'static> DebugBytes for T {
// Debug-formats a value through its raw byte representation.
#[inline]
unsafe fn fmt_bytes(bytes: &[u8], f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let typed_data: &T = Bytes::from_bytes(bytes);
typed_data.fmt(f)
}
}
// Function-pointer types for the byte-level operations above; these are what
// the wrapper structs generated by impl_fn_wrapper! below store.
pub(crate) type CloneFnType = unsafe fn(&[u8]) -> Box<[u8]>;
pub(crate) type CloneFromFnType = unsafe fn(&mut [u8], &[u8]);
pub(crate) type CloneIntoRawFnType = unsafe fn(&[u8], &mut [u8]);
pub(crate) type EqFnType = unsafe fn(&[u8], &[u8]) -> bool;
pub(crate) type HashFnType = unsafe fn(&[u8], &mut dyn Hasher);
pub(crate) type FmtFnType = unsafe fn(&[u8], &mut fmt::Formatter) -> Result<(), fmt::Error>;
pub(crate) type DropFnType = unsafe fn(&mut [u8]);
// Generates a newtype wrapper around one of the function-pointer aliases
// above, plus Debug/PartialEq/Hash impls. Two entry arms — one for an empty
// derive() list and one forwarding a real derive meta — both delegating to
// the internal @impls arm.
macro_rules! impl_fn_wrapper {
(derive() struct $fn:ident ( $fn_type:ident )) => {
pub struct $fn (pub(crate) $fn_type);
impl_fn_wrapper!(@impls $fn ( $fn_type ));
};
($derives:meta struct $fn:ident ( $fn_type:ident )) => {
#[$derives]
pub struct $fn (pub(crate) $fn_type);
impl_fn_wrapper!(@impls $fn ( $fn_type ));
};
(@impls $fn:ident ( $fn_type:ident )) => {
//impl $fn {
// pub fn new(f: $fn_type) -> Self {
// $fn(f)
// }
//}
// Debug prints only the wrapper name, not the pointer value.
impl fmt::Debug for $fn {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple(stringify!($fn)).finish()
}
}
impl PartialEq for $fn {
fn eq(&self, _: &Self) -> bool {
// Equality is completely determined by VecCopy.
true
}
}
// Hash is based on the function pointer's address.
impl Hash for $fn {
fn hash<H: Hasher>(&self, state: &mut H) {
(self.0 as usize).hash(state);
}
}
}
}
// Concrete wrapper types for each supported byte-level operation.
impl_fn_wrapper!(derive(Copy, Clone) struct CloneFn(CloneFnType));
impl_fn_wrapper!(derive(Copy, Clone) struct CloneFromFn(CloneFromFnType));
impl_fn_wrapper!(derive(Copy, Clone) struct EqFn(EqFnType));
impl_fn_wrapper!(derive(Copy, Clone) struct HashFn(HashFnType));
impl_fn_wrapper!(derive(Copy, Clone) struct FmtFn(FmtFnType));
impl_fn_wrapper!(derive(Copy, Clone) struct DropFn(DropFnType));
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.